-rw-r--r--.mailmap1
-rw-r--r--CREDITS8
-rw-r--r--Documentation/ABI/testing/sysfs-block64
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-cleancache11
-rw-r--r--Documentation/ABI/testing/sysfs-ptp98
-rw-r--r--Documentation/DocBook/dvb/dvbproperty.xml5
-rw-r--r--Documentation/DocBook/media-entities.tmpl7
-rw-r--r--Documentation/DocBook/v4l/media-controller.xml6
-rw-r--r--Documentation/DocBook/v4l/pixfmt.xml1
-rw-r--r--Documentation/DocBook/v4l/subdev-formats.xml10
-rw-r--r--Documentation/IRQ-affinity.txt17
-rw-r--r--Documentation/accounting/getdelays.c38
-rw-r--r--Documentation/atomic_ops.txt2
-rw-r--r--Documentation/blockdev/cciss.txt15
-rw-r--r--Documentation/cachetlb.txt2
-rw-r--r--Documentation/cgroups/cgroups.txt41
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt54
-rw-r--r--Documentation/filesystems/9p.txt29
-rw-r--r--Documentation/filesystems/configfs/configfs_example_explicit.c6
-rw-r--r--Documentation/filesystems/configfs/configfs_example_macros.c6
-rw-r--r--Documentation/filesystems/ext4.txt4
-rw-r--r--Documentation/filesystems/proc.txt11
-rw-r--r--Documentation/filesystems/xfs.txt6
-rw-r--r--Documentation/hwmon/emc6w20142
-rw-r--r--Documentation/hwmon/f71882fg4
-rw-r--r--Documentation/hwmon/fam15h_power37
-rw-r--r--Documentation/hwmon/k10temp3
-rw-r--r--Documentation/hwmon/max665021
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kbuild/kconfig-language.txt32
-rw-r--r--Documentation/kbuild/kconfig.txt5
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/lockstat.txt2
-rw-r--r--Documentation/mmc/00-INDEX2
-rw-r--r--Documentation/mmc/mmc-dev-attrs.txt10
-rw-r--r--Documentation/mmc/mmc-dev-parts.txt27
-rw-r--r--Documentation/networking/bonding.txt13
-rw-r--r--Documentation/power/regulator/machine.txt4
-rw-r--r--Documentation/ptp/ptp.txt89
-rw-r--r--Documentation/ptp/testptp.c381
-rw-r--r--Documentation/ptp/testptp.mk33
-rw-r--r--Documentation/sysctl/kernel.txt3
-rw-r--r--Documentation/virtual/uml/UserModeLinux-HOWTO.txt10
-rw-r--r--Documentation/vm/cleancache.txt278
-rw-r--r--Documentation/vm/locking2
-rw-r--r--MAINTAINERS68
-rw-r--r--Makefile17
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/Kconfig8
-rw-r--r--arch/alpha/include/asm/gpio.h55
-rw-r--r--arch/alpha/include/asm/smp.h2
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/setup.c2
-rw-r--r--arch/alpha/kernel/smp.c7
-rw-r--r--arch/alpha/kernel/sys_dp264.c2
-rw-r--r--arch/alpha/kernel/sys_titan.c13
-rw-r--r--arch/alpha/mm/init.c2
-rw-r--r--arch/alpha/mm/numa.c1
-rw-r--r--arch/arm/Kconfig.debug7
-rw-r--r--arch/arm/configs/omap2plus_defconfig83
-rw-r--r--arch/arm/include/asm/bitops.h46
-rw-r--r--arch/arm/include/asm/smp.h6
-rw-r--r--arch/arm/include/asm/tlb.h53
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h78
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile4
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c27
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c155
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c125
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c10
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c60
-rw-r--r--arch/arm/mach-omap2/board-apollon.c29
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c240
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c9
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c135
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c453
-rw-r--r--arch/arm/mach-omap2/board-igep0030.c458
-rw-r--r--arch/arm/mach-omap2/board-ldp.c138
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c28
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c198
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c246
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c14
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c92
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c137
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c121
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c83
-rw-r--r--arch/arm/mach-omap2/board-overo.c269
-rw-r--r--arch/arm/mach-omap2/board-rm680.c21
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c71
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c7
-rw-r--r--arch/arm/mach-omap2/board-rx51.c18
-rw-r--r--arch/arm/mach-omap2/board-zoom-debugboard.c65
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c33
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c29
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c163
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h35
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c436
-rw-r--r--arch/arm/mach-omap2/display.c77
-rw-r--r--arch/arm/mach-omap2/gpmc-smc91x.c11
-rw-r--r--arch/arm/mach-omap2/gpmc-smsc911x.c44
-rw-r--r--arch/arm/mach-omap2/include/mach/board-zoom.h2
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.c51
-rw-r--r--arch/arm/mach-omap2/omap_l3_smx.c42
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c9
-rw-r--r--arch/arm/mach-omap2/pm.h17
-rw-r--r--arch/arm/mach-omap2/pm34xx.c14
-rw-r--r--arch/arm/mach-omap2/pm44xx.c2
-rw-r--r--arch/arm/mach-omap2/smartreflex.c23
-rw-r--r--arch/arm/mach-omap2/usb-musb.c22
-rw-r--r--arch/arm/mach-omap2/usb-tusb6010.c3
-rw-r--r--arch/arm/mach-omap2/voltage.c1
-rw-r--r--arch/arm/mach-shmobile/Makefile5
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c118
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c30
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c2
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c272
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c21
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c19
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c92
-rw-r--r--arch/arm/mach-shmobile/headsmp.S2
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h7
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-ap4evb.txt3
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-mackerel.txt3
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h1
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh73a0.h30
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c46
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c108
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c223
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c217
-rw-r--r--arch/arm/mach-shmobile/setup-sh7377.c239
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c244
-rw-r--r--arch/arm/mach-shmobile/sleep-sh7372.S260
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c9
-rw-r--r--arch/arm/mach-shmobile/suspend.c47
-rw-r--r--arch/arm/mach-tegra/include/mach/sdhci.h1
-rw-r--r--arch/arm/mach-ux500/Kconfig3
-rw-r--r--arch/arm/mach-ux500/Makefile4
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c2
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c7
-rw-r--r--arch/arm/mach-ux500/cpu.c7
-rw-r--r--arch/arm/mach-ux500/cpufreq.c211
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-regs.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/db8500-regs.h37
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/id.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-mop500.h5
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-u5500.h21
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-db5500.h27
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-db8500.h54
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs.h46
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu-defs.h30
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu.h28
-rw-r--r--arch/arm/mach-ux500/prcmu.c394
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm/plat-omap/include/plat/gpmc-smsc911x.h4
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h1
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h2
-rw-r--r--arch/avr32/include/asm/bitops.h15
-rw-r--r--arch/avr32/mm/init.c2
-rw-r--r--arch/blackfin/Kconfig5
-rw-r--r--arch/blackfin/Kconfig.debug11
-rw-r--r--arch/blackfin/configs/BF527-EZKIT-V2_defconfig12
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig14
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig2
-rw-r--r--arch/blackfin/include/asm/bfin-global.h10
-rw-r--r--arch/blackfin/include/asm/bfin_pfmon.h44
-rw-r--r--arch/blackfin/include/asm/bfin_sport.h4
-rw-r--r--arch/blackfin/include/asm/cacheflush.h23
-rw-r--r--arch/blackfin/include/asm/cpu.h3
-rw-r--r--arch/blackfin/include/asm/def_LPBlackfin.h12
-rw-r--r--arch/blackfin/include/asm/irq_handler.h25
-rw-r--r--arch/blackfin/include/asm/kgdb.h7
-rw-r--r--arch/blackfin/include/asm/perf_event.h1
-rw-r--r--arch/blackfin/include/asm/ptrace.h7
-rw-r--r--arch/blackfin/include/mach-common/irq.h57
-rw-r--r--arch/blackfin/kernel/Makefile3
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c5
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c34
-rw-r--r--arch/blackfin/kernel/bfin_ksyms.c1
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c1860
-rw-r--r--arch/blackfin/kernel/ipipe.c1
-rw-r--r--arch/blackfin/kernel/irqchip.c1
-rw-r--r--arch/blackfin/kernel/nmi.c8
-rw-r--r--arch/blackfin/kernel/perf_event.c498
-rw-r--r--arch/blackfin/kernel/process.c6
-rw-r--r--arch/blackfin/kernel/reboot.c65
-rw-r--r--arch/blackfin/kernel/setup.c54
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S8
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h4
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF512.h16
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF512.h8
-rw-r--r--arch/blackfin/mach-bf518/include/mach/irq.h262
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c74
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h8
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF522.h16
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF522.h8
-rw-r--r--arch/blackfin/mach-bf527/include/mach/irq.h266
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h11
-rw-r--r--arch/blackfin/mach-bf533/include/mach/irq.h168
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c106
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h10
-rw-r--r--arch/blackfin/mach-bf537/include/mach/irq.h365
-rw-r--r--arch/blackfin/mach-bf537/ints-priority.c163
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h9
-rw-r--r--arch/blackfin/mach-bf538/include/mach/irq.h89
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c116
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h8
-rw-r--r--arch/blackfin/mach-bf548/include/mach/irq.h89
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c10
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h15
-rw-r--r--arch/blackfin/mach-bf561/include/mach/irq.h505
-rw-r--r--arch/blackfin/mach-bf561/smp.c17
-rw-r--r--arch/blackfin/mach-common/dpmc.c7
-rw-r--r--arch/blackfin/mach-common/ints-priority.c476
-rw-r--r--arch/blackfin/mach-common/smp.c28
-rw-r--r--arch/blackfin/mm/sram-alloc.c43
-rw-r--r--arch/cris/Kconfig4
-rw-r--r--arch/cris/arch-v32/kernel/irq.c4
-rw-r--r--arch/cris/arch-v32/kernel/smp.c33
-rw-r--r--arch/cris/mm/init.c2
-rw-r--r--arch/frv/Kconfig8
-rw-r--r--arch/frv/mm/init.c2
-rw-r--r--arch/h8300/Kconfig8
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/include/asm/tlb.h66
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/ia64/mm/contig.c10
-rw-r--r--arch/ia64/mm/discontig.c10
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/m32r/Kconfig8
-rw-r--r--arch/m32r/Kconfig.debug9
-rw-r--r--arch/m32r/include/asm/smp.h6
-rw-r--r--arch/m32r/kernel/smp.c64
-rw-r--r--arch/m32r/kernel/smpboot.c48
-rw-r--r--arch/m32r/mm/discontig.c1
-rw-r--r--arch/m32r/mm/init.c2
-rw-r--r--arch/m68k/Kconfig.nommu4
-rw-r--r--arch/m68k/include/asm/bitops_mm.h8
-rw-r--r--arch/m68k/include/asm/bitops_no.h4
-rw-r--r--arch/m68k/mm/init_mm.c2
-rw-r--r--arch/microblaze/Kconfig6
-rw-r--r--arch/microblaze/mm/init.c2
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/Kconfig.debug9
-rw-r--r--arch/mips/configs/bcm47xx_defconfig1
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mn10300/Kconfig3
-rw-r--r--arch/mn10300/configs/asb2364_defconfig1
-rw-r--r--arch/mn10300/kernel/irq.c16
-rw-r--r--arch/mn10300/kernel/smp.c75
-rw-r--r--arch/mn10300/mm/cache-smp.c8
-rw-r--r--arch/mn10300/mm/init.c2
-rw-r--r--arch/mn10300/mm/tlb-smp.c32
-rw-r--r--arch/parisc/Kconfig8
-rw-r--r--arch/parisc/include/asm/smp.h9
-rw-r--r--arch/parisc/mm/init.c4
-rw-r--r--arch/powerpc/Kconfig10
-rw-r--r--arch/powerpc/Kconfig.debug21
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts18
-rw-r--r--arch/powerpc/boot/dts/katmai.dts18
-rw-r--r--arch/powerpc/boot/dts/kilauea.dts28
-rw-r--r--arch/powerpc/boot/dts/mpc8313erdb.dts13
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dts13
-rw-r--r--arch/powerpc/boot/dts/p2020ds.dts13
-rw-r--r--arch/powerpc/boot/dts/p2020rdb.dts13
-rw-r--r--arch/powerpc/boot/dts/redwood.dts20
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/include/asm/fsl_lbc.h2
-rw-r--r--arch/powerpc/include/asm/ftrace.h14
-rw-r--r--arch/powerpc/include/asm/hvcall.h2
-rw-r--r--arch/powerpc/include/asm/pgalloc.h21
-rw-r--r--arch/powerpc/include/asm/rio.h5
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/syscall.h5
-rw-r--r--arch/powerpc/include/asm/thread_info.h9
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/ftrace.c8
-rw-r--r--arch/powerpc/kernel/irq.c46
-rw-r--r--arch/powerpc/kernel/process.c23
-rw-r--r--arch/powerpc/kernel/ptrace.c10
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/traps.c13
-rw-r--r--arch/powerpc/mm/pgtable.c104
-rw-r--r--arch/powerpc/mm/tlb_hash32.c3
-rw-r--r--arch/powerpc/mm/tlb_hash64.c5
-rw-r--r--arch/powerpc/mm/tlb_nohash.c3
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c24
-rw-r--r--arch/powerpc/platforms/40x/Kconfig2
-rw-r--r--arch/powerpc/platforms/44x/Kconfig6
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c55
-rw-r--r--arch/powerpc/platforms/cell/interrupt.h2
-rw-r--r--arch/powerpc/platforms/cell/smp.c2
-rw-r--r--arch/powerpc/sysdev/Kconfig7
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c9
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c100
-rw-r--r--arch/powerpc/sysdev/ppc4xx_msi.c276
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/include/asm/bitops.h45
-rw-r--r--arch/s390/include/asm/delay.h8
-rw-r--r--arch/s390/include/asm/irq.h8
-rw-r--r--arch/s390/include/asm/s390_ext.h17
-rw-r--r--arch/s390/include/asm/tlb.h62
-rw-r--r--arch/s390/include/asm/topology.h4
-rw-r--r--arch/s390/include/asm/uaccess.h11
-rw-r--r--arch/s390/kernel/Makefile8
-rw-r--r--arch/s390/kernel/dis.c2
-rw-r--r--arch/s390/kernel/irq.c137
-rw-r--r--arch/s390/kernel/s390_ext.c108
-rw-r--r--arch/s390/kernel/smp.c1
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/topology.c1
-rw-r--r--arch/s390/kernel/traps.c1
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/lib/delay.c15
-rw-r--r--arch/s390/mm/fault.c62
-rw-r--r--arch/s390/mm/init.c2
-rw-r--r--arch/s390/mm/pgtable.c1
-rw-r--r--arch/s390/oprofile/hwsampler.c4
-rw-r--r--arch/score/Kconfig3
-rw-r--r--arch/score/Kconfig.debug9
-rw-r--r--arch/score/mm/init.c2
-rw-r--r--arch/sh/Kconfig6
-rw-r--r--arch/sh/Kconfig.debug9
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig1
-rw-r--r--arch/sh/configs/sdk7786_defconfig1
-rw-r--r--arch/sh/configs/se7206_defconfig1
-rw-r--r--arch/sh/configs/shx3_defconfig1
-rw-r--r--arch/sh/configs/urquell_defconfig1
-rw-r--r--arch/sh/include/asm/kgdb.h1
-rw-r--r--arch/sh/include/asm/ptrace.h6
-rw-r--r--arch/sh/include/asm/tlb.h28
-rw-r--r--arch/sh/mm/init.c1
-rw-r--r--arch/sparc/Kconfig8
-rw-r--r--arch/sparc/Kconfig.debug9
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h15
-rw-r--r--arch/sparc/include/asm/tlb_64.h91
-rw-r--r--arch/sparc/include/asm/tlbflush_64.h12
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/mm/init_32.c4
-rw-r--r--arch/sparc/mm/tlb.c43
-rw-r--r--arch/sparc/mm/tsb.c15
-rw-r--r--arch/tile/Kconfig9
-rw-r--r--arch/tile/Kconfig.debug9
-rw-r--r--arch/tile/configs/tile_defconfig71
-rw-r--r--arch/tile/configs/tilegx_defconfig1833
-rw-r--r--arch/tile/configs/tilepro_defconfig1163
-rw-r--r--arch/tile/include/arch/chip_tilegx.h258
-rw-r--r--arch/tile/include/arch/icache.h11
-rw-r--r--arch/tile/include/arch/interrupts_64.h276
-rw-r--r--arch/tile/include/arch/spr_def.h13
-rw-r--r--arch/tile/include/arch/spr_def_64.h173
-rw-r--r--arch/tile/include/asm/atomic.h49
-rw-r--r--arch/tile/include/asm/atomic_32.h10
-rw-r--r--arch/tile/include/asm/atomic_64.h156
-rw-r--r--arch/tile/include/asm/backtrace.h82
-rw-r--r--arch/tile/include/asm/bitops.h1
-rw-r--r--arch/tile/include/asm/bitops_32.h1
-rw-r--r--arch/tile/include/asm/bitops_64.h105
-rw-r--r--arch/tile/include/asm/cacheflush.h18
-rw-r--r--arch/tile/include/asm/compat.h4
-rw-r--r--arch/tile/include/asm/dma-mapping.h3
-rw-r--r--arch/tile/include/asm/fb.h1
-rw-r--r--arch/tile/include/asm/io.h18
-rw-r--r--arch/tile/include/asm/irq.h2
-rw-r--r--arch/tile/include/asm/mmu_context.h4
-rw-r--r--arch/tile/include/asm/opcode-tile_32.h7
-rw-r--r--arch/tile/include/asm/opcode-tile_64.h1500
-rw-r--r--arch/tile/include/asm/opcode_constants_64.h1043
-rw-r--r--arch/tile/include/asm/page.h18
-rw-r--r--arch/tile/include/asm/parport.h1
-rw-r--r--arch/tile/include/asm/pci.h3
-rw-r--r--arch/tile/include/asm/pgtable_64.h175
-rw-r--r--arch/tile/include/asm/processor.h9
-rw-r--r--arch/tile/include/asm/serial.h1
-rw-r--r--arch/tile/include/asm/signal.h4
-rw-r--r--arch/tile/include/asm/spinlock_64.h161
-rw-r--r--arch/tile/include/asm/stat.h2
-rw-r--r--arch/tile/include/asm/swab.h6
-rw-r--r--arch/tile/include/asm/thread_info.h5
-rw-r--r--arch/tile/include/asm/topology.h75
-rw-r--r--arch/tile/include/asm/traps.h4
-rw-r--r--arch/tile/include/asm/unistd.h2
-rw-r--r--arch/tile/include/asm/vga.h (renamed from arch/tile/include/hv/pagesize.h)35
-rw-r--r--arch/tile/include/hv/hypervisor.h12
-rw-r--r--arch/tile/kernel/backtrace.c103
-rw-r--r--arch/tile/kernel/compat.c13
-rw-r--r--arch/tile/kernel/compat_signal.c4
-rw-r--r--arch/tile/kernel/futex_64.S55
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/head_64.S269
-rw-r--r--arch/tile/kernel/intvec_32.S175
-rw-r--r--arch/tile/kernel/intvec_64.S1231
-rw-r--r--arch/tile/kernel/module.c8
-rw-r--r--arch/tile/kernel/pci-dma.c2
-rw-r--r--arch/tile/kernel/pci.c206
-rw-r--r--arch/tile/kernel/process.c52
-rw-r--r--arch/tile/kernel/regs_64.S145
-rw-r--r--arch/tile/kernel/setup.c6
-rw-r--r--arch/tile/kernel/signal.c128
-rw-r--r--arch/tile/kernel/single_step.c12
-rw-r--r--arch/tile/kernel/stack.c14
-rw-r--r--arch/tile/kernel/sys.c9
-rw-r--r--arch/tile/kernel/tile-desc_32.c11
-rw-r--r--arch/tile/kernel/tile-desc_64.c2200
-rw-r--r--arch/tile/kernel/time.c2
-rw-r--r--arch/tile/kernel/tlb.c12
-rw-r--r--arch/tile/kernel/traps.c1
-rw-r--r--arch/tile/lib/atomic_asm_32.S2
-rw-r--r--arch/tile/lib/cacheflush.c18
-rw-r--r--arch/tile/lib/memchr_64.c71
-rw-r--r--arch/tile/lib/memcpy_64.c220
-rw-r--r--arch/tile/lib/memcpy_user_64.c86
-rw-r--r--arch/tile/lib/memset_64.c145
-rw-r--r--arch/tile/lib/spinlock_64.c104
-rw-r--r--arch/tile/lib/strchr_64.c67
-rw-r--r--arch/tile/lib/strlen_64.c38
-rw-r--r--arch/tile/lib/usercopy_64.S196
-rw-r--r--arch/tile/mm/fault.c30
-rw-r--r--arch/tile/mm/init.c2
-rw-r--r--arch/tile/mm/migrate_64.S187
-rw-r--r--arch/um/Kconfig.debug16
-rw-r--r--arch/um/Kconfig.x861
-rw-r--r--arch/um/drivers/Makefile4
-rw-r--r--arch/um/drivers/mcast.h24
-rw-r--r--arch/um/drivers/mcast_kern.c120
-rw-r--r--arch/um/drivers/mcast_user.c165
-rw-r--r--arch/um/drivers/umcast.h27
-rw-r--r--arch/um/drivers/umcast_kern.c188
-rw-r--r--arch/um/drivers/umcast_user.c186
-rw-r--r--arch/um/drivers/xterm.c2
-rw-r--r--arch/um/include/asm/processor-generic.h2
-rw-r--r--arch/um/include/asm/smp.h1
-rw-r--r--arch/um/include/asm/tlb.h29
-rw-r--r--arch/um/include/shared/os.h7
-rw-r--r--arch/um/kernel/Makefile1
-rw-r--r--arch/um/kernel/early_printk.c33
-rw-r--r--arch/um/kernel/smp.c3
-rw-r--r--arch/um/kernel/trap.c24
-rw-r--r--arch/um/os-Linux/main.c3
-rw-r--r--arch/um/os-Linux/process.c1
-rw-r--r--arch/um/os-Linux/util.c5
-rw-r--r--arch/unicore32/Kconfig.debug7
-rw-r--r--arch/unicore32/mm/init.c2
-rw-r--r--arch/unicore32/mm/mmu.c2
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/Kconfig.debug20
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/include/asm/io.h24
-rw-r--r--arch/x86/include/asm/kgdb.h1
-rw-r--r--arch/x86/include/asm/ptrace.h18
-rw-r--r--arch/x86/include/asm/tsc.h4
-rw-r--r--arch/x86/include/asm/vdso.h14
-rw-r--r--arch/x86/include/asm/vgtod.h2
-rw-r--r--arch/x86/include/asm/vsyscall.h12
-rw-r--r--arch/x86/include/asm/vvar.h52
-rw-r--r--arch/x86/include/asm/xen/hypercall.h7
-rw-r--r--arch/x86/kernel/Makefile8
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/tboot.c1
-rw-r--r--arch/x86/kernel/time.c2
-rw-r--r--arch/x86/kernel/tsc.c19
-rw-r--r--arch/x86/kernel/vmlinux.lds.S34
-rw-r--r--arch/x86/kernel/vread_tsc_64.c36
-rw-r--r--arch/x86/kernel/vsyscall_64.c48
-rw-r--r--arch/x86/kvm/mmu.c3
-rw-r--r--arch/x86/mm/fault.c12
-rw-r--r--arch/x86/mm/hugetlbpage.c4
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/vdso/Makefile17
-rw-r--r--arch/x86/vdso/vclock_gettime.c74
-rw-r--r--arch/x86/vdso/vdso.lds.S9
-rw-r--r--arch/x86/vdso/vextern.h16
-rw-r--r--arch/x86/vdso/vgetcpu.c3
-rw-r--r--arch/x86/vdso/vma.c27
-rw-r--r--arch/x86/vdso/vvar.c12
-rw-r--r--arch/x86/xen/mmu.c284
-rw-r--r--arch/x86/xen/mmu.h37
-rw-r--r--arch/xtensa/Kconfig6
-rw-r--r--arch/xtensa/include/asm/page.h4
-rw-r--r--arch/xtensa/mm/mmu.c2
-rw-r--r--arch/xtensa/mm/pgtable.c72
-rw-r--r--block/blk-cgroup.c218
-rw-r--r--block/blk-cgroup.h40
-rw-r--r--block/blk-core.c32
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c16
-rw-r--r--block/blk-ioc.c3
-rw-r--r--block/blk-lib.c82
-rw-r--r--block/blk-settings.c9
-rw-r--r--block/blk-sysfs.c3
-rw-r--r--block/blk-throttle.c313
-rw-r--r--block/blk.h23
-rw-r--r--block/cfq-iosched.c232
-rw-r--r--block/elevator.c11
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/Kconfig11
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/apei/einj.c8
-rw-r--r--drivers/acpi/atomicio.c4
-rw-r--r--drivers/ata/libata-scsi.c13
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/bcma/host_pci.c2
-rw-r--r--drivers/block/Kconfig21
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/cciss.c571
-rw-r--r--drivers/block/cciss.h11
-rw-r--r--drivers/block/cciss_cmd.h11
-rw-r--r--drivers/block/cciss_scsi.c41
-rw-r--r--drivers/block/cciss_scsi.h4
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c6
-rw-r--r--drivers/block/drbd/drbd_int.h19
-rw-r--r--drivers/block/drbd/drbd_main.c37
-rw-r--r--drivers/block/drbd/drbd_nl.c127
-rw-r--r--drivers/block/drbd/drbd_receiver.c68
-rw-r--r--drivers/block/drbd/drbd_req.c20
-rw-r--r--drivers/block/drbd/drbd_req.h5
-rw-r--r--drivers/block/drbd/drbd_worker.c98
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/rbd.c27
-rw-r--r--drivers/block/xen-blkback/Makefile3
-rw-r--r--drivers/block/xen-blkback/blkback.c824
-rw-r--r--drivers/block/xen-blkback/common.h233
-rw-r--r--drivers/block/xen-blkback/xenbus.c768
-rw-r--r--drivers/block/xen-blkfront.c51
-rw-r--r--drivers/cdrom/viocd.c4
-rw-r--r--drivers/char/i8k.c166
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c138
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c88
-rw-r--r--drivers/char/mspec.c5
-rw-r--r--drivers/char/ppdev.c1
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c169
-rw-r--r--drivers/dma/shdma.c42
-rw-r--r--drivers/dma/shdma.h2
-rw-r--r--drivers/dma/timb_dma.c3
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/amd8111_edac.c2
-rw-r--r--drivers/edac/amd8131_edac.c2
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/e752x_edac.c2
-rw-r--r--drivers/edac/e7xxx_edac.c2
-rw-r--r--drivers/edac/edac_core.h12
-rw-r--r--drivers/edac/edac_device.c24
-rw-r--r--drivers/edac/edac_mc.c16
-rw-r--r--drivers/edac/edac_module.c2
-rw-r--r--drivers/edac/edac_pci.c21
-rw-r--r--drivers/edac/i3200_edac.c13
-rw-r--r--drivers/edac/i5000_edac.c2
-rw-r--r--drivers/edac/i5400_edac.c2
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/i82860_edac.c2
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.h2
-rw-r--r--drivers/edac/mv64x60_edac.h2
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/gpio/Kconfig30
-rw-r--r--drivers/gpio/Makefile8
-rw-r--r--drivers/gpio/basic_mmio_gpio.c517
-rw-r--r--drivers/gpio/gpiolib.c19
-rw-r--r--drivers/gpio/janz-ttl.c3
-rw-r--r--drivers/gpio/ml_ioh_gpio.c3
-rw-r--r--drivers/gpio/pca953x.c2
-rw-r--r--drivers/gpio/rdc321x-gpio.c3
-rw-r--r--drivers/gpio/timbgpio.c6
-rw-r--r--drivers/gpio/tps65910-gpio.c100
-rw-r--r--drivers/gpio/vx855_gpio.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c4
-rw-r--r--drivers/hwmon/Kconfig42
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/abituguru.c3
-rw-r--r--drivers/hwmon/abituguru3.c13
-rw-r--r--drivers/hwmon/acpi_power_meter.c (renamed from drivers/acpi/power_meter.c)0
-rw-r--r--drivers/hwmon/adcxx.c16
-rw-r--r--drivers/hwmon/coretemp.c16
-rw-r--r--drivers/hwmon/emc6w201.c539
-rw-r--r--drivers/hwmon/f71882fg.c115
-rw-r--r--drivers/hwmon/fam15h_power.c229
-rw-r--r--drivers/hwmon/ibmaem.c10
-rw-r--r--drivers/hwmon/it87.c31
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/k10temp.c11
-rw-r--r--drivers/hwmon/k8temp.c8
-rw-r--r--drivers/hwmon/lm70.c10
-rw-r--r--drivers/hwmon/max6650.c78
-rw-r--r--drivers/hwmon/pmbus_core.c1
-rw-r--r--drivers/hwmon/sch5627.c46
-rw-r--r--drivers/hwmon/ultra45_env.c4
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-xiic.c3
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cm.c8
-rw-r--r--drivers/infiniband/core/cma.c308
-rw-r--r--drivers/infiniband/core/device.c25
-rw-r--r--drivers/infiniband/core/mad.c7
-rw-r--r--drivers/infiniband/core/netlink.c190
-rw-r--r--drivers/infiniband/core/ucma.c35
-rw-r--r--drivers/infiniband/core/user_mad.c7
-rw-r--r--drivers/infiniband/core/uverbs_main.c8
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c26
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h18
-rw-r--r--drivers/infiniband/hw/nes/nes.c4
-rw-r--r--drivers/infiniband/hw/qib/Kconfig2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c4
-rw-r--r--drivers/input/input-compat.h2
-rw-r--r--drivers/input/keyboard/Kconfig11
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c799
-rw-r--r--drivers/input/misc/Kconfig11
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c231
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/isdn/hardware/eicon/divasfunc.c5
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c6
-rw-r--r--drivers/leds/Kconfig34
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c3
-rw-r--r--drivers/leds/leds-88pm860x.c7
-rw-r--r--drivers/leds/leds-asic3.c165
-rw-r--r--drivers/leds/leds-gpio-register.c42
-rw-r--r--drivers/leds/leds-h1940.c170
-rw-r--r--drivers/leds/leds-lm3530.c73
-rw-r--r--drivers/leds/leds-mc13783.c7
-rw-r--r--drivers/leds/leds-pca9532.c191
-rw-r--r--drivers/leds/leds.h7
-rw-r--r--drivers/leds/ledtrig-timer.c3
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c272
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c107
-rw-r--r--drivers/media/dvb/frontends/stb0899_algo.c2
-rw-r--r--drivers/media/dvb/frontends/tda8261.c1
-rw-r--r--drivers/media/radio/radio-maxiradio.c3
-rw-r--r--drivers/media/radio/radio-timb.c3
-rw-r--r--drivers/media/radio/radio-wl1273.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/Kconfig12
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/fintek-cir.c684
-rw-r--r--drivers/media/rc/fintek-cir.h243
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c134
-rw-r--r--drivers/media/video/Kconfig6
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c2
-rw-r--r--drivers/media/video/gspca/kinect.c2
-rw-r--r--drivers/media/video/m5mols/Kconfig5
-rw-r--r--drivers/media/video/m5mols/Makefile3
-rw-r--r--drivers/media/video/m5mols/m5mols.h296
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c191
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c299
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c1004
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h399
-rw-r--r--drivers/media/video/omap/omap_vout.c2
-rw-r--r--drivers/media/video/omap/omap_voutdef.h2
-rw-r--r--drivers/media/video/timblogiw.c3
-rw-r--r--drivers/media/video/uvc/Makefile3
-rw-r--r--drivers/media/video/uvc/uvc_driver.c66
-rw-r--r--drivers/media/video/uvc/uvc_entity.c118
-rw-r--r--drivers/media/video/uvc/uvcvideo.h20
-rw-r--r--drivers/mfd/88pm860x-core.c155
-rw-r--r--drivers/mfd/Kconfig115
-rw-r--r--drivers/mfd/Makefile8
-rw-r--r--drivers/mfd/ab3100-core.c6
-rw-r--r--drivers/mfd/ab3550-core.c6
-rw-r--r--drivers/mfd/ab8500-core.c32
-rw-r--r--drivers/mfd/ab8500-gpadc.c34
-rw-r--r--drivers/mfd/ab8500-i2c.c3
-rw-r--r--drivers/mfd/asic3.c83
-rw-r--r--drivers/mfd/davinci_voicecodec.c6
-rw-r--r--drivers/mfd/db5500-prcmu-regs.h (renamed from arch/arm/mach-ux500/include/mach/prcmu-regs.h)27
-rw-r--r--drivers/mfd/db5500-prcmu.c448
-rw-r--r--drivers/mfd/db8500-prcmu-regs.h166
-rw-r--r--drivers/mfd/db8500-prcmu.c2069
-rw-r--r--drivers/mfd/htc-pasic3.c5
-rw-r--r--drivers/mfd/janz-cmodio.c3
-rw-r--r--drivers/mfd/max8925-core.c2
-rw-r--r--drivers/mfd/mc13xxx-core.c12
-rw-r--r--drivers/mfd/mfd-core.c7
-rw-r--r--drivers/mfd/omap-usb-host.c152
-rw-r--r--drivers/mfd/pm8921-core.c212
-rw-r--r--drivers/mfd/pm8xxx-irq.c371
-rw-r--r--drivers/mfd/rdc321x-southbridge.c6
-rw-r--r--drivers/mfd/t7l66xb.c6
-rw-r--r--drivers/mfd/tc6387xb.c3
-rw-r--r--drivers/mfd/tc6393xb.c10
-rw-r--r--drivers/mfd/timberdale.c81
-rw-r--r--drivers/mfd/tps6105x.c3
-rw-r--r--drivers/mfd/tps6586x.c4
-rw-r--r--drivers/mfd/tps65910-irq.c218
-rw-r--r--drivers/mfd/tps65910.c229
-rw-r--r--drivers/mfd/tps65911-comparator.c188
-rw-r--r--drivers/mfd/twl-core.c252
-rw-r--r--drivers/mfd/twl4030-codec.c10
-rw-r--r--drivers/mfd/twl4030-power.c6
-rw-r--r--drivers/mfd/twl6030-irq.c4
-rw-r--r--drivers/mfd/wl1273-core.c7
-rw-r--r--drivers/mfd/wm831x-core.c13
-rw-r--r--drivers/mfd/wm831x-irq.c27
-rw-r--r--drivers/mfd/wm8400-core.c3
-rw-r--r--drivers/misc/kgdbts.c29
-rw-r--r--drivers/mmc/card/block.c712
-rw-r--r--drivers/mmc/card/mmc_test.c116
-rw-r--r--drivers/mmc/card/queue.c8
-rw-r--r--drivers/mmc/core/bus.c11
-rw-r--r--drivers/mmc/core/core.c111
-rw-r--r--drivers/mmc/core/core.h7
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/mmc.c186
-rw-r--r--drivers/mmc/core/mmc_ops.c80
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/quirks.c89
-rw-r--r--drivers/mmc/core/sd.c405
-rw-r--r--drivers/mmc/core/sd.h2
-rw-r--r--drivers/mmc/core/sd_ops.c51
-rw-r--r--drivers/mmc/core/sdio.c24
-rw-r--r--drivers/mmc/core/sdio_irq.c33
-rw-r--r--drivers/mmc/core/sdio_ops.c18
-rw-r--r--drivers/mmc/host/Kconfig33
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/dw_mmc.c6
-rw-r--r--drivers/mmc/host/sdhci-pci.c49
-rw-r--r--drivers/mmc/host/sdhci-pxa.c48
-rw-r--r--drivers/mmc/host/sdhci-tegra.c2
-rw-r--r--drivers/mmc/host/sdhci.c854
-rw-r--r--drivers/mmc/host/sdhci.h59
-rw-r--r--drivers/mmc/host/sh_mmcif.c126
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c50
-rw-r--r--drivers/mmc/host/tmio_mmc.c34
-rw-r--r--drivers/mmc/host/tmio_mmc.h16
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c21
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c184
-rw-r--r--drivers/mmc/host/vub300.c2506
-rw-r--r--drivers/mtd/nand/tmio_nand.c2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c195
-rw-r--r--drivers/net/benet/be_cmds.c3
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c3
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_main.c28
-rw-r--r--drivers/net/bonding/bond_sysfs.c16
-rw-r--r--drivers/net/can/janz-ican3.c3
-rw-r--r--drivers/net/ehea/ehea_main.c2
-rw-r--r--drivers/net/gianfar_ptp.c588
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/irda/bfin_sir.c59
-rw-r--r--drivers/net/irda/bfin_sir.h63
-rw-r--r--drivers/net/ks8842.c3
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83640.c1100
-rw-r--r--drivers/net/phy/dp83640_reg.h267
-rw-r--r--drivers/net/tg3.c1
-rw-r--r--drivers/net/tile/tilepro.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c73
-rw-r--r--drivers/net/via-velocity.h2
-rw-r--r--drivers/net/wan/pc300_drv.c3
-rw-r--r--drivers/net/wireless/airo.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c79
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h4
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c19
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c10
-rw-r--r--drivers/net/wireless/ath/hw.c10
-rw-r--r--drivers/net/wireless/b43/b43.h24
-rw-r--r--drivers/net/wireless/b43/dma.c37
-rw-r--r--drivers/net/wireless/b43/leds.c4
-rw-r--r--drivers/net/wireless/b43/lo.c4
-rw-r--r--drivers/net/wireless/b43/main.c194
-rw-r--r--drivers/net/wireless/b43/phy_a.c16
-rw-r--r--drivers/net/wireless/b43/phy_common.c8
-rw-r--r--drivers/net/wireless/b43/phy_g.c48
-rw-r--r--drivers/net/wireless/b43/phy_lp.c22
-rw-r--r--drivers/net/wireless/b43/phy_n.c24
-rw-r--r--drivers/net/wireless/b43/pio.c30
-rw-r--r--drivers/net/wireless/b43/rfkill.c6
-rw-r--r--drivers/net/wireless/b43/sdio.c4
-rw-r--r--drivers/net/wireless/b43/sysfs.c4
-rw-r--r--drivers/net/wireless/b43/tables_lpphy.c4
-rw-r--r--drivers/net/wireless/b43/wa.c4
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c86
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c250
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sv-open.c177
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c364
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c4
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c4
-rw-r--r--drivers/net/wireless/mwifiex/main.h9
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c4
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c59
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c69
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c1
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/parport/parport_ip32.c1
-rw-r--r--drivers/platform/x86/ibm_rtl.c13
-rw-r--r--drivers/platform/x86/intel_ips.c13
-rw-r--r--drivers/power/Kconfig16
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/bq27x00_battery.c11
-rw-r--r--drivers/power/ds2760_battery.c6
-rw-r--r--drivers/power/ds2780_battery.c853
-rw-r--r--drivers/power/gpio-charger.c15
-rw-r--r--drivers/power/isp1704_charger.c22
-rw-r--r--drivers/power/max8903_charger.c391
-rw-r--r--drivers/power/max8925_power.c10
-rw-r--r--drivers/power/test_power.c276
-rw-r--r--drivers/power/z2_battery.c20
-rw-r--r--drivers/ptp/Kconfig75
-rw-r--r--drivers/ptp/Makefile7
-rw-r--r--drivers/ptp/ptp_chardev.c159
-rw-r--r--drivers/ptp/ptp_clock.c343
-rw-r--r--drivers/ptp/ptp_ixp46x.c332
-rw-r--r--drivers/ptp/ptp_private.h92
-rw-r--r--drivers/ptp/ptp_sysfs.c230
-rw-r--r--drivers/regulator/88pm8607.c30
-rw-r--r--drivers/regulator/Kconfig13
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/ab3100.c3
-rw-r--r--drivers/regulator/core.c93
-rw-r--r--drivers/regulator/db8500-prcmu.c558
-rw-r--r--drivers/regulator/max8925-regulator.c11
-rw-r--r--drivers/regulator/max8997.c13
-rw-r--r--drivers/regulator/max8998.c22
-rw-r--r--drivers/regulator/mc13783-regulator.c7
-rw-r--r--drivers/regulator/mc13892-regulator.c25
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c5
-rw-r--r--drivers/regulator/tps65023-regulator.c3
-rw-r--r--drivers/regulator/tps6507x-regulator.c3
-rw-r--r--drivers/regulator/tps65910-regulator.c993
-rw-r--r--drivers/regulator/twl-regulator.c564
-rw-r--r--drivers/regulator/wm831x-dcdc.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c12
-rw-r--r--drivers/rtc/Kconfig59
-rw-r--r--drivers/rtc/Makefile7
-rw-r--r--drivers/rtc/rtc-88pm860x.c427
-rw-r--r--drivers/rtc/rtc-em3027.c161
-rw-r--r--drivers/rtc/rtc-m41t93.c225
-rw-r--r--drivers/rtc/rtc-mrst.c4
-rw-r--r--drivers/rtc/rtc-mxc.c6
-rw-r--r--drivers/rtc/rtc-pcf50633.c23
-rw-r--r--drivers/rtc/rtc-rv3029c2.c454
-rw-r--r--drivers/rtc/rtc-spear.c534
-rw-r--r--drivers/rtc/rtc-tile.c162
-rw-r--r--drivers/rtc/rtc-vt8500.c366
-rw-r--r--drivers/s390/block/dasd_diag.c6
-rw-r--r--drivers/s390/char/sclp.c7
-rw-r--r--drivers/s390/kvm/kvm_virtio.c3
-rw-r--r--drivers/scsi/aacraid/linit.c3
-rw-r--r--drivers/scsi/in2000.c2
-rw-r--r--drivers/scsi/pmcraid.c9
-rw-r--r--drivers/scsi/pmcraid.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c21
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/wd33c93.c7
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/amba-pl022.c35
-rw-r--r--drivers/spi/dw_spi.c202
-rw-r--r--drivers/spi/dw_spi.h2
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/spi/spi_nuc900.c2
-rw-r--r--drivers/spi/spi_s3c24xx.c2
-rw-r--r--drivers/spi/spi_sh.c2
-rw-r--r--drivers/spi/spi_tegra.c2
-rw-r--r--drivers/spi/xilinx_spi.c3
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c2
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c2
-rw-r--r--drivers/staging/generic_serial/rio/rioinit.c2
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c2
-rw-r--r--drivers/staging/zcache/zcache.c5
-rw-r--r--drivers/tty/cyclades.c3
-rw-r--r--drivers/tty/nozomi.c3
-rw-r--r--drivers/tty/serial/68328serial.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c3
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/usb/otg/twl6030-usb.c10
-rw-r--r--drivers/video/Kconfig42
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/amifb.c27
-rw-r--r--drivers/video/backlight/88pm860x_bl.c7
-rw-r--r--drivers/video/backlight/adp5520_bl.c6
-rw-r--r--drivers/video/da8xx-fb.c4
-rw-r--r--drivers/video/efifb.c4
-rw-r--r--drivers/video/mb862xx/Makefile5
-rw-r--r--drivers/video/mb862xx/mb862xx-i2c.c178
-rw-r--r--drivers/video/mb862xx/mb862xx_reg.h58
-rw-r--r--drivers/video/mb862xx/mb862xxfb.h36
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c (renamed from drivers/video/mb862xx/mb862xxfb.c)153
-rw-r--r--drivers/video/omap/Makefile1
-rw-r--r--drivers/video/omap/dispc.c4
-rw-r--r--drivers/video/omap/lcd_omap2evm.c192
-rw-r--r--drivers/video/omap/omapfb_main.c2
-rw-r--r--drivers/video/omap/rfbi.c2
-rw-r--r--drivers/video/omap2/Makefile4
-rw-r--r--drivers/video/omap2/displays/Kconfig9
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c2
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c57
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c2
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c6
-rw-r--r--drivers/video/omap2/displays/panel-taal.c536
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/video/omap2/dss/Kconfig33
-rw-r--r--drivers/video/omap2/dss/core.c15
-rw-r--r--drivers/video/omap2/dss/dispc.c1552
-rw-r--r--drivers/video/omap2/dss/dispc.h691
-rw-r--r--drivers/video/omap2/dss/display.c46
-rw-r--r--drivers/video/omap2/dss/dpi.c113
-rw-r--r--drivers/video/omap2/dss/dsi.c2367
-rw-r--r--drivers/video/omap2/dss/dss.c118
-rw-r--r--drivers/video/omap2/dss/dss.h98
-rw-r--r--drivers/video/omap2/dss/dss_features.c105
-rw-r--r--drivers/video/omap2/dss/dss_features.h39
-rw-r--r--drivers/video/omap2/dss/hdmi.c461
-rw-r--r--drivers/video/omap2/dss/hdmi.h222
-rw-r--r--drivers/video/omap2/dss/hdmi_omap4_panel.c2
-rw-r--r--drivers/video/omap2/dss/manager.c14
-rw-r--r--drivers/video/omap2/dss/overlay.c43
-rw-r--r--drivers/video/omap2/dss/rfbi.c176
-rw-r--r--drivers/video/omap2/dss/sdi.c2
-rw-r--r--drivers/video/omap2/dss/venc.c23
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c14
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c231
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c23
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h8
-rw-r--r--drivers/video/s3c-fb.c121
-rw-r--r--drivers/video/s3c2410fb.c8
-rw-r--r--drivers/video/s3fb.c209
-rw-r--r--drivers/video/savage/savagefb-i2c.c2
-rw-r--r--drivers/video/savage/savagefb.h8
-rw-r--r--drivers/video/savage/savagefb_driver.c15
-rw-r--r--drivers/video/sh7760fb.c6
-rw-r--r--drivers/video/sh_mobile_hdmi.c10
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c126
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h1
-rw-r--r--drivers/video/sh_mobile_meram.c567
-rw-r--r--drivers/video/sh_mobile_meram.h41
-rw-r--r--drivers/video/sm501fb.c24
-rw-r--r--drivers/video/tmiofb.c12
-rw-r--r--drivers/video/udlfb.c20
-rw-r--r--drivers/video/via/via-gpio.c49
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/ds1wm.c333
-rw-r--r--drivers/w1/slaves/Kconfig20
-rw-r--r--drivers/w1/slaves/Makefile2
-rw-r--r--drivers/w1/slaves/w1_ds2408.c402
-rw-r--r--drivers/w1/slaves/w1_ds2780.c217
-rw-r--r--drivers/w1/slaves/w1_ds2780.h129
-rw-r--r--drivers/w1/w1.c12
-rw-r--r--drivers/w1/w1.h6
-rw-r--r--drivers/w1/w1_family.h2
-rw-r--r--drivers/w1/w1_io.c26
-rw-r--r--drivers/w1/w1_netlink.c5
-rw-r--r--drivers/watchdog/rdc321x_wdt.c3
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/tmem.c264
-rw-r--r--fs/9p/Kconfig5
-rw-r--r--fs/9p/vfs_inode.c4
-rw-r--r--fs/9p/vfs_inode_dotl.c11
-rw-r--r--fs/Kconfig19
-rw-r--r--fs/affs/namei.c5
-rw-r--r--fs/afs/dir.c5
-rw-r--r--fs/autofs4/root.c2
-rw-r--r--fs/bfs/dir.c3
-rw-r--r--fs/block_dev.c17
-rw-r--r--fs/btrfs/extent_io.c9
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/buffer.c64
-rw-r--r--fs/ceph/addr.c5
-rw-r--r--fs/ceph/caps.c61
-rw-r--r--fs/ceph/dir.c7
-rw-r--r--fs/ceph/export.c25
-rw-r--r--fs/ceph/mds_client.c7
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/coda/dir.c5
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/dcache.c8
-rw-r--r--fs/dlm/main.c2
-rw-r--r--fs/drop_caches.c5
-rw-r--r--fs/ecryptfs/inode.c5
-rw-r--r--fs/exec.c51
-rw-r--r--fs/ext3/super.c2
-rw-r--r--fs/ext4/Makefile3
-rw-r--r--fs/ext4/balloc.c146
-rw-r--r--fs/ext4/ext4.h127
-rw-r--r--fs/ext4/ext4_jbd2.c14
-rw-r--r--fs/ext4/ext4_jbd2.h5
-rw-r--r--fs/ext4/extents.c1410
-rw-r--r--fs/ext4/file.c1
-rw-r--r--fs/ext4/fsync.c25
-rw-r--r--fs/ext4/inode.c114
-rw-r--r--fs/ext4/mballoc.c459
-rw-r--r--fs/ext4/mballoc.h6
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/mmp.c351
-rw-r--r--fs/ext4/move_extent.c3
-rw-r--r--fs/ext4/namei.c82
-rw-r--r--fs/ext4/page-io.c39
-rw-r--r--fs/ext4/super.c206
-rw-r--r--fs/ext4/xattr.c4
-rw-r--r--fs/fat/namei_msdos.c5
-rw-r--r--fs/fat/namei_vfat.c5
-rw-r--r--fs/fscache/operation.c10
-rw-r--r--fs/fscache/page.c13
-rw-r--r--fs/fuse/dir.c6
-rw-r--r--fs/gfs2/glock.c5
-rw-r--r--fs/gfs2/main.c2
-rw-r--r--fs/gfs2/quota.c12
-rw-r--r--fs/gfs2/quota.h4
-rw-r--r--fs/hfs/dir.c6
-rw-r--r--fs/hfsplus/dir.c8
-rw-r--r--fs/hostfs/hostfs_kern.c5
-rw-r--r--fs/hpfs/namei.c9
-rw-r--r--fs/hugetlbfs/inode.c7
-rw-r--r--fs/inode.c9
-rw-r--r--fs/jbd2/commit.c22
-rw-r--r--fs/jbd2/journal.c58
-rw-r--r--fs/jbd2/transaction.c22
-rw-r--r--fs/jffs2/dir.c5
-rw-r--r--fs/jfs/namei.c5
-rw-r--r--fs/logfs/dir.c5
-rw-r--r--fs/mbcache.c10
-rw-r--r--fs/minix/namei.c5
-rw-r--r--fs/mpage.c7
-rw-r--r--fs/namei.c380
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/ncpfs/dir.c5
-rw-r--r--fs/ncpfs/inode.c4
-rw-r--r--fs/ncpfs/mmap.c2
-rw-r--r--fs/nfs/dir.c5
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nilfs2/namei.c5
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/omfs/dir.c11
-rw-r--r--fs/partitions/check.c8
-rw-r--r--fs/partitions/efi.c9
-rw-r--r--fs/proc/Makefile1
-rw-r--r--fs/proc/array.c4
-rw-r--r--fs/proc/base.c103
-rw-r--r--fs/proc/generic.c1
-rw-r--r--fs/proc/inode.c7
-rw-r--r--fs/proc/internal.h26
-rw-r--r--fs/proc/namespaces.c198
-rw-r--r--fs/proc/stat.c6
-rw-r--r--fs/proc/task_mmu.c233
-rw-r--r--fs/proc/vmcore.c52
-rw-r--r--fs/quota/dquot.c5
-rw-r--r--fs/reiserfs/namei.c5
-rw-r--r--fs/reiserfs/xattr.c1
-rw-r--r--fs/splice.c33
-rw-r--r--fs/squashfs/block.c2
-rw-r--r--fs/squashfs/cache.c31
-rw-r--r--fs/squashfs/decompressor.c2
-rw-r--r--fs/squashfs/decompressor.h2
-rw-r--r--fs/squashfs/dir.c2
-rw-r--r--fs/squashfs/export.c42
-rw-r--r--fs/squashfs/file.c2
-rw-r--r--fs/squashfs/fragment.c37
-rw-r--r--fs/squashfs/id.c42
-rw-r--r--fs/squashfs/inode.c2
-rw-r--r--fs/squashfs/namei.c2
-rw-r--r--fs/squashfs/squashfs.h10
-rw-r--r--fs/squashfs/squashfs_fs.h2
-rw-r--r--fs/squashfs/squashfs_fs_i.h2
-rw-r--r--fs/squashfs/squashfs_fs_sb.h2
-rw-r--r--fs/squashfs/super.c112
-rw-r--r--fs/squashfs/symlink.c2
-rw-r--r--fs/squashfs/xattr.c2
-rw-r--r--fs/squashfs/xattr.h3
-rw-r--r--fs/squashfs/xattr_id.c47
-rw-r--r--fs/squashfs/xz_wrapper.c2
-rw-r--r--fs/squashfs/zlib_wrapper.c2
-rw-r--r--fs/super.c3
-rw-r--r--fs/sysv/namei.c5
-rw-r--r--fs/ubifs/dir.c5
-rw-r--r--fs/udf/namei.c5
-rw-r--r--fs/ufs/balloc.c9
-rw-r--r--fs/ufs/namei.c5
-rw-r--r--fs/ufs/truncate.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_discard.c29
-rw-r--r--fs/xfs/linux-2.6/xfs_discard.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c18
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c5
-rw-r--r--fs/xfs/quota/xfs_qm.c6
-rw-r--r--fs/xfs/xfs_ag.h3
-rw-r--r--fs/xfs/xfs_alloc.c35
-rw-r--r--fs/xfs/xfs_alloc.h5
-rw-r--r--fs/xfs/xfs_alloc_btree.c3
-rw-r--r--fs/xfs/xfs_bmap.c549
-rw-r--r--fs/xfs/xfs_bmap.h2
-rw-r--r--fs/xfs/xfs_inode.c15
-rw-r--r--fs/xfs/xfs_inode.h1
-rw-r--r--fs/xfs/xfs_log_cil.c13
-rw-r--r--fs/xfs/xfs_mount.h1
-rw-r--r--fs/xfs/xfs_trans.c2
-rw-r--r--include/asm-generic/audit_change_attr.h4
-rw-r--r--include/asm-generic/audit_dir_write.h14
-rw-r--r--include/asm-generic/audit_read.h5
-rw-r--r--include/asm-generic/audit_write.h2
-rw-r--r--include/asm-generic/bitops/find.h4
-rw-r--r--include/asm-generic/bitops/le.h7
-rw-r--r--include/asm-generic/bug.h37
-rw-r--r--include/asm-generic/cacheflush.h5
-rw-r--r--include/asm-generic/ptrace.h74
-rw-r--r--include/asm-generic/resource.h2
-rw-r--r--include/asm-generic/tlb.h156
-rw-r--r--include/asm-generic/unistd.h221
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/basic_mmio_gpio.h56
-rw-r--r--include/linux/bitmap.h5
-rw-r--r--include/linux/bitops.h4
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blkdev.h15
-rw-r--r--include/linux/bootmem.h25
-rw-r--r--include/linux/buffer_head.h16
-rw-r--r--include/linux/c2port.h3
-rw-r--r--include/linux/ceph/ceph_fs.h1
-rw-r--r--include/linux/cgroup.h13
-rw-r--r--include/linux/cgroup_subsys.h6
-rw-r--r--include/linux/cleancache.h122
-rw-r--r--include/linux/compat.h236
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/compiler-gcc4.h2
-rw-r--r--include/linux/cpumask.h15
-rw-r--r--include/linux/crash_dump.h5
-rw-r--r--include/linux/drbd.h10
-rw-r--r--include/linux/drbd_tag_magic.h2
-rw-r--r--include/linux/flex_array.h2
-rw-r--r--include/linux/fs.h12
-rw-r--r--include/linux/fscache-cache.h12
-rw-r--r--include/linux/genalloc.h25
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gfp.h9
-rw-r--r--include/linux/huge_mm.h8
-rw-r--r--include/linux/hugetlb.h7
-rw-r--r--include/linux/hugetlb_inline.h2
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/i2c/twl.h54
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/if_vlan.h5
-rw-r--r--include/linux/init_task.h9
-rw-r--r--include/linux/input/pmic8xxx-keypad.h52
-rw-r--r--include/linux/input/pmic8xxx-pwrkey.h31
-rw-r--r--include/linux/ipmi_smi.h2
-rw-r--r--include/linux/jbd2.h8
-rw-r--r--include/linux/kernel.h39
-rw-r--r--include/linux/leds-pca9532.h3
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/lockdep.h3
-rw-r--r--include/linux/lru_cache.h12
-rw-r--r--include/linux/memblock.h9
-rw-r--r--include/linux/memcontrol.h24
-rw-r--r--include/linux/mempolicy.h7
-rw-r--r--include/linux/mfd/88pm860x.h6
-rw-r--r--include/linux/mfd/abx500.h8
-rw-r--r--include/linux/mfd/asic3.h23
-rw-r--r--include/linux/mfd/core.h23
-rw-r--r--include/linux/mfd/db5500-prcmu.h45
-rw-r--r--include/linux/mfd/db8500-prcmu.h978
-rw-r--r--include/linux/mfd/max8997-private.h4
-rw-r--r--include/linux/mfd/pm8xxx/core.h81
-rw-r--r--include/linux/mfd/pm8xxx/irq.h59
-rw-r--r--include/linux/mfd/pm8xxx/pm8921.h31
-rw-r--r--include/linux/mfd/tmio.h17
-rw-r--r--include/linux/mfd/tps65910.h800
-rw-r--r--include/linux/mfd/twl4030-codec.h2
-rw-r--r--include/linux/mfd/wm831x/core.h26
-rw-r--r--include/linux/mfd/wm831x/pdata.h4
-rw-r--r--include/linux/mm.h137
-rw-r--r--include/linux/mm_types.h25
-rw-r--r--include/linux/mmc/Kbuild1
-rw-r--r--include/linux/mmc/card.h189
-rw-r--r--include/linux/mmc/core.h5
-rw-r--r--include/linux/mmc/host.h49
-rw-r--r--include/linux/mmc/ioctl.h54
-rw-r--r--include/linux/mmc/mmc.h18
-rw-r--r--include/linux/mmc/sd.h9
-rw-r--r--include/linux/mmc/sdhci.h15
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h4
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/mutex.h9
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/nsproxy.h9
-rw-r--r--include/linux/oom.h2
-rw-r--r--include/linux/pagemap.h15
-rw-r--r--include/linux/percpu_counter.h6
-rw-r--r--include/linux/pid.h2
-rw-r--r--include/linux/posix-timers.h1
-rw-r--r--include/linux/power/isp1704_charger.h29
-rw-r--r--include/linux/power/max8903_charger.h57
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/proc_fs.h40
-rw-r--r--include/linux/ptp_classify.h7
-rw-r--r--include/linux/ptp_clock.h84
-rw-r--r--include/linux/ptp_clock_kernel.h139
-rw-r--r--include/linux/regulator/db8500-prcmu.h45
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/rfkill-gpio.h43
-rw-r--r--include/linux/rmap.h29
-rw-r--r--include/linux/rtc.h8
-rw-r--r--include/linux/sched.h38
-rw-r--r--include/linux/seqlock.h3
-rw-r--r--include/linux/shmem_fs.h8
-rw-r--r--include/linux/smp.h10
-rw-r--r--include/linux/spi/spi.h8
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/vm_event_item.h64
-rw-r--r--include/linux/vmstat.h69
-rw-r--r--include/linux/xattr.h8
-rw-r--r--include/media/m5mols.h35
-rw-r--r--include/media/videobuf-dvb.h4
-rw-r--r--include/net/9p/9p.h13
-rw-r--r--include/net/9p/client.h2
-rw-r--r--include/net/9p/transport.h3
-rw-r--r--include/net/cfg80211.h8
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/rdma/Kbuild5
-rw-r--r--include/rdma/ib_user_cm.h1
-rw-r--r--include/rdma/rdma_cm.h19
-rw-r--r--include/rdma/rdma_netlink.h92
-rw-r--r--include/trace/events/gpio.h56
-rw-r--r--include/video/omap-panel-generic-dpi.h (renamed from arch/arm/plat-omap/include/plat/panel-generic-dpi.h)8
-rw-r--r--include/video/omap-panel-nokia-dsi.h (renamed from arch/arm/plat-omap/include/plat/nokia-dsi-panel.h)14
-rw-r--r--include/video/omapdss.h (renamed from arch/arm/plat-omap/include/plat/display.h)114
-rw-r--r--include/video/sh_mobile_lcdc.h3
-rw-r--r--include/video/sh_mobile_meram.h68
-rw-r--r--include/xen/interface/io/blkif.h13
-rw-r--r--include/xen/interface/xen.h22
-rw-r--r--init/Kconfig8
-rw-r--r--init/calibrate.c75
-rw-r--r--init/main.c3
-rw-r--r--ipc/namespace.c37
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/cgroup.c587
-rw-r--r--kernel/cgroup_freezer.c26
-rw-r--r--kernel/compat.c8
-rw-r--r--kernel/cpuset.c103
-rw-r--r--kernel/fork.c110
-rw-r--r--kernel/hrtimer.c2
-rw-r--r--kernel/irq/proc.c55
-rw-r--r--kernel/mutex.c25
-rw-r--r--kernel/ns_cgroup.c118
-rw-r--r--kernel/nsproxy.c46
-rw-r--r--kernel/posix-timers.c25
-rw-r--r--kernel/printk.c87
-rw-r--r--kernel/profile.c16
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/sched.c38
-rw-r--r--kernel/signal.c6
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/utsname.c39
-rw-r--r--lib/Kconfig10
-rw-r--r--lib/Kconfig.debug20
-rw-r--r--lib/Makefile8
-rw-r--r--lib/audit.c2
-rw-r--r--lib/bitmap.c109
-rw-r--r--lib/find_last_bit.c4
-rw-r--r--lib/find_next_bit.c18
-rw-r--r--lib/flex_array.c51
-rw-r--r--lib/genalloc.c45
-rw-r--r--lib/kstrtox.c26
-rw-r--r--lib/lru_cache.c2
-rw-r--r--lib/show_mem.c2
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/Kconfig23
-rw-r--r--mm/Makefile1
-rw-r--r--mm/backing-dev.c4
-rw-r--r--mm/cleancache.c244
-rw-r--r--mm/filemap.c83
-rw-r--r--mm/filemap_xip.c4
-rw-r--r--mm/fremap.c6
-rw-r--r--mm/huge_memory.c25
-rw-r--r--mm/hugetlb.c18
-rw-r--r--mm/init-mm.c1
-rw-r--r--mm/internal.h4
-rw-r--r--mm/ksm.c7
-rw-r--r--mm/memcontrol.c377
-rw-r--r--mm/memory-failure.c21
-rw-r--r--mm/memory.c444
-rw-r--r--mm/memory_hotplug.c21
-rw-r--r--mm/mempolicy.c164
-rw-r--r--mm/migrate.c17
-rw-r--r--mm/mlock.c8
-rw-r--r--mm/mmap.c129
-rw-r--r--mm/mremap.c5
-rw-r--r--mm/nobootmem.c23
-rw-r--r--mm/nommu.c108
-rw-r--r--mm/oom_kill.c36
-rw-r--r--mm/page_alloc.c125
-rw-r--r--mm/page_cgroup.c28
-rw-r--r--mm/readahead.c2
-rw-r--r--mm/rmap.c172
-rw-r--r--mm/shmem.c331
-rw-r--r--mm/slub.c4
-rw-r--r--mm/swap.c52
-rw-r--r--mm/swapfile.c6
-rw-r--r--mm/truncate.c6
-rw-r--r--mm/util.c24
-rw-r--r--mm/vmalloc.c19
-rw-r--r--mm/vmscan.c184
-rw-r--r--mm/vmstat.c264
-rw-r--r--net/802/psnap.c1
-rw-r--r--net/8021q/vlan.h5
-rw-r--r--net/9p/Kconfig8
-rw-r--r--net/9p/client.c30
-rw-r--r--net/9p/mod.c4
-rw-r--r--net/9p/trans_fd.c7
-rw-r--r--net/9p/trans_rdma.c3
-rw-r--r--net/9p/util.c2
-rw-r--r--net/atm/lec.c2
-rw-r--r--net/atm/mpc.c2
-rw-r--r--net/atm/proc.c4
-rw-r--r--net/bridge/br_netfilter.c6
-rw-r--r--net/can/bcm.c6
-rw-r--r--net/ceph/messenger.c82
-rw-r--r--net/ceph/osd_client.c19
-rw-r--r--net/ceph/osdmap.c13
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/dst.c2
-rw-r--r--net/core/fib_rules.c1
-rw-r--r--net/core/filter.c4
-rw-r--r--net/core/net_namespace.c65
-rw-r--r--net/core/rtnetlink.c14
-rw-r--r--net/ipv4/igmp.c10
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ping.c3
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/mac80211/iface.c4
-rw-r--r--net/mac80211/main.c22
-rw-r--r--net/mac80211/mesh.h7
-rw-r--r--net/mac80211/mesh_pathtbl.c204
-rw-r--r--net/mac80211/scan.c5
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/rds/ib.c2
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/rds/iw.c2
-rw-r--r--net/rds/iw_cm.c2
-rw-r--r--net/rds/rdma_transport.c3
-rw-r--r--net/rfkill/Kconfig9
-rw-r--r--net/rfkill/Makefile1
-rw-r--r--net/rfkill/rfkill-gpio.c227
-rw-r--r--net/sched/sch_sfq.c22
-rw-r--r--net/sctp/associola.c16
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sunrpc/auth.c4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c3
-rw-r--r--net/sunrpc/xprtrdma/verbs.c2
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/wireless/core.h5
-rw-r--r--net/wireless/nl80211.c12
-rw-r--r--net/wireless/sme.c19
-rw-r--r--net/wireless/util.c2
-rwxr-xr-xscripts/checkpatch.pl13
-rwxr-xr-xscripts/checkversion.pl1
-rw-r--r--scripts/export_report.pl26
-rw-r--r--scripts/kconfig/Makefile38
-rw-r--r--scripts/kconfig/confdata.c20
-rw-r--r--scripts/kconfig/expr.h4
-rw-r--r--scripts/kconfig/gconf.c12
-rw-r--r--scripts/kconfig/lex.zconf.c_shipped33
-rw-r--r--scripts/kconfig/nconf.c14
-rw-r--r--scripts/kconfig/qconf.cc5
-rw-r--r--scripts/kconfig/zconf.l33
-rw-r--r--scripts/package/Makefile4
-rwxr-xr-xscripts/package/mkspec19
-rwxr-xr-xscripts/patch-kernel2
-rw-r--r--security/device_cgroup.c3
-rw-r--r--security/keys/process_keys.c1
-rw-r--r--security/selinux/avc.c12
-rw-r--r--security/selinux/ss/services.c3
-rw-r--r--sound/core/control.c3
-rw-r--r--sound/core/init.c3
-rw-r--r--sound/core/oss/linear.c3
-rw-r--r--sound/core/pcm_lib.c17
-rw-r--r--sound/core/pcm_native.c21
-rw-r--r--sound/core/seq/seq_queue.c2
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_eld.c21
-rw-r--r--sound/pci/hda/hda_intel.c206
-rw-r--r--sound/pci/hda/patch_analog.c1
-rw-r--r--sound/pci/hda/patch_conexant.c42
-rw-r--r--sound/pci/hda/patch_hdmi.c123
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c2
-rw-r--r--sound/soc/codecs/cq93vc.c3
-rw-r--r--sound/soc/codecs/twl4030.c6
-rw-r--r--sound/soc/codecs/wl1273.c3
-rw-r--r--sound/soc/codecs/wm1250-ev1.c2
-rw-r--r--sound/soc/codecs/wm8400.c2
-rw-r--r--sound/soc/codecs/wm8731.c2
-rw-r--r--sound/soc/codecs/wm8915.c1
-rw-r--r--sound/soc/davinci/davinci-vcif.c2
-rw-r--r--sound/soc/omap/Kconfig8
-rw-r--r--sound/soc/omap/Makefile1
-rw-r--r--sound/soc/omap/omap2evm.c139
-rw-r--r--sound/soc/pxa/raumfeld.c92
-rw-r--r--sound/soc/soc-core.c8
-rw-r--r--sound/soc/soc-dapm.c2
-rw-r--r--sound/usb/card.c17
-rw-r--r--sound/usb/mixer.c32
-rw-r--r--sound/usb/mixer.h14
-rw-r--r--sound/usb/mixer_quirks.c70
-rw-r--r--sound/usb/quirks-table.h4
-rw-r--r--sound/usb/quirks.c18
-rw-r--r--sound/usb/usbaudio.h1
1528 files changed, 75241 insertions, 21320 deletions
diff --git a/.mailmap b/.mailmap
index 5a6dd592eedc..353ad5607156 100644
--- a/.mailmap
+++ b/.mailmap
@@ -32,6 +32,7 @@ Brian Avery <b.avery@hp.com>
32Brian King <brking@us.ibm.com> 32Brian King <brking@us.ibm.com>
33Christoph Hellwig <hch@lst.de> 33Christoph Hellwig <hch@lst.de>
34Corey Minyard <minyard@acm.org> 34Corey Minyard <minyard@acm.org>
35Damian Hobson-Garcia <dhobsong@igel.co.jp>
35David Brownell <david-b@pacbell.net> 36David Brownell <david-b@pacbell.net>
36David Woodhouse <dwmw2@shinybook.infradead.org> 37David Woodhouse <dwmw2@shinybook.infradead.org>
37Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 38Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
diff --git a/CREDITS b/CREDITS
index 95c469c610bc..a7ea8e343836 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2943,6 +2943,10 @@ S: Kasarmikatu 11 A4
2943S: 70110 Kuopio 2943S: 70110 Kuopio
2944S: Finland 2944S: Finland
2945 2945
2946N: Tobias Ringström
2947E: tori@unhappy.mine.nu
2948D: Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver
2949
2946N: Luca Risolia 2950N: Luca Risolia
2947E: luca.risolia@studio.unibo.it 2951E: luca.risolia@studio.unibo.it
2948P: 1024D/FCE635A4 88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4 2952P: 1024D/FCE635A4 88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4
@@ -3913,6 +3917,10 @@ S: Flandernstrasse 101
3913S: D-73732 Esslingen 3917S: D-73732 Esslingen
3914S: Germany 3918S: Germany
3915 3919
3920N: Roman Zippel
3921E: zippel@linux-m68k.org
3922D: AFFS and HFS filesystems, m68k maintainer, new kernel configuration in 2.5
3923
3916N: Leonard N. Zubkoff 3924N: Leonard N. Zubkoff
3917W: http://www.dandelion.com/Linux/ 3925W: http://www.dandelion.com/Linux/
3918D: BusLogic SCSI driver 3926D: BusLogic SCSI driver
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 4873c759d535..c1eb41cb9876 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -142,3 +142,67 @@ Description:
142 with the previous I/O request are enabled. When set to 2, 142 with the previous I/O request are enabled. When set to 2,
143 all merge tries are disabled. The default value is 0 - 143 all merge tries are disabled. The default value is 0 -
144 which enables all types of merge tries. 144 which enables all types of merge tries.
145
146What: /sys/block/<disk>/discard_alignment
147Date: May 2011
148Contact: Martin K. Petersen <martin.petersen@oracle.com>
149Description:
150 Devices that support discard functionality may
151 internally allocate space in units that are bigger than
152 the exported logical block size. The discard_alignment
153 parameter indicates how many bytes the beginning of the
154 device is offset from the internal allocation unit's
155 natural alignment.
156
157What: /sys/block/<disk>/<partition>/discard_alignment
158Date: May 2011
159Contact: Martin K. Petersen <martin.petersen@oracle.com>
160Description:
161 Devices that support discard functionality may
162 internally allocate space in units that are bigger than
163 the exported logical block size. The discard_alignment
164 parameter indicates how many bytes the beginning of the
165 partition is offset from the internal allocation unit's
166 natural alignment.
167
168What: /sys/block/<disk>/queue/discard_granularity
169Date: May 2011
170Contact: Martin K. Petersen <martin.petersen@oracle.com>
171Description:
172 Devices that support discard functionality may
173 internally allocate space using units that are bigger
174 than the logical block size. The discard_granularity
175 parameter indicates the size of the internal allocation
176 unit in bytes if reported by the device. Otherwise the
177 discard_granularity will be set to match the device's
178 physical block size. A discard_granularity of 0 means
179 that the device does not support discard functionality.
180
181What: /sys/block/<disk>/queue/discard_max_bytes
182Date: May 2011
183Contact: Martin K. Petersen <martin.petersen@oracle.com>
184Description:
185 Devices that support discard functionality may have
186 internal limits on the number of bytes that can be
187 trimmed or unmapped in a single operation. Some storage
188 protocols also have inherent limits on the number of
189 blocks that can be described in a single command. The
190 discard_max_bytes parameter is set by the device driver
191 to the maximum number of bytes that can be discarded in
192 a single operation. Discard requests issued to the
193 device must not exceed this limit. A discard_max_bytes
194 value of 0 means that the device does not support
195 discard functionality.
196
197What: /sys/block/<disk>/queue/discard_zeroes_data
198Date: May 2011
199Contact: Martin K. Petersen <martin.petersen@oracle.com>
200Description:
201 Devices that support discard functionality may return
202 stale or random data when a previously discarded block
203 is read back. This can cause problems if the filesystem
204 expects discarded blocks to be explicitly cleared. If a
205 device reports that it deterministically returns zeroes
206 when a discarded area is read the discard_zeroes_data
207 parameter will be set to one. Otherwise it will be 0 and
208 the result of reading a discarded area is undefined.
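The queue-level discard limits described above are plain text attributes, so they can be checked from user space with nothing more than open()/read(). The sketch below is only an illustration of that, assuming a hypothetical disk named "sda"; a value of 0 is interpreted per the descriptions above (no discard support).

/* Minimal sketch: read the discard limits documented above for one disk.
 * "sda" is a placeholder device name; adjust to a real one.
 */
#include <stdio.h>

static long read_queue_attr(const char *disk, const char *attr)
{
	char path[256];
	long value = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &value) != 1)
		value = -1;
	fclose(f);
	return value;
}

int main(void)
{
	long gran = read_queue_attr("sda", "discard_granularity");
	long max = read_queue_attr("sda", "discard_max_bytes");
	long zeroes = read_queue_attr("sda", "discard_zeroes_data");

	printf("discard_granularity: %ld bytes\n", gran);
	printf("discard_max_bytes:   %ld bytes\n", max);
	printf("discard_zeroes_data: %ld\n", zeroes);
	if (gran == 0 || max == 0)
		printf("device does not support discard\n");
	return 0;
}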
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cleancache b/Documentation/ABI/testing/sysfs-kernel-mm-cleancache
new file mode 100644
index 000000000000..662ae646ea12
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-cleancache
@@ -0,0 +1,11 @@
1What: /sys/kernel/mm/cleancache/
2Date: April 2011
3Contact: Dan Magenheimer <dan.magenheimer@oracle.com>
4Description:
5 /sys/kernel/mm/cleancache/ contains a number of files which
6 record a count of various cleancache operations
7 (sum across all filesystems):
8 succ_gets
9 failed_gets
10 puts
11 flushes
diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp
new file mode 100644
index 000000000000..d40d2b550502
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-ptp
@@ -0,0 +1,98 @@
1What: /sys/class/ptp/
2Date: September 2010
3Contact: Richard Cochran <richardcochran@gmail.com>
4Description:
5 This directory contains files and directories
6 providing a standardized interface to the ancillary
7 features of PTP hardware clocks.
8
9What: /sys/class/ptp/ptpN/
10Date: September 2010
11Contact: Richard Cochran <richardcochran@gmail.com>
12Description:
13 This directory contains the attributes of the Nth PTP
14 hardware clock registered into the PTP class driver
15 subsystem.
16
17What: /sys/class/ptp/ptpN/clock_name
18Date: September 2010
19Contact: Richard Cochran <richardcochran@gmail.com>
20Description:
21 This file contains the name of the PTP hardware clock
22 as a human readable string.
23
24What: /sys/class/ptp/ptpN/max_adjustment
25Date: September 2010
26Contact: Richard Cochran <richardcochran@gmail.com>
27Description:
28 This file contains the PTP hardware clock's maximum
29 frequency adjustment value (a positive integer) in
30 parts per billion.
31
32What: /sys/class/ptp/ptpN/n_alarms
33Date: September 2010
34Contact: Richard Cochran <richardcochran@gmail.com>
35Description:
36 This file contains the number of periodic or one shot
37	alarms offered by the PTP hardware clock.
38
39What: /sys/class/ptp/ptpN/n_external_timestamps
40Date: September 2010
41Contact: Richard Cochran <richardcochran@gmail.com>
42Description:
43 This file contains the number of external timestamp
44 channels offered by the PTP hardware clock.
45
46What: /sys/class/ptp/ptpN/n_periodic_outputs
47Date: September 2010
48Contact: Richard Cochran <richardcochran@gmail.com>
49Description:
50 This file contains the number of programmable periodic
51 output channels offered by the PTP hardware clock.
52
53What:		/sys/class/ptp/ptpN/pps_available
54Date: September 2010
55Contact: Richard Cochran <richardcochran@gmail.com>
56Description:
57 This file indicates whether the PTP hardware clock
58	supports a Pulse Per Second signal to the host CPU. Reading
59 "1" means that the PPS is supported, while "0" means
60 not supported.
61
62What: /sys/class/ptp/ptpN/extts_enable
63Date: September 2010
64Contact: Richard Cochran <richardcochran@gmail.com>
65Description:
66 This write-only file enables or disables external
67 timestamps. To enable external timestamps, write the
68 channel index followed by a "1" into the file.
69 To disable external timestamps, write the channel
70 index followed by a "0" into the file.
71
72What: /sys/class/ptp/ptpN/fifo
73Date: September 2010
74Contact: Richard Cochran <richardcochran@gmail.com>
75Description:
76 This file provides timestamps on external events, in
77 the form of three integers: channel index, seconds,
78 and nanoseconds.
79
80What: /sys/class/ptp/ptpN/period
81Date: September 2010
82Contact: Richard Cochran <richardcochran@gmail.com>
83Description:
84 This write-only file enables or disables periodic
85 outputs. To enable a periodic output, write five
86 integers into the file: channel index, start time
87 seconds, start time nanoseconds, period seconds, and
88 period nanoseconds. To disable a periodic output, set
89 all the seconds and nanoseconds values to zero.
90
91What: /sys/class/ptp/ptpN/pps_enable
92Date: September 2010
93Contact: Richard Cochran <richardcochran@gmail.com>
94Description:
95 This write-only file enables or disables delivery of
96 PPS events to the Linux PPS subsystem. To enable PPS
97 events, write a "1" into the file. To disable events,
98 write a "0" into the file.
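To show how the extts_enable and fifo attributes described above fit together, here is a small user-space sketch: it arms external timestamps on channel 0 and reads one event back. The ptp0 instance and the channel number are assumptions; a given board may expose neither.

/* Sketch: enable external timestamps on channel 0 of a PTP clock and
 * read one event ("channel seconds nanoseconds") from the fifo file.
 */
#include <stdio.h>

int main(void)
{
	FILE *f;
	unsigned int channel, nsec;
	long long sec;

	/* "<channel> 1" enables timestamps, "<channel> 0" disables them */
	f = fopen("/sys/class/ptp/ptp0/extts_enable", "w");
	if (!f) {
		perror("extts_enable");
		return 1;
	}
	fprintf(f, "0 1\n");
	fclose(f);

	/* each read yields three integers for one captured event */
	f = fopen("/sys/class/ptp/ptp0/fifo", "r");
	if (!f) {
		perror("fifo");
		return 1;
	}
	if (fscanf(f, "%u %lld %u", &channel, &sec, &nsec) == 3)
		printf("channel %u: %lld.%09u\n", channel, sec, nsec);
	fclose(f);
	return 0;
}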
diff --git a/Documentation/DocBook/dvb/dvbproperty.xml b/Documentation/DocBook/dvb/dvbproperty.xml
index 52d5e3c7cf6c..b5365f61d69b 100644
--- a/Documentation/DocBook/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/dvb/dvbproperty.xml
@@ -141,13 +141,15 @@ struct dtv_properties {
141 </row></tbody></tgroup></informaltable> 141 </row></tbody></tgroup></informaltable>
142</section> 142</section>
143 143
144<section>
145 <title>Property types</title>
144<para> 146<para>
145On <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY</link>/<link linkend="FE_SET_PROPERTY">FE_SET_PROPERTY</link>, 147On <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY</link>/<link linkend="FE_SET_PROPERTY">FE_SET_PROPERTY</link>,
146the actual action is determined by the dtv_property cmd/data pairs. With one single ioctl, is possible to 148the actual action is determined by the dtv_property cmd/data pairs. With one single ioctl, is possible to
147get/set up to 64 properties. The actual meaning of each property is described on the next sections. 149get/set up to 64 properties. The actual meaning of each property is described on the next sections.
148</para> 150</para>
149 151
150<para>The Available frontend property types are:</para> 152<para>The available frontend property types are:</para>
151<programlisting> 153<programlisting>
152#define DTV_UNDEFINED 0 154#define DTV_UNDEFINED 0
153#define DTV_TUNE 1 155#define DTV_TUNE 1
@@ -193,6 +195,7 @@ get/set up to 64 properties. The actual meaning of each property is described on
193#define DTV_ISDBT_LAYER_ENABLED 41 195#define DTV_ISDBT_LAYER_ENABLED 41
194#define DTV_ISDBS_TS_ID 42 196#define DTV_ISDBS_TS_ID 42
195</programlisting> 197</programlisting>
198</section>
196 199
197<section id="fe_property_common"> 200<section id="fe_property_common">
198 <title>Parameters that are common to all Digital TV standards</title> 201 <title>Parameters that are common to all Digital TV standards</title>
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index c8abb23ef1e7..e5fe09430fd9 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -293,6 +293,7 @@
293<!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml"> 293<!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
294<!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml"> 294<!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
295<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml"> 295<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
296<!ENTITY sub-srggb12 SYSTEM "v4l/pixfmt-srggb12.xml">
296<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml"> 297<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
297<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml"> 298<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
298<!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml"> 299<!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml">
@@ -373,9 +374,9 @@
373<!ENTITY sub-media-indices SYSTEM "media-indices.tmpl"> 374<!ENTITY sub-media-indices SYSTEM "media-indices.tmpl">
374 375
375<!ENTITY sub-media-controller SYSTEM "v4l/media-controller.xml"> 376<!ENTITY sub-media-controller SYSTEM "v4l/media-controller.xml">
376<!ENTITY sub-media-open SYSTEM "v4l/media-func-open.xml"> 377<!ENTITY sub-media-func-open SYSTEM "v4l/media-func-open.xml">
377<!ENTITY sub-media-close SYSTEM "v4l/media-func-close.xml"> 378<!ENTITY sub-media-func-close SYSTEM "v4l/media-func-close.xml">
378<!ENTITY sub-media-ioctl SYSTEM "v4l/media-func-ioctl.xml"> 379<!ENTITY sub-media-func-ioctl SYSTEM "v4l/media-func-ioctl.xml">
379<!ENTITY sub-media-ioc-device-info SYSTEM "v4l/media-ioc-device-info.xml"> 380<!ENTITY sub-media-ioc-device-info SYSTEM "v4l/media-ioc-device-info.xml">
380<!ENTITY sub-media-ioc-enum-entities SYSTEM "v4l/media-ioc-enum-entities.xml"> 381<!ENTITY sub-media-ioc-enum-entities SYSTEM "v4l/media-ioc-enum-entities.xml">
381<!ENTITY sub-media-ioc-enum-links SYSTEM "v4l/media-ioc-enum-links.xml"> 382<!ENTITY sub-media-ioc-enum-links SYSTEM "v4l/media-ioc-enum-links.xml">
diff --git a/Documentation/DocBook/v4l/media-controller.xml b/Documentation/DocBook/v4l/media-controller.xml
index 2dc25e1d4089..873ac3a621f0 100644
--- a/Documentation/DocBook/v4l/media-controller.xml
+++ b/Documentation/DocBook/v4l/media-controller.xml
@@ -78,9 +78,9 @@
78<appendix id="media-user-func"> 78<appendix id="media-user-func">
79 <title>Function Reference</title> 79 <title>Function Reference</title>
80 <!-- Keep this alphabetically sorted. --> 80 <!-- Keep this alphabetically sorted. -->
81 &sub-media-open; 81 &sub-media-func-open;
82 &sub-media-close; 82 &sub-media-func-close;
83 &sub-media-ioctl; 83 &sub-media-func-ioctl;
84 <!-- All ioctls go here. --> 84 <!-- All ioctls go here. -->
85 &sub-media-ioc-device-info; 85 &sub-media-ioc-device-info;
86 &sub-media-ioc-enum-entities; 86 &sub-media-ioc-enum-entities;
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index dbfe3b08435f..deb660207f94 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -673,6 +673,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
673 &sub-srggb8; 673 &sub-srggb8;
674 &sub-sbggr16; 674 &sub-sbggr16;
675 &sub-srggb10; 675 &sub-srggb10;
676 &sub-srggb12;
676 </section> 677 </section>
677 678
678 <section id="yuv-formats"> 679 <section id="yuv-formats">
diff --git a/Documentation/DocBook/v4l/subdev-formats.xml b/Documentation/DocBook/v4l/subdev-formats.xml
index a26b10c07857..8d3409d2c632 100644
--- a/Documentation/DocBook/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/v4l/subdev-formats.xml
@@ -2531,13 +2531,13 @@
2531 <constant>_JPEG</constant> prefix the format code is made of 2531 <constant>_JPEG</constant> prefix the format code is made of
2532 the following information. 2532 the following information.
2533 <itemizedlist> 2533 <itemizedlist>
2534 <listitem>The number of bus samples per entropy encoded byte.</listitem> 2534 <listitem><para>The number of bus samples per entropy encoded byte.</para></listitem>
2535 <listitem>The bus width.</listitem> 2535 <listitem><para>The bus width.</para></listitem>
2536 </itemizedlist> 2536 </itemizedlist>
2537 </para>
2537 2538
2538 <para>For instance, for a JPEG baseline process and an 8-bit bus width 2539 <para>For instance, for a JPEG baseline process and an 8-bit bus width
2539 the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>. 2540 the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
2540 </para>
2541 </para> 2541 </para>
2542 2542
2543 <para>The following table lists existing JPEG compressed formats.</para> 2543 <para>The following table lists existing JPEG compressed formats.</para>
diff --git a/Documentation/IRQ-affinity.txt b/Documentation/IRQ-affinity.txt
index b4a615b78403..7890fae18529 100644
--- a/Documentation/IRQ-affinity.txt
+++ b/Documentation/IRQ-affinity.txt
@@ -4,10 +4,11 @@ ChangeLog:
4 4
5SMP IRQ affinity 5SMP IRQ affinity
6 6
7/proc/irq/IRQ#/smp_affinity specifies which target CPUs are permitted 7/proc/irq/IRQ#/smp_affinity and /proc/irq/IRQ#/smp_affinity_list specify
8for a given IRQ source. It's a bitmask of allowed CPUs. It's not allowed 8which target CPUs are permitted for a given IRQ source. It's a bitmask
9to turn off all CPUs, and if an IRQ controller does not support IRQ 9(smp_affinity) or cpu list (smp_affinity_list) of allowed CPUs. It's not
10affinity then the value will not change from the default 0xffffffff. 10allowed to turn off all CPUs, and if an IRQ controller does not support
11IRQ affinity then the value will not change from the default of all cpus.
11 12
12/proc/irq/default_smp_affinity specifies default affinity mask that applies 13/proc/irq/default_smp_affinity specifies default affinity mask that applies
13to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask 14to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
@@ -54,3 +55,11 @@ round-trip min/avg/max = 0.1/0.5/585.4 ms
54This time around IRQ44 was delivered only to the last four processors. 55This time around IRQ44 was delivered only to the last four processors.
55i.e counters for the CPU0-3 did not change. 56i.e counters for the CPU0-3 did not change.
56 57
58Here is an example of limiting that same irq (44) to cpus 1024 to 1031:
59
60[root@moon 44]# echo 1024-1031 > smp_affinity_list
61[root@moon 44]# cat smp_affinity_list
621024-1031
63
64Note that to do this with a bitmask would require 32 bitmasks of zero
65to follow the pertinent one.
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index e9c77788a39d..f6318f6d7baf 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -177,6 +177,8 @@ static int get_family_id(int sd)
177 rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY, 177 rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
178 CTRL_ATTR_FAMILY_NAME, (void *)name, 178 CTRL_ATTR_FAMILY_NAME, (void *)name,
179 strlen(TASKSTATS_GENL_NAME)+1); 179 strlen(TASKSTATS_GENL_NAME)+1);
180 if (rc < 0)
181 return 0; /* sendto() failure? */
180 182
181 rep_len = recv(sd, &ans, sizeof(ans), 0); 183 rep_len = recv(sd, &ans, sizeof(ans), 0);
182 if (ans.n.nlmsg_type == NLMSG_ERROR || 184 if (ans.n.nlmsg_type == NLMSG_ERROR ||
@@ -191,30 +193,37 @@ static int get_family_id(int sd)
191 return id; 193 return id;
192} 194}
193 195
196#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
197
194static void print_delayacct(struct taskstats *t) 198static void print_delayacct(struct taskstats *t)
195{ 199{
196 printf("\n\nCPU %15s%15s%15s%15s\n" 200 printf("\n\nCPU %15s%15s%15s%15s%15s\n"
197 " %15llu%15llu%15llu%15llu\n" 201 " %15llu%15llu%15llu%15llu%15.3fms\n"
198 "IO %15s%15s\n" 202 "IO %15s%15s%15s\n"
199 " %15llu%15llu\n" 203 " %15llu%15llu%15llums\n"
200 "SWAP %15s%15s\n" 204 "SWAP %15s%15s%15s\n"
201 " %15llu%15llu\n" 205 " %15llu%15llu%15llums\n"
202 "RECLAIM %12s%15s\n" 206 "RECLAIM %12s%15s%15s\n"
203 " %15llu%15llu\n", 207 " %15llu%15llu%15llums\n",
204 "count", "real total", "virtual total", "delay total", 208 "count", "real total", "virtual total",
209 "delay total", "delay average",
205 (unsigned long long)t->cpu_count, 210 (unsigned long long)t->cpu_count,
206 (unsigned long long)t->cpu_run_real_total, 211 (unsigned long long)t->cpu_run_real_total,
207 (unsigned long long)t->cpu_run_virtual_total, 212 (unsigned long long)t->cpu_run_virtual_total,
208 (unsigned long long)t->cpu_delay_total, 213 (unsigned long long)t->cpu_delay_total,
209 "count", "delay total", 214 average_ms((double)t->cpu_delay_total, t->cpu_count),
215 "count", "delay total", "delay average",
210 (unsigned long long)t->blkio_count, 216 (unsigned long long)t->blkio_count,
211 (unsigned long long)t->blkio_delay_total, 217 (unsigned long long)t->blkio_delay_total,
212 "count", "delay total", 218 average_ms(t->blkio_delay_total, t->blkio_count),
219 "count", "delay total", "delay average",
213 (unsigned long long)t->swapin_count, 220 (unsigned long long)t->swapin_count,
214 (unsigned long long)t->swapin_delay_total, 221 (unsigned long long)t->swapin_delay_total,
215 "count", "delay total", 222 average_ms(t->swapin_delay_total, t->swapin_count),
223 "count", "delay total", "delay average",
216 (unsigned long long)t->freepages_count, 224 (unsigned long long)t->freepages_count,
217 (unsigned long long)t->freepages_delay_total); 225 (unsigned long long)t->freepages_delay_total,
226 average_ms(t->freepages_delay_total, t->freepages_count));
218} 227}
219 228
220static void task_context_switch_counts(struct taskstats *t) 229static void task_context_switch_counts(struct taskstats *t)
@@ -433,8 +442,6 @@ int main(int argc, char *argv[])
433 } 442 }
434 443
435 do { 444 do {
436 int i;
437
438 rep_len = recv(nl_sd, &msg, sizeof(msg), 0); 445 rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
439 PRINTF("received %d bytes\n", rep_len); 446 PRINTF("received %d bytes\n", rep_len);
440 447
@@ -459,7 +466,6 @@ int main(int argc, char *argv[])
459 466
460 na = (struct nlattr *) GENLMSG_DATA(&msg); 467 na = (struct nlattr *) GENLMSG_DATA(&msg);
461 len = 0; 468 len = 0;
462 i = 0;
463 while (len < rep_len) { 469 while (len < rep_len) {
464 len += NLA_ALIGN(na->nla_len); 470 len += NLA_ALIGN(na->nla_len);
465 switch (na->nla_type) { 471 switch (na->nla_type) {
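The average_ms() helper added to getdelays.c above relies on taskstats delay totals being reported in nanoseconds: dividing by 10^6 and by the operation count yields an average delay in milliseconds. The standalone illustration below uses made-up sample numbers and shows both the integer form (IO/SWAP/RECLAIM rows) and the floating-point form (CPU row).

/* Standalone illustration of the average_ms() macro semantics. */
#include <stdio.h>

#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))

int main(void)
{
	unsigned long long blkio_delay_total = 420000000ULL; /* ns, sample */
	unsigned long long blkio_count = 7;

	/* integer variant: 420000000 ns over 7 ops -> 60 ms average */
	printf("IO delay average:  %llums\n",
	       average_ms(blkio_delay_total, blkio_count));

	/* floating-point variant, as used for the CPU row */
	printf("CPU delay average: %.3fms\n",
	       average_ms((double)123456789.0, 4ULL));
	return 0;
}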
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index ac4d47187122..3bd585b44927 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -12,7 +12,7 @@ Also, it should be made opaque such that any kind of cast to a normal
12C integer type will fail. Something like the following should 12C integer type will fail. Something like the following should
13suffice: 13suffice:
14 14
15 typedef struct { volatile int counter; } atomic_t; 15 typedef struct { int counter; } atomic_t;
16 16
17Historically, counter has been declared volatile. This is now discouraged. 17Historically, counter has been declared volatile. This is now discouraged.
18See Documentation/volatile-considered-harmful.txt for the complete rationale. 18See Documentation/volatile-considered-harmful.txt for the complete rationale.
diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt
index 89698e8df7d4..c00c6a5ab21f 100644
--- a/Documentation/blockdev/cciss.txt
+++ b/Documentation/blockdev/cciss.txt
@@ -169,3 +169,18 @@ is issued which positions the tape to a known position. Typically you
169must rewind the tape (by issuing "mt -f /dev/st0 rewind" for example) 169must rewind the tape (by issuing "mt -f /dev/st0 rewind" for example)
170before i/o can proceed again to a tape drive which was reset. 170before i/o can proceed again to a tape drive which was reset.
171 171
172There is a cciss_tape_cmds module parameter which can be used to make cciss
173allocate more commands for use by tape drives. Ordinarily only a few commands
174(6) are allocated for tape drives because tape drives are slow and
175infrequently used and the primary purpose of Smart Array controllers is to
176act as a RAID controller for disk drives, so the vast majority of commands
177are allocated for disk devices. However, if you have more than a few tape
178drives attached to a smart array, the default number of commands may not be
179enough (for example, if you have 8 tape drives, you could only rewind 6
180at one time with the default number of commands.) The cciss_tape_cmds module
181parameter allows more commands (up to 16 more) to be allocated for use by
182tape drives. For example:
183
184 insmod cciss.ko cciss_tape_cmds=16
185
186Or, as a kernel boot parameter passed in via grub: cciss.cciss_tape_cmds=8
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 9164ae3b83bc..9b728dc17535 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -16,7 +16,7 @@ on all processors in the system. Don't let this scare you into
16thinking SMP cache/tlb flushing must be so inefficient, this is in 16thinking SMP cache/tlb flushing must be so inefficient, this is in
17fact an area where many optimizations are possible. For example, 17fact an area where many optimizations are possible. For example,
18if it can be proven that a user address space has never executed 18if it can be proven that a user address space has never executed
19on a cpu (see vma->cpu_vm_mask), one need not perform a flush 19on a cpu (see mm_cpumask()), one need not perform a flush
20for this address space on that cpu. 20for this address space on that cpu.
21 21
22First, the TLB flushing interfaces, since they are the simplest. The 22First, the TLB flushing interfaces, since they are the simplest. The
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index aedf1bd02fdd..0ed99f08f1f3 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -236,7 +236,8 @@ containing the following files describing that cgroup:
236 - cgroup.procs: list of tgids in the cgroup. This list is not 236 - cgroup.procs: list of tgids in the cgroup. This list is not
237 guaranteed to be sorted or free of duplicate tgids, and userspace 237 guaranteed to be sorted or free of duplicate tgids, and userspace
238 should sort/uniquify the list if this property is required. 238 should sort/uniquify the list if this property is required.
239 This is a read-only file, for now. 239 Writing a thread group id into this file moves all threads in that
240 group into this cgroup.
240 - notify_on_release flag: run the release agent on exit? 241 - notify_on_release flag: run the release agent on exit?
241 - release_agent: the path to use for release notifications (this file 242 - release_agent: the path to use for release notifications (this file
242 exists in the top cgroup only) 243 exists in the top cgroup only)
@@ -430,6 +431,12 @@ You can attach the current shell task by echoing 0:
430 431
431# echo 0 > tasks 432# echo 0 > tasks
432 433
434You can use the cgroup.procs file instead of the tasks file to move all
435threads in a threadgroup at once. Echoing the pid of any task in a
436threadgroup to cgroup.procs causes all tasks in that threadgroup to be
437attached to the cgroup. Writing 0 to cgroup.procs moves all tasks
438in the writing task's threadgroup.
439
433Note: Since every task is always a member of exactly one cgroup in each 440Note: Since every task is always a member of exactly one cgroup in each
434mounted hierarchy, to remove a task from its current cgroup you must 441mounted hierarchy, to remove a task from its current cgroup you must
435move it into a new cgroup (possibly the root cgroup) by writing to the 442move it into a new cgroup (possibly the root cgroup) by writing to the
@@ -575,7 +582,7 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
575called multiple times against a cgroup. 582called multiple times against a cgroup.
576 583
577int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 584int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
578 struct task_struct *task, bool threadgroup) 585 struct task_struct *task)
579(cgroup_mutex held by caller) 586(cgroup_mutex held by caller)
580 587
581Called prior to moving a task into a cgroup; if the subsystem 588Called prior to moving a task into a cgroup; if the subsystem
@@ -584,9 +591,14 @@ task is passed, then a successful result indicates that *any*
584unspecified task can be moved into the cgroup. Note that this isn't 591unspecified task can be moved into the cgroup. Note that this isn't
585called on a fork. If this method returns 0 (success) then this should 592called on a fork. If this method returns 0 (success) then this should
586remain valid while the caller holds cgroup_mutex and it is ensured that either 593remain valid while the caller holds cgroup_mutex and it is ensured that either
587attach() or cancel_attach() will be called in future. If threadgroup is 594attach() or cancel_attach() will be called in future.
588true, then a successful result indicates that all threads in the given 595
589thread's threadgroup can be moved together. 596int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
597(cgroup_mutex held by caller)
598
599As can_attach, but for operations that must be run once per task to be
600attached (possibly many when using cgroup_attach_proc). Called after
601can_attach.
590 602
591void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 603void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
592 struct task_struct *task, bool threadgroup) 604 struct task_struct *task, bool threadgroup)
@@ -598,15 +610,24 @@ function, so that the subsystem can implement a rollback. If not, not necessary.
598This will be called only about subsystems whose can_attach() operation have 610This will be called only about subsystems whose can_attach() operation have
599succeeded. 611succeeded.
600 612
613void pre_attach(struct cgroup *cgrp);
614(cgroup_mutex held by caller)
615
616For any non-per-thread attachment work that needs to happen before
617attach_task. Needed by cpuset.
618
601void attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 619void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
602 struct cgroup *old_cgrp, struct task_struct *task, 620 struct cgroup *old_cgrp, struct task_struct *task)
603 bool threadgroup)
604(cgroup_mutex held by caller) 621(cgroup_mutex held by caller)
605 622
606Called after the task has been attached to the cgroup, to allow any 623Called after the task has been attached to the cgroup, to allow any
607post-attachment activity that requires memory allocations or blocking. 624post-attachment activity that requires memory allocations or blocking.
608If threadgroup is true, the subsystem should take care of all threads 625
609in the specified thread's threadgroup. Currently does not support any 626void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
627(cgroup_mutex held by caller)
628
629As attach, but for operations that must be run once per task to be attached,
630like can_attach_task. Called before attach. Currently does not support any
610subsystem that might need the old_cgrp for every thread in the group. 631subsystem that might need the old_cgrp for every thread in the group.
611 632
612void fork(struct cgroup_subsy *ss, struct task_struct *task) 633void fork(struct cgroup_subsy *ss, struct task_struct *task)
@@ -630,7 +651,7 @@ always handled well.
630void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp) 651void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
631(cgroup_mutex held by caller) 652(cgroup_mutex held by caller)
632 653
633Called at the end of cgroup_clone() to do any parameter 654Called during cgroup_create() to do any parameter
634initialization which might be required before a task could attach. For 655initialization which might be required before a task could attach. For
635example in cpusets, no task may attach before 'cpus' and 'mems' are set 656example in cpusets, no task may attach before 'cpus' and 'mems' are set
636up. 657up.
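The new cgroup.procs write path documented above can be exercised with a single write from user space; the sketch below moves an entire thread group at once. The mount point /sys/fs/cgroup/cpuset and the group name "mygroup" are assumptions about how the hierarchy happens to be mounted, not part of the documentation change itself.

/* Sketch: move a whole thread group by writing its tgid to cgroup.procs.
 * Paths are hypothetical; writing "0" moves the writing task's own group.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/cpuset/mygroup/cgroup.procs", "w");

	if (!f) {
		perror("cgroup.procs");
		return 1;
	}
	/* all threads of this process are attached, not just the caller */
	fprintf(f, "%d\n", getpid());
	fclose(f);
	return 0;
}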
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index edb7ae19e868..2c6be0377f55 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -74,3 +74,57 @@ Example:
74 interrupt-parent = <&mpic>; 74 interrupt-parent = <&mpic>;
75 phy-handle = <&phy0> 75 phy-handle = <&phy0>
76 }; 76 };
77
78* Gianfar PTP clock nodes
79
80General Properties:
81
82 - compatible Should be "fsl,etsec-ptp"
83 - reg Offset and length of the register set for the device
84 - interrupts There should be at least two interrupts. Some devices
85 have as many as four PTP related interrupts.
86
87Clock Properties:
88
89 - fsl,tclk-period Timer reference clock period in nanoseconds.
90 - fsl,tmr-prsc Prescaler, divides the output clock.
91 - fsl,tmr-add Frequency compensation value.
92 - fsl,tmr-fiper1 Fixed interval period pulse generator.
93 - fsl,tmr-fiper2 Fixed interval period pulse generator.
94 - fsl,max-adj Maximum frequency adjustment in parts per billion.
95
96 These properties set the operational parameters for the PTP
97 clock. You must choose these carefully for the clock to work right.
98 Here is how to figure good values:
99
100 TimerOsc = system clock MHz
101 tclk_period = desired clock period nanoseconds
102 NominalFreq = 1000 / tclk_period MHz
103 FreqDivRatio = TimerOsc / NominalFreq (must be greater that 1.0)
104 tmr_add = ceil(2^32 / FreqDivRatio)
105 OutputClock = NominalFreq / tmr_prsc MHz
106 PulseWidth = 1 / OutputClock microseconds
107 FiperFreq1 = desired frequency in Hz
108 FiperDiv1 = 1000000 * OutputClock / FiperFreq1
109 tmr_fiper1 = tmr_prsc * tclk_period * FiperDiv1 - tclk_period
110 max_adj = 1000000000 * (FreqDivRatio - 1.0) - 1
111
112 The calculation for tmr_fiper2 is the same as for tmr_fiper1. The
113 driver expects that tmr_fiper1 will be correctly set to produce a 1
114 Pulse Per Second (PPS) signal, since this will be offered to the PPS
115 subsystem to synchronize the Linux clock.
116
117Example:
118
119 ptp_clock@24E00 {
120 compatible = "fsl,etsec-ptp";
121 reg = <0x24E00 0xB0>;
122 interrupts = <12 0x8 13 0x8>;
123 interrupt-parent = < &ipic >;
124 fsl,tclk-period = <10>;
125 fsl,tmr-prsc = <100>;
126 fsl,tmr-add = <0x999999A4>;
127 fsl,tmr-fiper1 = <0x3B9AC9F6>;
128 fsl,tmr-fiper2 = <0x00018696>;
129 fsl,max-adj = <659999998>;
130 };
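Since the binding above is essentially a recipe of arithmetic, a small helper makes it easy to sanity-check candidate property values. The sketch below is only a restatement of the formulas, using a hypothetical 200 MHz timer oscillator, a 10 ns tick, a prescaler of 2 and a 1 Hz fiper1 (for PPS); it does not describe any real board. Compile with -lm for ceil().

/* Sketch: derive Gianfar PTP clock properties from the formulas above. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double timer_osc_mhz = 200.0;  /* system clock feeding the timer (assumed) */
	double tclk_period_ns = 10.0;  /* desired timer tick */
	double tmr_prsc = 2.0;         /* output clock prescaler */
	double fiper_freq1_hz = 1.0;   /* 1 Hz -> PPS for the kernel */

	double nominal_freq = 1000.0 / tclk_period_ns;        /* MHz */
	double freq_div_ratio = timer_osc_mhz / nominal_freq; /* must be > 1.0 */
	uint32_t tmr_add = (uint32_t)ceil(4294967296.0 / freq_div_ratio);
	double output_clock = nominal_freq / tmr_prsc;        /* MHz */
	double fiper_div1 = 1000000.0 * output_clock / fiper_freq1_hz;
	uint64_t tmr_fiper1 =
		(uint64_t)(tmr_prsc * tclk_period_ns * fiper_div1 - tclk_period_ns);
	uint32_t max_adj =
		(uint32_t)(1000000000.0 * (freq_div_ratio - 1.0)) - 1;

	printf("fsl,tclk-period = <%u>;\n", (unsigned)tclk_period_ns);
	printf("fsl,tmr-prsc    = <%u>;\n", (unsigned)tmr_prsc);
	printf("fsl,tmr-add     = <0x%08X>;\n", tmr_add);
	printf("fsl,tmr-fiper1  = <0x%08llX>;\n", (unsigned long long)tmr_fiper1);
	printf("fsl,max-adj     = <%u>;\n", max_adj);
	return 0;
}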
diff --git a/Documentation/filesystems/9p.txt b/Documentation/filesystems/9p.txt
index b22abba78fed..13de64c7f0ab 100644
--- a/Documentation/filesystems/9p.txt
+++ b/Documentation/filesystems/9p.txt
@@ -25,6 +25,8 @@ Other applications are described in the following papers:
25 http://xcpu.org/papers/cellfs-talk.pdf 25 http://xcpu.org/papers/cellfs-talk.pdf
26 * PROSE I/O: Using 9p to enable Application Partitions 26 * PROSE I/O: Using 9p to enable Application Partitions
27 http://plan9.escet.urjc.es/iwp9/cready/PROSE_iwp9_2006.pdf 27 http://plan9.escet.urjc.es/iwp9/cready/PROSE_iwp9_2006.pdf
28 * VirtFS: A Virtualization Aware File System pass-through
29 http://goo.gl/3WPDg
28 30
29USAGE 31USAGE
30===== 32=====
@@ -130,31 +132,20 @@ OPTIONS
130RESOURCES 132RESOURCES
131========= 133=========
132 134
133Our current recommendation is to use Inferno (http://www.vitanuova.com/nferno/index.html) 135Protocol specifications are maintained on github:
134as the 9p server. You can start a 9p server under Inferno by issuing the 136http://ericvh.github.com/9p-rfc/
135following command:
136 ; styxlisten -A tcp!*!564 export '#U*'
137 137
138The -A specifies an unauthenticated export. The 564 is the port # (you may 1389p client and server implementations are listed on
139have to choose a higher port number if running as a normal user). The '#U*' 139http://9p.cat-v.org/implementations
140specifies exporting the root of the Linux name space. You may specify a
141subset of the namespace by extending the path: '#U*'/tmp would just export
142/tmp. For more information, see the Inferno manual pages covering styxlisten
143and export.
144 140
145A Linux version of the 9p server is now maintained under the npfs project 141A 9p2000.L server is being developed by LLNL and can be found
146on sourceforge (http://sourceforge.net/projects/npfs). The currently 142at http://code.google.com/p/diod/
147maintained version is the single-threaded version of the server (named spfs)
148available from the same SVN repository.
149 143
150There are user and developer mailing lists available through the v9fs project 144There are user and developer mailing lists available through the v9fs project
151on sourceforge (http://sourceforge.net/projects/v9fs). 145on sourceforge (http://sourceforge.net/projects/v9fs).
152 146
153A stand-alone version of the module (which should build for any 2.6 kernel) 147News and other information is maintained on a Wiki.
154is available via (http://github.com/ericvh/9p-sac/tree/master) 148(http://sf.net/apps/mediawiki/v9fs/index.php).
155
156News and other information is maintained on SWiK (http://swik.net/v9fs)
157and the Wiki (http://sf.net/apps/mediawiki/v9fs/index.php).
158 149
159Bug reports may be issued through the kernel.org bugzilla 150Bug reports may be issued through the kernel.org bugzilla
160(http://bugzilla.kernel.org) 151(http://bugzilla.kernel.org)
diff --git a/Documentation/filesystems/configfs/configfs_example_explicit.c b/Documentation/filesystems/configfs/configfs_example_explicit.c
index fd53869f5633..1420233dfa55 100644
--- a/Documentation/filesystems/configfs/configfs_example_explicit.c
+++ b/Documentation/filesystems/configfs/configfs_example_explicit.c
@@ -464,9 +464,8 @@ static int __init configfs_example_init(void)
464 return 0; 464 return 0;
465 465
466out_unregister: 466out_unregister:
467 for (; i >= 0; i--) { 467 for (i--; i >= 0; i--)
468 configfs_unregister_subsystem(example_subsys[i]); 468 configfs_unregister_subsystem(example_subsys[i]);
469 }
470 469
471 return ret; 470 return ret;
472} 471}
@@ -475,9 +474,8 @@ static void __exit configfs_example_exit(void)
475{ 474{
476 int i; 475 int i;
477 476
478 for (i = 0; example_subsys[i]; i++) { 477 for (i = 0; example_subsys[i]; i++)
479 configfs_unregister_subsystem(example_subsys[i]); 478 configfs_unregister_subsystem(example_subsys[i]);
480 }
481} 479}
482 480
483module_init(configfs_example_init); 481module_init(configfs_example_init);
diff --git a/Documentation/filesystems/configfs/configfs_example_macros.c b/Documentation/filesystems/configfs/configfs_example_macros.c
index d8e30a0378aa..327dfbc640a9 100644
--- a/Documentation/filesystems/configfs/configfs_example_macros.c
+++ b/Documentation/filesystems/configfs/configfs_example_macros.c
@@ -427,9 +427,8 @@ static int __init configfs_example_init(void)
427 return 0; 427 return 0;
428 428
429out_unregister: 429out_unregister:
430 for (; i >= 0; i--) { 430 for (i--; i >= 0; i--)
431 configfs_unregister_subsystem(example_subsys[i]); 431 configfs_unregister_subsystem(example_subsys[i]);
432 }
433 432
434 return ret; 433 return ret;
435} 434}
@@ -438,9 +437,8 @@ static void __exit configfs_example_exit(void)
438{ 437{
439 int i; 438 int i;
440 439
441 for (i = 0; example_subsys[i]; i++) { 440 for (i = 0; example_subsys[i]; i++)
442 configfs_unregister_subsystem(example_subsys[i]); 441 configfs_unregister_subsystem(example_subsys[i]);
443 }
444} 442}
445 443
446module_init(configfs_example_init); 444module_init(configfs_example_init);
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index c79ec58fd7f6..3ae9bc94352a 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -226,10 +226,6 @@ acl Enables POSIX Access Control Lists support.
226noacl This option disables POSIX Access Control List 226noacl This option disables POSIX Access Control List
227 support. 227 support.
228 228
229reservation
230
231noreservation
232
233bsddf (*) Make 'df' act like BSD. 229bsddf (*) Make 'df' act like BSD.
234minixdf Make 'df' act like Minix. 230minixdf Make 'df' act like Minix.
235 231
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 60740e8ecb37..f48178024067 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -574,6 +574,12 @@ The contents of each smp_affinity file is the same by default:
574 > cat /proc/irq/0/smp_affinity 574 > cat /proc/irq/0/smp_affinity
575 ffffffff 575 ffffffff
576 576
577There is an alternate interface, smp_affinity_list which allows specifying
578a cpu range instead of a bitmask:
579
580 > cat /proc/irq/0/smp_affinity_list
581 1024-1031
582
577The default_smp_affinity mask applies to all non-active IRQs, which are the 583The default_smp_affinity mask applies to all non-active IRQs, which are the
578IRQs which have not yet been allocated/activated, and hence which lack a 584IRQs which have not yet been allocated/activated, and hence which lack a
579/proc/irq/[0-9]* directory. 585/proc/irq/[0-9]* directory.
@@ -583,12 +589,13 @@ reports itself as being attached. This hardware locality information does not
583include information about any possible driver locality preference. 589include information about any possible driver locality preference.
584 590
585prof_cpu_mask specifies which CPUs are to be profiled by the system wide 591prof_cpu_mask specifies which CPUs are to be profiled by the system wide
586profiler. Default value is ffffffff (all cpus). 592profiler. Default value is ffffffff (all cpus if there are only 32 of them).
587 593
588The way IRQs are routed is handled by the IO-APIC, and it's Round Robin 594The way IRQs are routed is handled by the IO-APIC, and it's Round Robin
589between all the CPUs which are allowed to handle it. As usual the kernel has 595between all the CPUs which are allowed to handle it. As usual the kernel has
590more info than you and does a better job than you, so the defaults are the 596more info than you and does a better job than you, so the defaults are the
591best choice for almost everyone. 597best choice for almost everyone. [Note this applies only to those IO-APIC's
598that support "Round Robin" interrupt distribution.]
592 599
593There are three more important subdirectories in /proc: net, scsi, and sys. 600There are three more important subdirectories in /proc: net, scsi, and sys.
594The general rule is that the contents, or even the existence of these 601The general rule is that the contents, or even the existence of these
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 7bff3e4f35df..3fc0c31a6f5d 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -39,6 +39,12 @@ When mounting an XFS filesystem, the following options are accepted.
39 drive level write caching to be enabled, for devices that 39 drive level write caching to be enabled, for devices that
40 support write barriers. 40 support write barriers.
41 41
42 discard
43 Issue command to let the block device reclaim space freed by the
44 filesystem. This is useful for SSD devices, thinly provisioned
45 LUNs and virtual machine images, but may have a performance
46 impact. This option is incompatible with the nodelaylog option.
47
42 dmapi 48 dmapi
43 Enable the DMAPI (Data Management API) event callouts. 49 Enable the DMAPI (Data Management API) event callouts.
44 Use with the "mtpt" option. 50 Use with the "mtpt" option.
diff --git a/Documentation/hwmon/emc6w201 b/Documentation/hwmon/emc6w201
new file mode 100644
index 000000000000..32f355aaf56b
--- /dev/null
+++ b/Documentation/hwmon/emc6w201
@@ -0,0 +1,42 @@
1Kernel driver emc6w201
2======================
3
4Supported chips:
5 * SMSC EMC6W201
6 Prefix: 'emc6w201'
7 Addresses scanned: I2C 0x2c, 0x2d, 0x2e
8 Datasheet: Not public
9
10Author: Jean Delvare <khali@linux-fr.org>
11
12
13Description
14-----------
15
16From the datasheet:
17
18"The EMC6W201 is an environmental monitoring device with automatic fan
19control capability and enhanced system acoustics for noise suppression.
20This ACPI compliant device provides hardware monitoring for up to six
21voltages (including its own VCC) and five external thermal sensors,
22measures the speed of up to five fans, and controls the speed of
23multiple DC fans using three Pulse Width Modulator (PWM) outputs. Note
24that it is possible to control more than three fans by connecting two
25fans to one PWM output. The EMC6W201 will be available in a 36-pin
26QFN package."
27
28The device is functionally close to the EMC6D100 series, but is
29register-incompatible.
30
31The driver currently only supports the monitoring of the voltages,
32temperatures and fan speeds. Limits can be changed. Alarms are not
33supported, and neither is fan speed control.
34
35
36Known Systems With EMC6W201
37---------------------------
38
39The EMC6W201 is a rare device, only found on a few systems, made in
402005 and 2006. Known systems with this device:
41* Dell Precision 670 workstation
42* Gigabyte 2CEWH mainboard
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index df02245d1419..84d2623810f3 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -6,6 +6,10 @@ Supported chips:
6 Prefix: 'f71808e' 6 Prefix: 'f71808e'
7 Addresses scanned: none, address read from Super I/O config space 7 Addresses scanned: none, address read from Super I/O config space
8 Datasheet: Not public 8 Datasheet: Not public
9 * Fintek F71808A
10 Prefix: 'f71808a'
11 Addresses scanned: none, address read from Super I/O config space
12 Datasheet: Not public
9 * Fintek F71858FG 13 * Fintek F71858FG
10 Prefix: 'f71858fg' 14 Prefix: 'f71858fg'
11 Addresses scanned: none, address read from Super I/O config space 15 Addresses scanned: none, address read from Super I/O config space
diff --git a/Documentation/hwmon/fam15h_power b/Documentation/hwmon/fam15h_power
new file mode 100644
index 000000000000..a92918e0bd69
--- /dev/null
+++ b/Documentation/hwmon/fam15h_power
@@ -0,0 +1,37 @@
1Kernel driver fam15h_power
2==========================
3
4Supported chips:
5* AMD Family 15h Processors
6
7 Prefix: 'fam15h_power'
8 Addresses scanned: PCI space
9 Datasheets:
10 BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
11 (not yet published)
12
13Author: Andreas Herrmann <andreas.herrmann3@amd.com>
14
15Description
16-----------
17
18This driver permits reading of registers providing power information
19of AMD Family 15h processors.
20
21For AMD Family 15h processors the following power values can be
22calculated using different processor northbridge function registers:
23
24* BasePwrWatts: Specifies in watts the maximum amount of power
25 consumed by the processor for NB and logic external to the core.
26* ProcessorPwrWatts: Specifies in watts the maximum amount of power
27 the processor can support.
28* CurrPwrWatts: Specifies in watts the current amount of power being
29 consumed by the processor.
30
31This driver provides ProcessorPwrWatts and CurrPwrWatts:
32* power1_crit (ProcessorPwrWatts)
33* power1_input (CurrPwrWatts)
34
35On multi-node processors the calculated value is for the entire
36package and not for a single node. Thus the driver creates sysfs
37attributes only for internal node0 of a multi-node processor.
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index d2b56a4fd1f5..0393c89277c0 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -11,6 +11,7 @@ Supported chips:
11 Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra) 11 Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
12* AMD Family 12h processors: "Llano" 12* AMD Family 12h processors: "Llano"
13* AMD Family 14h processors: "Brazos" (C/E/G-Series) 13* AMD Family 14h processors: "Brazos" (C/E/G-Series)
14* AMD Family 15h processors: "Bulldozer"
14 15
15 Prefix: 'k10temp' 16 Prefix: 'k10temp'
16 Addresses scanned: PCI space 17 Addresses scanned: PCI space
@@ -40,7 +41,7 @@ Description
40----------- 41-----------
41 42
42This driver permits reading of the internal temperature sensor of AMD 43This driver permits reading of the internal temperature sensor of AMD
43Family 10h/11h/12h/14h processors. 44Family 10h/11h/12h/14h/15h processors.
44 45
45All these processors have a sensor, but on those for Socket F or AM2+, 46All these processors have a sensor, but on those for Socket F or AM2+,
46the sensor may return inconsistent values (erratum 319). The driver 47the sensor may return inconsistent values (erratum 319). The driver
diff --git a/Documentation/hwmon/max6650 b/Documentation/hwmon/max6650
index c565650fcfc6..58d9644a2bde 100644
--- a/Documentation/hwmon/max6650
+++ b/Documentation/hwmon/max6650
@@ -2,9 +2,13 @@ Kernel driver max6650
2===================== 2=====================
3 3
4Supported chips: 4Supported chips:
5 * Maxim 6650 / 6651 5 * Maxim MAX6650
6 Prefix: 'max6650' 6 Prefix: 'max6650'
7 Addresses scanned: I2C 0x1b, 0x1f, 0x48, 0x4b 7 Addresses scanned: none
8 Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
9 * Maxim MAX6651
10 Prefix: 'max6651'
11 Addresses scanned: none
8 Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf 12 Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
9 13
10Authors: 14Authors:
@@ -15,10 +19,10 @@ Authors:
15Description 19Description
16----------- 20-----------
17 21
18This driver implements support for the Maxim 6650/6651 22This driver implements support for the Maxim MAX6650 and MAX6651.
19 23
20The 2 devices are very similar, but the Maxim 6550 has a reduced feature 24The 2 devices are very similar, but the MAX6650 has a reduced feature
21set, e.g. only one fan-input, instead of 4 for the 6651. 25set, e.g. only one fan-input, instead of 4 for the MAX6651.
22 26
23The driver is not able to distinguish between the 2 devices. 27The driver is not able to distinguish between the 2 devices.
24 28
@@ -36,6 +40,13 @@ fan1_div rw sets the speed range the inputs can handle. Legal
36 values are 1, 2, 4, and 8. Use lower values for 40 values are 1, 2, 4, and 8. Use lower values for
37 faster fans. 41 faster fans.
38 42
43Usage notes
44-----------
45
46This driver does not auto-detect devices. You will have to instantiate the
47devices explicitly. Please see Documentation/i2c/instantiating-devices for
48details.
49
39Module parameters 50Module parameters
40----------------- 51-----------------
41 52
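Because the updated max6650 driver no longer scans any addresses, the device has to be instantiated explicitly, as the new usage note says. One way to do that from user space is the new_device interface described in Documentation/i2c/instantiating-devices; the bus number (i2c-0) and address (0x1b) below are assumptions about a particular board.

/* Sketch: explicitly instantiate a MAX6650 at 0x1b on bus i2c-0.
 * Equivalent to: echo max6650 0x1b > /sys/bus/i2c/devices/i2c-0/new_device
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/i2c/devices/i2c-0/new_device", "w");

	if (!f) {
		perror("new_device");
		return 1;
	}
	fprintf(f, "max6650 0x1b\n");
	fclose(f);
	return 0;
}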
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 2d1ad12e2b3e..3a46e360496d 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -304,6 +304,7 @@ Code Seq#(hex) Include File Comments
3040xB0 all RATIO devices in development: 3040xB0 all RATIO devices in development:
305 <mailto:vgo@ratio.de> 305 <mailto:vgo@ratio.de>
3060xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca> 3060xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca>
3070xB3 00 linux/mmc/ioctl.h
3070xC0 00-0F linux/usb/iowarrior.h 3080xC0 00-0F linux/usb/iowarrior.h
3080xCB 00-1F CBM serial IEC bus in development: 3090xCB 00-1F CBM serial IEC bus in development:
309 <mailto:michael.klein@puffin.lb.shuttle.de> 310 <mailto:michael.klein@puffin.lb.shuttle.de>
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index b507d61fd41c..44e2649fbb29 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -113,6 +113,13 @@ applicable everywhere (see syntax).
113 That will limit the usefulness but on the other hand avoid 113 That will limit the usefulness but on the other hand avoid
114 the illegal configurations all over. 114 the illegal configurations all over.
115 115
116- limiting menu display: "visible if" <expr>
117 This attribute is only applicable to menu blocks, if the condition is
118 false, the menu block is not displayed to the user (the symbols
119 contained there can still be selected by other symbols, though). It is
120	similar to a conditional "prompt" attribute for individual menu
121 entries. Default value of "visible" is true.
122
116- numerical ranges: "range" <symbol> <symbol> ["if" <expr>] 123- numerical ranges: "range" <symbol> <symbol> ["if" <expr>]
117 This allows to limit the range of possible input values for int 124 This allows to limit the range of possible input values for int
118 and hex symbols. The user can only input a value which is larger than 125 and hex symbols. The user can only input a value which is larger than
@@ -303,7 +310,8 @@ menu:
303 "endmenu" 310 "endmenu"
304 311
305This defines a menu block, see "Menu structure" above for more 312This defines a menu block, see "Menu structure" above for more
306information. The only possible options are dependencies. 313information. The only possible options are dependencies and "visible"
314attributes.
307 315
308if: 316if:
309 317
@@ -381,3 +389,25 @@ config FOO
381 389
382limits FOO to module (=m) or disabled (=n). 390limits FOO to module (=m) or disabled (=n).
383 391
392Kconfig symbol existence
393~~~~~~~~~~~~~~~~~~~~~~~~
394The following two methods produce the same kconfig symbol dependencies
395but differ greatly in kconfig symbol existence (production) in the
396generated config file.
397
398case 1:
399
400config FOO
401 tristate "about foo"
402 depends on BAR
403
404vs. case 2:
405
406if BAR
407config FOO
408 tristate "about foo"
409endif
410
411In case 1, the symbol FOO will always exist in the config file (given
412no other dependencies). In case 2, the symbol FOO will only exist in
413the config file if BAR is enabled.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index cca46b1a0f6c..c313d71324b4 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -48,11 +48,6 @@ KCONFIG_OVERWRITECONFIG
48If you set KCONFIG_OVERWRITECONFIG in the environment, Kconfig will not 48If you set KCONFIG_OVERWRITECONFIG in the environment, Kconfig will not
49break symlinks when .config is a symlink to somewhere else. 49break symlinks when .config is a symlink to somewhere else.
50 50
51KCONFIG_NOTIMESTAMP
52--------------------------------------------------
53If this environment variable exists and is non-null, the timestamp line
54in generated .config files is omitted.
55
56______________________________________________________________________ 51______________________________________________________________________
57Environment variables for '{allyes/allmod/allno/rand}config' 52Environment variables for '{allyes/allmod/allno/rand}config'
58 53
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7c6624e7a5cb..5438a2d7907f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1777,9 +1777,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1777 1777
1778 nosoftlockup [KNL] Disable the soft-lockup detector. 1778 nosoftlockup [KNL] Disable the soft-lockup detector.
1779 1779
1780 noswapaccount [KNL] Disable accounting of swap in memory resource
1781 controller. (See Documentation/cgroups/memory.txt)
1782
1783 nosync [HW,M68K] Disables sync negotiation for all devices. 1780 nosync [HW,M68K] Disables sync negotiation for all devices.
1784 1781
1785 notsc [BUGS=X86-32] Disable Time Stamp Counter 1782 notsc [BUGS=X86-32] Disable Time Stamp Counter
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index 65f4c795015d..9c0a80d17a23 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -136,7 +136,7 @@ View the top contending locks:
136 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24 136 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24
137 &inode->i_mutex: 161 286 18446744073709 62882.54 1244614.55 3653 20598 18446744073709 62318.60 1693822.74 137 &inode->i_mutex: 161 286 18446744073709 62882.54 1244614.55 3653 20598 18446744073709 62318.60 1693822.74
138 &zone->lru_lock: 94 94 0.53 7.33 92.10 4366 32690 0.29 59.81 16350.06 138 &zone->lru_lock: 94 94 0.53 7.33 92.10 4366 32690 0.29 59.81 16350.06
139 &inode->i_data.i_mmap_lock: 79 79 0.40 3.77 53.03 11779 87755 0.28 116.93 29898.44 139 &inode->i_data.i_mmap_mutex: 79 79 0.40 3.77 53.03 11779 87755 0.28 116.93 29898.44
140 &q->__queue_lock: 48 50 0.52 31.62 86.31 774 13131 0.17 113.08 12277.52 140 &q->__queue_lock: 48 50 0.52 31.62 86.31 774 13131 0.17 113.08 12277.52
141 &rq->rq_lock_key: 43 47 0.74 68.50 170.63 3706 33929 0.22 107.99 17460.62 141 &rq->rq_lock_key: 43 47 0.74 68.50 170.63 3706 33929 0.22 107.99 17460.62
142 &rq->rq_lock_key#2: 39 46 0.75 6.68 49.03 2979 32292 0.17 125.17 17137.63 142 &rq->rq_lock_key#2: 39 46 0.75 6.68 49.03 2979 32292 0.17 125.17 17137.63
diff --git a/Documentation/mmc/00-INDEX b/Documentation/mmc/00-INDEX
index fca586f5b853..93dd7a714075 100644
--- a/Documentation/mmc/00-INDEX
+++ b/Documentation/mmc/00-INDEX
@@ -2,3 +2,5 @@
2 - this file 2 - this file
3mmc-dev-attrs.txt 3mmc-dev-attrs.txt
4 - info on SD and MMC device attributes 4 - info on SD and MMC device attributes
5mmc-dev-parts.txt
6 - info on SD and MMC device partitions
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index ff2bd685bced..8898a95b41e5 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -1,3 +1,13 @@
1SD and MMC Block Device Attributes
2==================================
3
4These attributes are defined for the block devices associated with the
5SD or MMC device.
6
7The following attributes are read/write.
8
9 force_ro Enforce read-only access even if write protect switch is off.
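	For example (the device name is assumed for illustration), write
	access to the whole device can be blocked with:

		echo 1 > /sys/block/mmcblk0/force_ro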
10
1SD and MMC Device Attributes 11SD and MMC Device Attributes
2============================ 12============================
3 13
diff --git a/Documentation/mmc/mmc-dev-parts.txt b/Documentation/mmc/mmc-dev-parts.txt
new file mode 100644
index 000000000000..2db28b8e662f
--- /dev/null
+++ b/Documentation/mmc/mmc-dev-parts.txt
@@ -0,0 +1,27 @@
1SD and MMC Device Partitions
2============================
3
4Device partitions are additional logical block devices present on the
5SD/MMC device.
6
7As of this writing, MMC boot partitions are supported and exposed as
8/dev/mmcblkXboot0 and /dev/mmcblkXboot1, where X is the index of the
9parent /dev/mmcblkX.
10
11MMC Boot Partitions
12===================
13
14Read and write access is provided to the two MMC boot partitions. Due to
15the sensitive nature of the boot partition contents, which often store
16a bootloader or bootloader configuration tables crucial to booting the
17platform, write access is disabled by default to reduce the chance of
18accidental bricking.
19
20To enable write access to /dev/mmcblkXbootY, disable the forced read-only
21access with:
22
23echo 0 > /sys/block/mmcblkXbootY/force_ro
24
25To re-enable read-only access:
26
27echo 1 > /sys/block/mmcblkXbootY/force_ro
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 1f45bd887d65..675612ff41ae 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -770,8 +770,17 @@ resend_igmp
770 a failover event. One membership report is issued immediately after 770 a failover event. One membership report is issued immediately after
771 the failover, subsequent packets are sent in each 200ms interval. 771 the failover, subsequent packets are sent in each 200ms interval.
772 772
773 The valid range is 0 - 255; the default value is 1. This option 773 The valid range is 0 - 255; the default value is 1. A value of 0
774 was added for bonding version 3.7.0. 774 prevents the IGMP membership report from being issued in response
775 to the failover event.
776
777 This option is useful for bonding modes balance-rr (0), active-backup
778 (1), balance-tlb (5) and balance-alb (6), in which a failover can
779 switch the IGMP traffic from one slave to another. Therefore a fresh
780 IGMP report must be issued to cause the switch to forward the incoming
781 IGMP traffic over the newly selected slave.
782
783 This option was added for bonding version 3.7.0.
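	For example (the bond name is assumed, and the sysfs knob is only
	available on kernels that expose the bonding options there), three
	reports per failover can be requested at load time or at run time:

	modprobe bonding resend_igmp=3
	echo 3 > /sys/class/net/bond0/bonding/resend_igmp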
775 784
7763. Configuring Bonding Devices 7853. Configuring Bonding Devices
777============================== 786==============================
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index bdec39b9bd75..b42419b52e44 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -53,11 +53,11 @@ static struct regulator_init_data regulator1_data = {
53 53
54Regulator-1 supplies power to Regulator-2. This relationship must be registered 54Regulator-1 supplies power to Regulator-2. This relationship must be registered
55with the core so that Regulator-1 is also enabled when Consumer A enables its 55with the core so that Regulator-1 is also enabled when Consumer A enables its
56supply (Regulator-2). The supply regulator is set by the supply_regulator_dev 56supply (Regulator-2). The supply regulator is set by the supply_regulator
57field below:- 57field below:-
58 58
59static struct regulator_init_data regulator2_data = { 59static struct regulator_init_data regulator2_data = {
60 .supply_regulator_dev = &platform_regulator1_device.dev, 60 .supply_regulator = "regulator_name",
61 .constraints = { 61 .constraints = {
62 .min_uV = 1800000, 62 .min_uV = 1800000,
63 .max_uV = 2000000, 63 .max_uV = 2000000,
diff --git a/Documentation/ptp/ptp.txt b/Documentation/ptp/ptp.txt
new file mode 100644
index 000000000000..ae8fef86b832
--- /dev/null
+++ b/Documentation/ptp/ptp.txt
@@ -0,0 +1,89 @@
1
2* PTP hardware clock infrastructure for Linux
3
4 This patch set introduces support for IEEE 1588 PTP clocks in
5 Linux. Together with the SO_TIMESTAMPING socket options, this
6 presents a standardized method for developing PTP user space
7 programs, synchronizing Linux with external clocks, and using the
8 ancillary features of PTP hardware clocks.
9
10 A new class driver exports a kernel interface for specific clock
11 drivers and a user space interface. The infrastructure supports a
12 complete set of PTP hardware clock functionality.
13
14 + Basic clock operations
15 - Set time
16 - Get time
17 - Shift the clock by a given offset atomically
18 - Adjust clock frequency
19
20 + Ancillary clock features
21 - One shot or periodic alarms, with signal delivery to user program
22 - Time stamp external events
23 - Periodic output signals configurable from user space
24 - Synchronization of the Linux system time via the PPS subsystem
25
26** PTP hardware clock kernel API
27
28 A PTP clock driver registers itself with the class driver. The
29 class driver handles all of the dealings with user space. The
30 author of a clock driver need only implement the details of
31 programming the clock hardware. The clock driver notifies the class
32 driver of asynchronous events (alarms and external time stamps) via
33 a simple message passing interface.
34
35 The class driver supports multiple PTP clock drivers. In normal use
36 cases, only one PTP clock is needed. However, for testing and
37 development, it can be useful to have more than one clock in a
38 single system, in order to allow performance comparisons.
39
40** PTP hardware clock user space API
41
42 The class driver also creates a character device for each
43 registered clock. User space can use an open file descriptor from
44 the character device as a POSIX clock id and may call
45 clock_gettime, clock_settime, and clock_adjtime. These calls
46 implement the basic clock operations.
47
48 User space programs may control the clock using standardized
49 ioctls. A program may query, enable, configure, and disable the
50 ancillary clock features. User space can receive time stamped
51 events via blocking read() and poll(). One shot and periodic
52 signals may be configured via the POSIX timer_settime() system
53 call.
54
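   As a minimal sketch of the read path (the device node name is an
   assumption, and the fd-to-clockid mapping mirrors the testptp.c example
   added later in this series), a user space program might do:

   #include <fcntl.h>
   #include <stdio.h>
   #include <time.h>
   #include <unistd.h>

   #define CLOCKFD 3
   #define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)

   int main(void)
   {
   	struct timespec ts;
   	int fd = open("/dev/ptp0", O_RDWR);

   	if (fd < 0) {
   		perror("open");
   		return 1;
   	}
   	if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
   		perror("clock_gettime");
   	else
   		printf("ptp time: %ld.%09ld\n",
   		       (long) ts.tv_sec, (long) ts.tv_nsec);
   	close(fd);
   	return 0;
   }

   Linking against librt (-lrt) may be needed for clock_gettime on older
   C libraries.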
55** Writing clock drivers
56
57 Clock drivers include include/linux/ptp_clock_kernel.h and register
58 themselves by presenting a 'struct ptp_clock_info' to the
59 registration method. Clock drivers must implement all of the
60 functions in the interface. If a clock does not offer a particular
61 ancillary feature, then the driver should just return -EOPNOTSUPP
62 from those functions.
63
64 Drivers must ensure that all of the methods in the interface are
65 reentrant. Since most hardware implementations treat the time value
66 as a 64 bit integer accessed as two 32 bit registers, drivers
67 should use spin_lock_irqsave/spin_unlock_irqrestore to protect
68 against concurrent access. This locking cannot be accomplished in
69 the class driver, since the lock may also be needed by the clock
70 driver's interrupt service routine.
71
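   To make the registration step concrete, here is a rough sketch of a
   clock driver, assuming the ptp_clock_info layout from
   include/linux/ptp_clock_kernel.h introduced by this series; the foo_*
   names and the hardware access are placeholders, not a real driver:

   #include <linux/err.h>
   #include <linux/module.h>
   #include <linux/ptp_clock_kernel.h>
   #include <linux/spinlock.h>

   static DEFINE_SPINLOCK(foo_lock);
   static struct ptp_clock *foo_clock;

   static void foo_hw_read_time(struct timespec *ts)
   {
   	/* placeholder: read the two 32 bit time registers here */
   	ts->tv_sec = 0;
   	ts->tv_nsec = 0;
   }

   static int foo_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
   {
   	unsigned long flags;

   	/* serialize against the driver's own interrupt handler */
   	spin_lock_irqsave(&foo_lock, flags);
   	foo_hw_read_time(ts);
   	spin_unlock_irqrestore(&foo_lock, flags);
   	return 0;
   }

   static int foo_enable(struct ptp_clock_info *ptp,
   		      struct ptp_clock_request *rq, int on)
   {
   	return -EOPNOTSUPP;	/* no ancillary features on this hardware */
   }

   static struct ptp_clock_info foo_caps = {
   	.owner		= THIS_MODULE,
   	.name		= "foo timer",
   	.max_adj	= 512000,
   	.gettime	= foo_gettime,
   	.enable		= foo_enable,
   	/* .adjfreq, .adjtime and .settime are filled in the same way */
   };

   /* typically called from the device probe routine */
   static int foo_register_clock(void)
   {
   	foo_clock = ptp_clock_register(&foo_caps);
   	return IS_ERR(foo_clock) ? PTR_ERR(foo_clock) : 0;
   }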
72** Supported hardware
73
74 + Freescale eTSEC gianfar
75 - 2 Time stamp external triggers, programmable polarity (opt. interrupt)
76 - 2 Alarm registers (optional interrupt)
77 - 3 Periodic signals (optional interrupt)
78
79 + National DP83640
80 - 6 GPIOs programmable as inputs or outputs
81 - 6 GPIOs with dedicated functions (LED/JTAG/clock) can also be
82 used as general inputs or outputs
83 - GPIO inputs can time stamp external triggers
84 - GPIO outputs can produce periodic signals
85 - 1 interrupt pin
86
87 + Intel IXP465
88 - Auxiliary Slave/Master Mode Snapshot (optional interrupt)
89 - Target Time (optional interrupt)
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
new file mode 100644
index 000000000000..f59ded066108
--- /dev/null
+++ b/Documentation/ptp/testptp.c
@@ -0,0 +1,381 @@
1/*
2 * PTP 1588 clock support - User space test program
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <errno.h>
21#include <fcntl.h>
22#include <math.h>
23#include <signal.h>
24#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/ioctl.h>
28#include <sys/mman.h>
29#include <sys/stat.h>
30#include <sys/time.h>
31#include <sys/timex.h>
32#include <sys/types.h>
33#include <time.h>
34#include <unistd.h>
35
36#include <linux/ptp_clock.h>
37
38#define DEVICE "/dev/ptp0"
39
40#ifndef ADJ_SETOFFSET
41#define ADJ_SETOFFSET 0x0100
42#endif
43
44#ifndef CLOCK_INVALID
45#define CLOCK_INVALID -1
46#endif
47
48/* When glibc offers the syscall, this will go away. */
49#include <sys/syscall.h>
50static int clock_adjtime(clockid_t id, struct timex *tx)
51{
52 return syscall(__NR_clock_adjtime, id, tx);
53}
54
55static clockid_t get_clockid(int fd)
56{
57#define CLOCKFD 3
58#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
59
60 return FD_TO_CLOCKID(fd);
61}
62
63static void handle_alarm(int s)
64{
65 printf("received signal %d\n", s);
66}
67
68static int install_handler(int signum, void (*handler)(int))
69{
70 struct sigaction action;
71 sigset_t mask;
72
73 /* Unblock the signal. */
74 sigemptyset(&mask);
75 sigaddset(&mask, signum);
76 sigprocmask(SIG_UNBLOCK, &mask, NULL);
77
78 /* Install the signal handler. */
79 action.sa_handler = handler;
80 action.sa_flags = 0;
81 sigemptyset(&action.sa_mask);
82 sigaction(signum, &action, NULL);
83
84 return 0;
85}
86
87static long ppb_to_scaled_ppm(int ppb)
88{
89 /*
90 * The 'freq' field in the 'struct timex' is in parts per
91 * million, but with a 16 bit binary fractional field.
92 * Instead of calculating either one of
93 *
94 * scaled_ppm = (ppb / 1000) << 16 [1]
95 * scaled_ppm = (ppb << 16) / 1000 [2]
96 *
97 * we simply use double precision math, in order to avoid the
98 * truncation in [1] and the possible overflow in [2].
99 */
100 return (long) (ppb * 65.536);
101}
102
103static void usage(char *progname)
104{
105 fprintf(stderr,
106 "usage: %s [options]\n"
107 " -a val request a one-shot alarm after 'val' seconds\n"
108 " -A val request a periodic alarm every 'val' seconds\n"
109 " -c query the ptp clock's capabilities\n"
110 " -d name device to open\n"
111 " -e val read 'val' external time stamp events\n"
112 " -f val adjust the ptp clock frequency by 'val' ppb\n"
113 " -g get the ptp clock time\n"
114 " -h prints this message\n"
115 " -p val enable output with a period of 'val' nanoseconds\n"
116 " -P val enable or disable (val=1|0) the system clock PPS\n"
117 " -s set the ptp clock time from the system time\n"
118 " -S set the system time from the ptp clock time\n"
119 " -t val shift the ptp clock time by 'val' seconds\n",
120 progname);
121}
122
123int main(int argc, char *argv[])
124{
125 struct ptp_clock_caps caps;
126 struct ptp_extts_event event;
127 struct ptp_extts_request extts_request;
128 struct ptp_perout_request perout_request;
129 struct timespec ts;
130 struct timex tx;
131
132 static timer_t timerid;
133 struct itimerspec timeout;
134 struct sigevent sigevent;
135
136 char *progname;
137 int c, cnt, fd;
138
139 char *device = DEVICE;
140 clockid_t clkid;
141 int adjfreq = 0x7fffffff;
142 int adjtime = 0;
143 int capabilities = 0;
144 int extts = 0;
145 int gettime = 0;
146 int oneshot = 0;
147 int periodic = 0;
148 int perout = -1;
149 int pps = -1;
150 int settime = 0;
151
152 progname = strrchr(argv[0], '/');
153 progname = progname ? 1+progname : argv[0];
154 while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) {
155 switch (c) {
156 case 'a':
157 oneshot = atoi(optarg);
158 break;
159 case 'A':
160 periodic = atoi(optarg);
161 break;
162 case 'c':
163 capabilities = 1;
164 break;
165 case 'd':
166 device = optarg;
167 break;
168 case 'e':
169 extts = atoi(optarg);
170 break;
171 case 'f':
172 adjfreq = atoi(optarg);
173 break;
174 case 'g':
175 gettime = 1;
176 break;
177 case 'p':
178 perout = atoi(optarg);
179 break;
180 case 'P':
181 pps = atoi(optarg);
182 break;
183 case 's':
184 settime = 1;
185 break;
186 case 'S':
187 settime = 2;
188 break;
189 case 't':
190 adjtime = atoi(optarg);
191 break;
192 case 'h':
193 usage(progname);
194 return 0;
195 case '?':
196 default:
197 usage(progname);
198 return -1;
199 }
200 }
201
202 fd = open(device, O_RDWR);
203 if (fd < 0) {
204 fprintf(stderr, "opening %s: %s\n", device, strerror(errno));
205 return -1;
206 }
207
208 clkid = get_clockid(fd);
209 if (CLOCK_INVALID == clkid) {
210 fprintf(stderr, "failed to read clock id\n");
211 return -1;
212 }
213
214 if (capabilities) {
215 if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
216 perror("PTP_CLOCK_GETCAPS");
217 } else {
218 printf("capabilities:\n"
219 " %d maximum frequency adjustment (ppb)\n"
220 " %d programmable alarms\n"
221 " %d external time stamp channels\n"
222 " %d programmable periodic signals\n"
223 " %d pulse per second\n",
224 caps.max_adj,
225 caps.n_alarm,
226 caps.n_ext_ts,
227 caps.n_per_out,
228 caps.pps);
229 }
230 }
231
232 if (0x7fffffff != adjfreq) {
233 memset(&tx, 0, sizeof(tx));
234 tx.modes = ADJ_FREQUENCY;
235 tx.freq = ppb_to_scaled_ppm(adjfreq);
236 if (clock_adjtime(clkid, &tx)) {
237 perror("clock_adjtime");
238 } else {
239 puts("frequency adjustment okay");
240 }
241 }
242
243 if (adjtime) {
244 memset(&tx, 0, sizeof(tx));
245 tx.modes = ADJ_SETOFFSET;
246 tx.time.tv_sec = adjtime;
247 tx.time.tv_usec = 0;
248 if (clock_adjtime(clkid, &tx) < 0) {
249 perror("clock_adjtime");
250 } else {
251 puts("time shift okay");
252 }
253 }
254
255 if (gettime) {
256 if (clock_gettime(clkid, &ts)) {
257 perror("clock_gettime");
258 } else {
259 printf("clock time: %ld.%09ld or %s",
260 ts.tv_sec, ts.tv_nsec, ctime(&ts.tv_sec));
261 }
262 }
263
264 if (settime == 1) {
265 clock_gettime(CLOCK_REALTIME, &ts);
266 if (clock_settime(clkid, &ts)) {
267 perror("clock_settime");
268 } else {
269 puts("set time okay");
270 }
271 }
272
273 if (settime == 2) {
274 clock_gettime(clkid, &ts);
275 if (clock_settime(CLOCK_REALTIME, &ts)) {
276 perror("clock_settime");
277 } else {
278 puts("set time okay");
279 }
280 }
281
282 if (extts) {
283 memset(&extts_request, 0, sizeof(extts_request));
284 extts_request.index = 0;
285 extts_request.flags = PTP_ENABLE_FEATURE;
286 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
287 perror("PTP_EXTTS_REQUEST");
288 extts = 0;
289 } else {
290 puts("external time stamp request okay");
291 }
292 for (; extts; extts--) {
293 cnt = read(fd, &event, sizeof(event));
294 if (cnt != sizeof(event)) {
295 perror("read");
296 break;
297 }
298 printf("event index %u at %lld.%09u\n", event.index,
299 event.t.sec, event.t.nsec);
300 fflush(stdout);
301 }
302 /* Disable the feature again. */
303 extts_request.flags = 0;
304 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
305 perror("PTP_EXTTS_REQUEST");
306 }
307 }
308
309 if (oneshot) {
310 install_handler(SIGALRM, handle_alarm);
311 /* Create a timer. */
312 sigevent.sigev_notify = SIGEV_SIGNAL;
313 sigevent.sigev_signo = SIGALRM;
314 if (timer_create(clkid, &sigevent, &timerid)) {
315 perror("timer_create");
316 return -1;
317 }
318 /* Start the timer. */
319 memset(&timeout, 0, sizeof(timeout));
320 timeout.it_value.tv_sec = oneshot;
321 if (timer_settime(timerid, 0, &timeout, NULL)) {
322 perror("timer_settime");
323 return -1;
324 }
325 pause();
326 timer_delete(timerid);
327 }
328
329 if (periodic) {
330 install_handler(SIGALRM, handle_alarm);
331 /* Create a timer. */
332 sigevent.sigev_notify = SIGEV_SIGNAL;
333 sigevent.sigev_signo = SIGALRM;
334 if (timer_create(clkid, &sigevent, &timerid)) {
335 perror("timer_create");
336 return -1;
337 }
338 /* Start the timer. */
339 memset(&timeout, 0, sizeof(timeout));
340 timeout.it_interval.tv_sec = periodic;
341 timeout.it_value.tv_sec = periodic;
342 if (timer_settime(timerid, 0, &timeout, NULL)) {
343 perror("timer_settime");
344 return -1;
345 }
346 while (1) {
347 pause();
348 }
349 timer_delete(timerid);
350 }
351
352 if (perout >= 0) {
353 if (clock_gettime(clkid, &ts)) {
354 perror("clock_gettime");
355 return -1;
356 }
357 memset(&perout_request, 0, sizeof(perout_request));
358 perout_request.index = 0;
359 perout_request.start.sec = ts.tv_sec + 2;
360 perout_request.start.nsec = 0;
361 perout_request.period.sec = 0;
362 perout_request.period.nsec = perout;
363 if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) {
364 perror("PTP_PEROUT_REQUEST");
365 } else {
366 puts("periodic output request okay");
367 }
368 }
369
370 if (pps != -1) {
371 int enable = pps ? 1 : 0;
372 if (ioctl(fd, PTP_ENABLE_PPS, enable)) {
373 perror("PTP_ENABLE_PPS");
374 } else {
375 puts("pps for system time request okay");
376 }
377 }
378
379 close(fd);
380 return 0;
381}
diff --git a/Documentation/ptp/testptp.mk b/Documentation/ptp/testptp.mk
new file mode 100644
index 000000000000..4ef2d9755421
--- /dev/null
+++ b/Documentation/ptp/testptp.mk
@@ -0,0 +1,33 @@
1# PTP 1588 clock support - User space test program
2#
3# Copyright (C) 2010 OMICRON electronics GmbH
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License as published by
7# the Free Software Foundation; either version 2 of the License, or
8# (at your option) any later version.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19CC = $(CROSS_COMPILE)gcc
20INC = -I$(KBUILD_OUTPUT)/usr/include
21CFLAGS = -Wall $(INC)
22LDLIBS = -lrt
23PROGS = testptp
24
25all: $(PROGS)
26
27testptp: testptp.o
28
29clean:
30 rm -f testptp.o
31
32distclean: clean
33 rm -f $(PROGS)
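A possible way to build and run the test program (the paths and clock index
are assumptions): install the sanitized kernel headers so that
<linux/ptp_clock.h> is visible, then build with this makefile and query the
clock's capabilities:

	make headers_install
	make -f testptp.mk testptp
	./testptp -d /dev/ptp0 -c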
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 36f007514db3..5e7cb39ad195 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -161,7 +161,8 @@ core_pattern is used to specify a core dumpfile pattern name.
161 %s signal number 161 %s signal number
162 %t UNIX time of dump 162 %t UNIX time of dump
163 %h hostname 163 %h hostname
164 %e executable filename 164 %e executable filename (may be shortened)
165 %E executable path
165 %<OTHER> both are dropped 166 %<OTHER> both are dropped
166. If the first character of the pattern is a '|', the kernel will treat 167. If the first character of the pattern is a '|', the kernel will treat
167 the rest of the pattern as a command to run. The core dump will be 168 the rest of the pattern as a command to run. The core dump will be
diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
index 9b7e1904db1c..5d0fc8bfcdb9 100644
--- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
+++ b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
@@ -1182,6 +1182,16 @@
1182 forge.net/> and explains these in detail, as well as 1182 forge.net/> and explains these in detail, as well as
1183 some other issues. 1183 some other issues.
1184 1184
1185 There is also a related point-to-point only "ucast" transport.
1186 This is useful when your network does not support multicast, and
1187 all network connections are simple point to point links.
1188
1189 The full set of command line options for this transport is
1190
1191
1192 ethn=ucast,ethernet address,remote address,listen port,remote port
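     For instance (all values here are made up for illustration), one host
     could be configured with

       eth0=ucast,d6:ab:cd:01:02:03,192.168.0.10,1102,1101

     and its peer with the mirror-image addresses and port numbers.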
1193
1194
1185 1195
1186 1196
1187 66..66.. TTUUNN//TTAAPP wwiitthh tthhee uummll__nneett hheellppeerr 1197 66..66.. TTUUNN//TTAAPP wwiitthh tthhee uummll__nneett hheellppeerr
diff --git a/Documentation/vm/cleancache.txt b/Documentation/vm/cleancache.txt
new file mode 100644
index 000000000000..36c367c73084
--- /dev/null
+++ b/Documentation/vm/cleancache.txt
@@ -0,0 +1,278 @@
1MOTIVATION
2
3Cleancache is a new optional feature provided by the VFS layer that
4potentially dramatically increases page cache effectiveness for
5many workloads in many environments at a negligible cost.
6
7Cleancache can be thought of as a page-granularity victim cache for clean
8pages that the kernel's pageframe replacement algorithm (PFRA) would like
9to keep around, but can't since there isn't enough memory. So when the
10PFRA "evicts" a page, it first attempts to use cleancache code to
11put the data contained in that page into "transcendent memory", memory
12that is not directly accessible or addressable by the kernel and is
13of unknown and possibly time-varying size.
14
15Later, when a cleancache-enabled filesystem wishes to access a page
16in a file on disk, it first checks cleancache to see if it already
17contains it; if it does, the page of data is copied into the kernel
18and a disk access is avoided.
19
20Transcendent memory "drivers" for cleancache are currently implemented
21in Xen (using hypervisor memory) and zcache (using in-kernel compressed
22memory) and other implementations are in development.
23
24FAQs are included below.
25
26IMPLEMENTATION OVERVIEW
27
28A cleancache "backend" that provides transcendent memory registers itself
29to the kernel's cleancache "frontend" by calling cleancache_register_ops,
30passing a pointer to a cleancache_ops structure with funcs set appropriately.
31Note that cleancache_register_ops returns the previous settings so that
32chaining can be performed if desired. The functions provided must conform to
33certain semantics as follows:
34
35Most important, cleancache is "ephemeral". Pages which are copied into
36cleancache have an indefinite lifetime which is completely unknowable
37by the kernel and so may or may not still be in cleancache at any later time.
38Thus, as its name implies, cleancache is not suitable for dirty pages.
39Cleancache has complete discretion over what pages to preserve and what
40pages to discard and when.
41
42When a cleancache-enabled filesystem is mounted, "init_fs" should be called to obtain a
43pool id which, if positive, must be saved in the filesystem's superblock;
44a negative return value indicates failure. A "put_page" will copy a
45(presumably about-to-be-evicted) page into cleancache and associate it with
46the pool id, a file key, and a page index into the file. (The combination
47of a pool id, a file key, and an index is sometimes called a "handle".)
48A "get_page" will copy the page, if found, from cleancache into kernel memory.
49A "flush_page" will ensure the page no longer is present in cleancache;
50a "flush_inode" will flush all pages associated with the specified file;
51and, when a filesystem is unmounted, a "flush_fs" will flush all pages in
52all files specified by the given pool id and also surrender the pool id.
53
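As a rough sketch (assuming the cleancache_ops layout from
include/linux/cleancache.h in this series; the foo_* callbacks are
placeholders that simply refuse every request), a backend registers itself
like this:

	#include <linux/init.h>
	#include <linux/cleancache.h>

	static int foo_init_fs(size_t pagesize) { return -1; }
	static int foo_init_shared_fs(char *uuid, size_t pagesize) { return -1; }
	static int foo_get_page(int pool, struct cleancache_filekey key,
				pgoff_t index, struct page *page) { return -1; }
	static void foo_put_page(int pool, struct cleancache_filekey key,
				 pgoff_t index, struct page *page) { }
	static void foo_flush_page(int pool, struct cleancache_filekey key,
				   pgoff_t index) { }
	static void foo_flush_inode(int pool, struct cleancache_filekey key) { }
	static void foo_flush_fs(int pool) { }

	static struct cleancache_ops foo_ops = {
		.init_fs	= foo_init_fs,
		.init_shared_fs	= foo_init_shared_fs,
		.get_page	= foo_get_page,
		.put_page	= foo_put_page,
		.flush_page	= foo_flush_page,
		.flush_inode	= foo_flush_inode,
		.flush_fs	= foo_flush_fs,
	};

	static struct cleancache_ops foo_old_ops;

	/* typically run at module init time */
	static int __init foo_init(void)
	{
		/* the return value is the previously registered ops, for chaining */
		foo_old_ops = cleancache_register_ops(&foo_ops);
		return 0;
	}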
54An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache
55to treat the pool as shared using a 128-bit UUID as a key. On systems
56that may run multiple kernels (such as hard partitioned or virtualized
57systems) that may share a clustered filesystem, and where cleancache
58may be shared among those kernels, calls to init_shared_fs that specify the
59same UUID will receive the same pool id, thus allowing the pages to
60be shared. Note that any security requirements must be imposed outside
61of the kernel (e.g. by "tools" that control cleancache). Or a
62cleancache implementation can simply disable shared_init by always
63returning a negative value.
64
65If a get_page is successful on a non-shared pool, the page is flushed (thus
66making cleancache an "exclusive" cache). On a shared pool, the page
67is NOT flushed on a successful get_page so that it remains accessible to
68other sharers. The kernel is responsible for ensuring coherency between
69cleancache (shared or not), the page cache, and the filesystem, using
70cleancache flush operations as required.
71
72Note that cleancache must enforce put-put-get coherency and get-get
73coherency. For the former, if two puts are made to the same handle but
74with different data, say AAA by the first put and BBB by the second, a
75subsequent get can never return the stale data (AAA). For get-get coherency,
76if a get for a given handle fails, subsequent gets for that handle will
77never succeed unless preceded by a successful put with that handle.
78
79Last, cleancache provides no SMP serialization guarantees; if two
80different Linux threads are simultaneously putting and flushing a page
81with the same handle, the results are indeterminate. Callers must
82lock the page to ensure serial behavior.
83
84CLEANCACHE PERFORMANCE METRICS
85
86Cleancache monitoring is done by sysfs files in the
87/sys/kernel/mm/cleancache directory. The effectiveness of cleancache
88can be measured (across all filesystems) with:
89
90succ_gets - number of gets that were successful
91failed_gets - number of gets that failed
92puts - number of puts attempted (all "succeed")
93flushes - number of flushes attempted
94
95A backend implementation may provide additional metrics.
96
97FAQ
98
991) Where's the value? (Andrew Morton)
100
101Cleancache provides a significant performance benefit to many workloads
102in many environments with negligible overhead by improving the
103effectiveness of the pagecache. Clean pagecache pages are
104saved in transcendent memory (RAM that is otherwise not directly
105addressable to the kernel); fetching those pages later avoids "refaults"
106and thus disk reads.
107
108Cleancache (and its sister code "frontswap") provide interfaces for
109this transcendent memory (aka "tmem"), which conceptually lies between
110fast kernel-directly-addressable RAM and slower DMA/asynchronous devices.
111Disallowing direct kernel or userland reads/writes to tmem
112is ideal when data is transformed to a different form and size (such
113as with compression) or secretly moved (as might be useful for write-
114balancing for some RAM-like devices). Evicted page-cache pages (and
115swap pages) are a great use for this kind of slower-than-RAM-but-much-
116faster-than-disk transcendent memory, and the cleancache (and frontswap)
117"page-object-oriented" specification provides a nice way to read and
118write -- and indirectly "name" -- the pages.
119
120In the virtual case, the whole point of virtualization is to statistically
121multiplex physical resources across the varying demands of multiple
122virtual machines. This is really hard to do with RAM and efforts to
123do it well with no kernel change have essentially failed (except in some
124well-publicized special-case workloads). Cleancache -- and frontswap --
125with a fairly small impact on the kernel, provide a huge amount
126of flexibility for more dynamic, flexible RAM multiplexing.
127Specifically, the Xen Transcendent Memory backend allows otherwise
128"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
129virtual machines, but the pages can be compressed and deduplicated to
130optimize RAM utilization. And when guest OS's are induced to surrender
131underutilized RAM (e.g. with "self-ballooning"), page cache pages
132are the first to go, and cleancache allows those pages to be
133saved and reclaimed if overall host system memory conditions allow.
134
135And the identical interface used for cleancache can be used in
136physical systems as well. The zcache driver acts as a memory-hungry
137device that stores pages of data in a compressed state. And
138the proposed "RAMster" driver shares RAM across multiple physical
139systems.
140
1412) Why does cleancache have its sticky fingers so deep inside the
142 filesystems and VFS? (Andrew Morton and Christoph Hellwig)
143
144The core hooks for cleancache in VFS are in most cases a single line
145and the minimum set are placed precisely where needed to maintain
146coherency (via cleancache_flush operations) between cleancache,
147the page cache, and disk. All hooks compile into nothingness if
148cleancache is config'ed off and turn into a function-pointer-
149compare-to-NULL if config'ed on but no backend claims the ops
150functions, or to a compare-struct-element-to-negative if a
151backend claims the ops functions but a filesystem doesn't enable
152cleancache.
153
154Some filesystems are built entirely on top of VFS and the hooks
155in VFS are sufficient, so don't require an "init_fs" hook; the
156initial implementation of cleancache didn't provide this hook.
157But for some filesystems (such as btrfs), the VFS hooks are
158incomplete and one or more hooks in fs-specific code are required.
159And for some other filesystems, such as tmpfs, cleancache may
160be counterproductive. So it seemed prudent to require a filesystem
161to "opt in" to use cleancache, which requires adding a hook in
162each filesystem. Some filesystems are not yet supported by cleancache
163simply because they haven't been tested. The existing set should
164be sufficient to validate the concept, the opt-in approach means
165that untested filesystems are not affected, and the hooks in the
166existing filesystems should make it very easy to add more
167filesystems in the future.
168
169The total impact of the hooks to existing fs and mm files is only
170about 40 lines added (not counting comments and blank lines).
171
1723) Why not make cleancache asynchronous and batched so it can
173 more easily interface with real devices with DMA instead
174 of copying each individual page? (Minchan Kim)
175
176The one-page-at-a-time copy semantics simplifies the implementation
177on both the frontend and backend and also allows the backend to
178do fancy things on-the-fly like page compression and
179page deduplication. And since the data is "gone" (copied into/out
180of the pageframe) before the cleancache get/put call returns,
181a great deal of race conditions and potential coherency issues
182are avoided. While the interface seems odd for a "real device"
183or for real kernel-addressable RAM, it makes perfect sense for
184transcendent memory.
185
1864) Why is non-shared cleancache "exclusive"? And where is the
187 page "flushed" after a "get"? (Minchan Kim)
188
189The main reason is to free up space in transcendent memory and
190to avoid unnecessary cleancache_flush calls. If you want inclusive,
191the page can be "put" immediately following the "get". If
192put-after-get for inclusive becomes common, the interface could
193be easily extended to add a "get_no_flush" call.
194
195The flush is done by the cleancache backend implementation.
196
1975) What's the performance impact?
198
199Performance analysis has been presented at OLS'09 and LCA'10.
200Briefly, performance gains can be significant on most workloads,
201especially when memory pressure is high (e.g. when RAM is
202overcommitted in a virtual workload); and because the hooks are
203invoked primarily in place of or in addition to a disk read/write,
204overhead is negligible even in worst case workloads. Basically
205cleancache replaces I/O with memory-copy-CPU-overhead; on older
206single-core systems with slow memory-copy speeds, cleancache
207has little value, but in newer multicore machines, especially
208consolidated/virtualized machines, it has great value.
209
2106) How do I add cleancache support for filesystem X? (Boaz Harrash)
211
212Filesystems that are well-behaved and conform to certain
213restrictions can utilize cleancache simply by making a call to
214cleancache_init_fs at mount time. Unusual, misbehaving, or
215poorly layered filesystems must either add additional hooks
216and/or undergo extensive additional testing... or should just
217not enable the optional cleancache.
218
219Some points for a filesystem to consider:
220
221- The FS should be block-device-based (e.g. a ram-based FS such
222 as tmpfs should not enable cleancache)
223- To ensure coherency/correctness, the FS must ensure that all
224 file removal or truncation operations either go through VFS or
225 add hooks to do the equivalent cleancache "flush" operations
226- To ensure coherency/correctness, either inode numbers must
227 be unique across the lifetime of the on-disk file OR the
228 FS must provide an "encode_fh" function.
229- The FS must call the VFS superblock alloc and deactivate routines
230 or add hooks to do the equivalent cleancache calls done there.
231- To maximize performance, all pages fetched from the FS should
232 go through the do_mpage_readpage routine or the FS should add
233 hooks to do the equivalent (cf. btrfs)
234- Currently, the FS blocksize must be the same as PAGESIZE. This
235 is not an architectural restriction, but no backends currently
236 support anything different.
237- A clustered FS should invoke the "shared_init_fs" cleancache
238 hook to get best performance for some backends.
239
2407) Why not use the KVA of the inode as the key? (Christoph Hellwig)
241
242If cleancache would use the inode virtual address instead of
243inode/filehandle, the pool id could be eliminated. But, this
244won't work because cleancache retains pagecache data pages
245persistently even when the inode has been pruned from the
246inode unused list, and only flushes the data page if the file
247gets removed/truncated. So if cleancache used the inode kva,
248there would be potential coherency issues if/when the inode
249kva is reused for a different file. Alternately, if cleancache
250flushed the pages when the inode kva was freed, much of the value
251of cleancache would be lost because the cache of pages in cleancache
252is potentially much larger than the kernel pagecache and is most
253useful if the pages survive inode cache removal.
254
2558) Why is a global variable required?
256
257The cleancache_enabled flag is checked in all of the frequently-used
258cleancache hooks. The alternative is a function call to check a static
259variable. Since cleancache is enabled dynamically at runtime, systems
260that don't enable cleancache would suffer thousands (possibly
261tens-of-thousands) of unnecessary function calls per second. So the
262global variable allows cleancache to be enabled by default at compile
263time, but have insignificant performance impact when cleancache remains
264disabled at runtime.
265
2669) Does cleancache work with KVM?
267
268The memory model of KVM is sufficiently different that a cleancache
269backend may have less value for KVM. This remains to be tested,
270especially in an overcommitted system.
271
27210) Does cleancache work in userspace? It sounds useful for
273 memory hungry caches like web browsers. (Jamie Lokier)
274
275No plans yet, though we agree it sounds useful, at least for
276apps that bypass the page cache (e.g. O_DIRECT).
277
278Last updated: Dan Magenheimer, April 13 2011
diff --git a/Documentation/vm/locking b/Documentation/vm/locking
index 25fadb448760..f61228bd6395 100644
--- a/Documentation/vm/locking
+++ b/Documentation/vm/locking
@@ -66,7 +66,7 @@ in some cases it is not really needed. Eg, vm_start is modified by
66expand_stack(), it is hard to come up with a destructive scenario without 66expand_stack(), it is hard to come up with a destructive scenario without
67having the vmlist protection in this case. 67having the vmlist protection in this case.
68 68
69The page_table_lock nests with the inode i_mmap_lock and the kmem cache 69The page_table_lock nests with the inode i_mmap_mutex and the kmem cache
70c_spinlock spinlocks. This is okay, since the kmem code asks for pages after 70c_spinlock spinlocks. This is okay, since the kmem code asks for pages after
71dropping c_spinlock. The page_table_lock also nests with pagecache_lock and 71dropping c_spinlock. The page_table_lock also nests with pagecache_lock and
72pagemap_lru_lock spinlocks, and no code asks for memory with these locks 72pagemap_lru_lock spinlocks, and no code asks for memory with these locks
diff --git a/MAINTAINERS b/MAINTAINERS
index 98c324b07a16..572b5b20ba48 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -287,35 +287,35 @@ F: sound/pci/ad1889.*
287 287
288AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER 288AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
289M: Michael Hennerich <michael.hennerich@analog.com> 289M: Michael Hennerich <michael.hennerich@analog.com>
290L: device-driver-devel@blackfin.uclinux.org 290L: device-drivers-devel@blackfin.uclinux.org
291W: http://wiki.analog.com/AD5254 291W: http://wiki.analog.com/AD5254
292S: Supported 292S: Supported
293F: drivers/misc/ad525x_dpot.c 293F: drivers/misc/ad525x_dpot.c
294 294
295AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821) 295AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
296M: Michael Hennerich <michael.hennerich@analog.com> 296M: Michael Hennerich <michael.hennerich@analog.com>
297L: device-driver-devel@blackfin.uclinux.org 297L: device-drivers-devel@blackfin.uclinux.org
298W: http://wiki.analog.com/AD5398 298W: http://wiki.analog.com/AD5398
299S: Supported 299S: Supported
300F: drivers/regulator/ad5398.c 300F: drivers/regulator/ad5398.c
301 301
302AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A) 302AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
303M: Michael Hennerich <michael.hennerich@analog.com> 303M: Michael Hennerich <michael.hennerich@analog.com>
304L: device-driver-devel@blackfin.uclinux.org 304L: device-drivers-devel@blackfin.uclinux.org
305W: http://wiki.analog.com/AD7142 305W: http://wiki.analog.com/AD7142
306S: Supported 306S: Supported
307F: drivers/input/misc/ad714x.c 307F: drivers/input/misc/ad714x.c
308 308
309AD7877 TOUCHSCREEN DRIVER 309AD7877 TOUCHSCREEN DRIVER
310M: Michael Hennerich <michael.hennerich@analog.com> 310M: Michael Hennerich <michael.hennerich@analog.com>
311L: device-driver-devel@blackfin.uclinux.org 311L: device-drivers-devel@blackfin.uclinux.org
312W: http://wiki.analog.com/AD7877 312W: http://wiki.analog.com/AD7877
313S: Supported 313S: Supported
314F: drivers/input/touchscreen/ad7877.c 314F: drivers/input/touchscreen/ad7877.c
315 315
316AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889) 316AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
317M: Michael Hennerich <michael.hennerich@analog.com> 317M: Michael Hennerich <michael.hennerich@analog.com>
318L: device-driver-devel@blackfin.uclinux.org 318L: device-drivers-devel@blackfin.uclinux.org
319W: http://wiki.analog.com/AD7879 319W: http://wiki.analog.com/AD7879
320S: Supported 320S: Supported
321F: drivers/input/touchscreen/ad7879.c 321F: drivers/input/touchscreen/ad7879.c
@@ -341,7 +341,7 @@ F: drivers/net/wireless/adm8211.*
341 341
342ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501) 342ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
343M: Michael Hennerich <michael.hennerich@analog.com> 343M: Michael Hennerich <michael.hennerich@analog.com>
344L: device-driver-devel@blackfin.uclinux.org 344L: device-drivers-devel@blackfin.uclinux.org
345W: http://wiki.analog.com/ADP5520 345W: http://wiki.analog.com/ADP5520
346S: Supported 346S: Supported
347F: drivers/mfd/adp5520.c 347F: drivers/mfd/adp5520.c
@@ -352,7 +352,7 @@ F: drivers/input/keyboard/adp5520-keys.c
352 352
353ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587) 353ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
354M: Michael Hennerich <michael.hennerich@analog.com> 354M: Michael Hennerich <michael.hennerich@analog.com>
355L: device-driver-devel@blackfin.uclinux.org 355L: device-drivers-devel@blackfin.uclinux.org
356W: http://wiki.analog.com/ADP5588 356W: http://wiki.analog.com/ADP5588
357S: Supported 357S: Supported
358F: drivers/input/keyboard/adp5588-keys.c 358F: drivers/input/keyboard/adp5588-keys.c
@@ -360,7 +360,7 @@ F: drivers/gpio/adp5588-gpio.c
360 360
361ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863) 361ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
362M: Michael Hennerich <michael.hennerich@analog.com> 362M: Michael Hennerich <michael.hennerich@analog.com>
363L: device-driver-devel@blackfin.uclinux.org 363L: device-drivers-devel@blackfin.uclinux.org
364W: http://wiki.analog.com/ADP8860 364W: http://wiki.analog.com/ADP8860
365S: Supported 365S: Supported
366F: drivers/video/backlight/adp8860_bl.c 366F: drivers/video/backlight/adp8860_bl.c
@@ -387,7 +387,7 @@ F: drivers/hwmon/adt7475.c
387 387
388ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346) 388ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
389M: Michael Hennerich <michael.hennerich@analog.com> 389M: Michael Hennerich <michael.hennerich@analog.com>
390L: device-driver-devel@blackfin.uclinux.org 390L: device-drivers-devel@blackfin.uclinux.org
391W: http://wiki.analog.com/ADXL345 391W: http://wiki.analog.com/ADXL345
392S: Supported 392S: Supported
393F: drivers/input/misc/adxl34x.c 393F: drivers/input/misc/adxl34x.c
@@ -483,6 +483,13 @@ F: drivers/tty/serial/altera_jtaguart.c
483F: include/linux/altera_uart.h 483F: include/linux/altera_uart.h
484F: include/linux/altera_jtaguart.h 484F: include/linux/altera_jtaguart.h
485 485
486AMD FAM15H PROCESSOR POWER MONITORING DRIVER
487M: Andreas Herrmann <andreas.herrmann3@amd.com>
488L: lm-sensors@lm-sensors.org
489S: Maintained
490F: Documentation/hwmon/fam15h_power
491F: drivers/hwmon/fam15h_power.c
492
486AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER 493AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
487M: Thomas Dahlmann <dahlmann.thomas@arcor.de> 494M: Thomas Dahlmann <dahlmann.thomas@arcor.de>
488L: linux-geode@lists.infradead.org (moderated for non-subscribers) 495L: linux-geode@lists.infradead.org (moderated for non-subscribers)
@@ -526,7 +533,7 @@ S: Maintained
526F: drivers/infiniband/hw/amso1100/ 533F: drivers/infiniband/hw/amso1100/
527 534
528ANALOG DEVICES INC ASOC CODEC DRIVERS 535ANALOG DEVICES INC ASOC CODEC DRIVERS
529L: device-driver-devel@blackfin.uclinux.org 536L: device-drivers-devel@blackfin.uclinux.org
530L: alsa-devel@alsa-project.org (moderated for non-subscribers) 537L: alsa-devel@alsa-project.org (moderated for non-subscribers)
531W: http://wiki.analog.com/ 538W: http://wiki.analog.com/
532S: Supported 539S: Supported
@@ -924,6 +931,8 @@ F: drivers/mmc/host/msm_sdcc.h
924F: drivers/tty/serial/msm_serial.h 931F: drivers/tty/serial/msm_serial.h
925F: drivers/tty/serial/msm_serial.c 932F: drivers/tty/serial/msm_serial.c
926F: drivers/platform/msm/ 933F: drivers/platform/msm/
934F: drivers/*/pm8???-*
935F: include/linux/mfd/pm8xxx/
927T: git git://codeaurora.org/quic/kernel/davidb/linux-msm.git 936T: git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
928S: Maintained 937S: Maintained
929 938
@@ -2034,9 +2043,8 @@ F: net/ax25/ax25_timer.c
2034F: net/ax25/sysctl_net_ax25.c 2043F: net/ax25/sysctl_net_ax25.c
2035 2044
2036DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER 2045DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
2037M: Tobias Ringstrom <tori@unhappy.mine.nu>
2038L: netdev@vger.kernel.org 2046L: netdev@vger.kernel.org
2039S: Maintained 2047S: Orphan
2040F: Documentation/networking/dmfe.txt 2048F: Documentation/networking/dmfe.txt
2041F: drivers/net/tulip/dmfe.c 2049F: drivers/net/tulip/dmfe.c
2042 2050
@@ -2576,6 +2584,13 @@ S: Maintained
2576F: drivers/hwmon/f75375s.c 2584F: drivers/hwmon/f75375s.c
2577F: include/linux/f75375s.h 2585F: include/linux/f75375s.h
2578 2586
2587FIREWIRE AUDIO DRIVERS
2588M: Clemens Ladisch <clemens@ladisch.de>
2589L: alsa-devel@alsa-project.org (moderated for non-subscribers)
2590T: git git://git.alsa-project.org/alsa-kernel.git
2591S: Maintained
2592F: sound/firewire/
2593
2579FIREWIRE SUBSYSTEM 2594FIREWIRE SUBSYSTEM
2580M: Stefan Richter <stefanr@s5r6.in-berlin.de> 2595M: Stefan Richter <stefanr@s5r6.in-berlin.de>
2581L: linux1394-devel@lists.sourceforge.net 2596L: linux1394-devel@lists.sourceforge.net
@@ -3566,9 +3581,16 @@ M: Andrew Morton <akpm@linux-foundation.org>
3566M: Jan Kara <jack@suse.cz> 3581M: Jan Kara <jack@suse.cz>
3567L: linux-ext4@vger.kernel.org 3582L: linux-ext4@vger.kernel.org
3568S: Maintained 3583S: Maintained
3569F: fs/jbd*/ 3584F: fs/jbd/
3570F: include/linux/ext*jbd*.h 3585F: include/linux/ext3_jbd.h
3571F: include/linux/jbd*.h 3586F: include/linux/jbd.h
3587
3588JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
3589M: "Theodore Ts'o" <tytso@mit.edu>
3590L: linux-ext4@vger.kernel.org
3591S: Maintained
3592F: fs/jbd2/
3593F: include/linux/jbd2.h
3572 3594
3573JSM Neo PCI based serial card 3595JSM Neo PCI based serial card
3574M: Breno Leitao <leitao@linux.vnet.ibm.com> 3596M: Breno Leitao <leitao@linux.vnet.ibm.com>
@@ -3591,10 +3613,9 @@ F: Documentation/hwmon/k8temp
3591F: drivers/hwmon/k8temp.c 3613F: drivers/hwmon/k8temp.c
3592 3614
3593KCONFIG 3615KCONFIG
3594M: Roman Zippel <zippel@linux-m68k.org> 3616M: Michal Marek <mmarek@suse.cz>
3595L: linux-kbuild@vger.kernel.org 3617L: linux-kbuild@vger.kernel.org
3596Q: http://patchwork.kernel.org/project/linux-kbuild/list/ 3618S: Odd Fixes
3597S: Maintained
3598F: Documentation/kbuild/kconfig-language.txt 3619F: Documentation/kbuild/kconfig-language.txt
3599F: scripts/kconfig/ 3620F: scripts/kconfig/
3600 3621
@@ -3898,7 +3919,6 @@ F: drivers/*/*/*pasemi*
3898LINUX SECURITY MODULE (LSM) FRAMEWORK 3919LINUX SECURITY MODULE (LSM) FRAMEWORK
3899M: Chris Wright <chrisw@sous-sol.org> 3920M: Chris Wright <chrisw@sous-sol.org>
3900L: linux-security-module@vger.kernel.org 3921L: linux-security-module@vger.kernel.org
3901T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
3902S: Supported 3922S: Supported
3903 3923
3904LIS3LV02D ACCELEROMETER DRIVER 3924LIS3LV02D ACCELEROMETER DRIVER
@@ -4134,6 +4154,7 @@ M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
4134L: linux-mm@kvack.org 4154L: linux-mm@kvack.org
4135S: Maintained 4155S: Maintained
4136F: mm/memcontrol.c 4156F: mm/memcontrol.c
4157F: mm/page_cgroup.c
4137 4158
4138MEMORY TECHNOLOGY DEVICES (MTD) 4159MEMORY TECHNOLOGY DEVICES (MTD)
4139M: David Woodhouse <dwmw2@infradead.org> 4160M: David Woodhouse <dwmw2@infradead.org>
@@ -5987,7 +6008,7 @@ F: Documentation/filesystems/spufs.txt
5987F: arch/powerpc/platforms/cell/spufs/ 6008F: arch/powerpc/platforms/cell/spufs/
5988 6009
5989SQUASHFS FILE SYSTEM 6010SQUASHFS FILE SYSTEM
5990M: Phillip Lougher <phillip@lougher.demon.co.uk> 6011M: Phillip Lougher <phillip@squashfs.org.uk>
5991L: squashfs-devel@lists.sourceforge.net (subscribers-only) 6012L: squashfs-devel@lists.sourceforge.net (subscribers-only)
5992W: http://squashfs.org.uk 6013W: http://squashfs.org.uk
5993S: Maintained 6014S: Maintained
@@ -6796,6 +6817,13 @@ L: lm-sensors@lm-sensors.org
6796S: Maintained 6817S: Maintained
6797F: drivers/hwmon/vt8231.c 6818F: drivers/hwmon/vt8231.c
6798 6819
6820VUB300 USB to SDIO/SD/MMC bridge chip
6821M: Tony Olech <tony.olech@elandigitalsystems.com>
6822L: linux-mmc@vger.kernel.org
6823L: linux-usb@vger.kernel.org
6824S: Supported
6825F: drivers/mmc/host/vub300.c
6826
6799W1 DALLAS'S 1-WIRE BUS 6827W1 DALLAS'S 1-WIRE BUS
6800M: Evgeniy Polyakov <johnpol@2ka.mipt.ru> 6828M: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
6801S: Maintained 6829S: Maintained
diff --git a/Makefile b/Makefile
index 6b73d1eed1ea..529d93fa2430 100644
--- a/Makefile
+++ b/Makefile
@@ -220,6 +220,14 @@ ifeq ($(ARCH),sh64)
220 SRCARCH := sh 220 SRCARCH := sh
221endif 221endif
222 222
223# Additional ARCH settings for tile
224ifeq ($(ARCH),tilepro)
225 SRCARCH := tile
226endif
227ifeq ($(ARCH),tilegx)
228 SRCARCH := tile
229endif
230
223# Where to locate arch specific headers 231# Where to locate arch specific headers
224hdr-arch := $(SRCARCH) 232hdr-arch := $(SRCARCH)
225 233
@@ -1009,7 +1017,8 @@ include/generated/utsrelease.h: include/config/kernel.release FORCE
1009 1017
1010PHONY += headerdep 1018PHONY += headerdep
1011headerdep: 1019headerdep:
1012 $(Q)find include/ -name '*.h' | xargs --max-args 1 scripts/headerdep.pl 1020 $(Q)find $(srctree)/include/ -name '*.h' | xargs --max-args 1 \
1021 $(srctree)/scripts/headerdep.pl -I$(srctree)/include
1013 1022
1014# --------------------------------------------------------------------------- 1023# ---------------------------------------------------------------------------
1015 1024
@@ -1417,13 +1426,15 @@ tags TAGS cscope gtags: FORCE
1417# Scripts to check various things for consistency 1426# Scripts to check various things for consistency
1418# --------------------------------------------------------------------------- 1427# ---------------------------------------------------------------------------
1419 1428
1429PHONY += includecheck versioncheck coccicheck namespacecheck export_report
1430
1420includecheck: 1431includecheck:
1421 find * $(RCS_FIND_IGNORE) \ 1432 find $(srctree)/* $(RCS_FIND_IGNORE) \
1422 -name '*.[hcS]' -type f -print | sort \ 1433 -name '*.[hcS]' -type f -print | sort \
1423 | xargs $(PERL) -w $(srctree)/scripts/checkincludes.pl 1434 | xargs $(PERL) -w $(srctree)/scripts/checkincludes.pl
1424 1435
1425versioncheck: 1436versioncheck:
1426 find * $(RCS_FIND_IGNORE) \ 1437 find $(srctree)/* $(RCS_FIND_IGNORE) \
1427 -name '*.[hcS]' -type f -print | sort \ 1438 -name '*.[hcS]' -type f -print | sort \
1428 | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl 1439 | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl
1429 1440
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d24bacaa61e..26b0e2397a57 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -175,4 +175,7 @@ config HAVE_ARCH_JUMP_LABEL
175config HAVE_ARCH_MUTEX_CPU_RELAX 175config HAVE_ARCH_MUTEX_CPU_RELAX
176 bool 176 bool
177 177
178config HAVE_RCU_TABLE_FREE
179 bool
180
178source "kernel/gcov/Kconfig" 181source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9808998cc073..60219bf94198 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -12,6 +12,7 @@ config ALPHA
12 select GENERIC_IRQ_PROBE 12 select GENERIC_IRQ_PROBE
13 select AUTO_IRQ_AFFINITY if SMP 13 select AUTO_IRQ_AFFINITY if SMP
14 select GENERIC_IRQ_SHOW 14 select GENERIC_IRQ_SHOW
15 select ARCH_WANT_OPTIONAL_GPIOLIB
15 help 16 help
16 The Alpha is a 64-bit general-purpose processor designed and 17 The Alpha is a 64-bit general-purpose processor designed and
17 marketed by the Digital Equipment Corporation of blessed memory, 18 marketed by the Digital Equipment Corporation of blessed memory,
@@ -40,10 +41,6 @@ config ARCH_HAS_ILOG2_U64
40 bool 41 bool
41 default n 42 default n
42 43
43config GENERIC_FIND_NEXT_BIT
44 bool
45 default y
46
47config GENERIC_CALIBRATE_DELAY 44config GENERIC_CALIBRATE_DELAY
48 bool 45 bool
49 default y 46 default y
@@ -51,6 +48,9 @@ config GENERIC_CALIBRATE_DELAY
51config GENERIC_CMOS_UPDATE 48config GENERIC_CMOS_UPDATE
52 def_bool y 49 def_bool y
53 50
51config GENERIC_GPIO
52 def_bool y
53
54config ZONE_DMA 54config ZONE_DMA
55 bool 55 bool
56 default y 56 default y
diff --git a/arch/alpha/include/asm/gpio.h b/arch/alpha/include/asm/gpio.h
new file mode 100644
index 000000000000..7dc6a6343c06
--- /dev/null
+++ b/arch/alpha/include/asm/gpio.h
@@ -0,0 +1,55 @@
1/*
2 * Generic GPIO API implementation for Alpha.
3 *
4 * A straight copy of that for PowerPC which was:
5 *
6 * Copyright (c) 2007-2008 MontaVista Software, Inc.
7 *
8 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef _ASM_ALPHA_GPIO_H
17#define _ASM_ALPHA_GPIO_H
18
19#include <linux/errno.h>
20#include <asm-generic/gpio.h>
21
22#ifdef CONFIG_GPIOLIB
23
24/*
25 * We don't (yet) implement inlined/rapid versions for on-chip gpios.
26 * Just call gpiolib.
27 */
28static inline int gpio_get_value(unsigned int gpio)
29{
30 return __gpio_get_value(gpio);
31}
32
33static inline void gpio_set_value(unsigned int gpio, int value)
34{
35 __gpio_set_value(gpio, value);
36}
37
38static inline int gpio_cansleep(unsigned int gpio)
39{
40 return __gpio_cansleep(gpio);
41}
42
43static inline int gpio_to_irq(unsigned int gpio)
44{
45 return __gpio_to_irq(gpio);
46}
47
48static inline int irq_to_gpio(unsigned int irq)
49{
50 return -EINVAL;
51}
52
53#endif /* CONFIG_GPIOLIB */
54
55#endif /* _ASM_ALPHA_GPIO_H */
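
For reference, a minimal consumer sketch (not part of the patch above): it shows how a generic gpiolib user exercises the calls this new header routes to gpiolib on Alpha. The GPIO number (42) and the "example-led" label are placeholders.

	#include <linux/gpio.h>

	static int __init example_led_init(void)
	{
		int err;

		err = gpio_request(42, "example-led");	/* hypothetical line */
		if (err)
			return err;

		err = gpio_direction_output(42, 0);	/* start driven low */
		if (err) {
			gpio_free(42);
			return err;
		}

		gpio_set_value(42, 1);	/* routed through __gpio_set_value() above */
		return 0;
	}
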
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 3f390e8cc0b3..c46e714aa3e0 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -39,8 +39,6 @@ struct cpuinfo_alpha {
39 39
40extern struct cpuinfo_alpha cpu_data[NR_CPUS]; 40extern struct cpuinfo_alpha cpu_data[NR_CPUS];
41 41
42#define PROC_CHANGE_PENALTY 20
43
44#define hard_smp_processor_id() __hard_smp_processor_id() 42#define hard_smp_processor_id() __hard_smp_processor_id()
45#define raw_smp_processor_id() (current_thread_info()->cpu) 43#define raw_smp_processor_id() (current_thread_info()->cpu)
46 44
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 3ec35066f1dc..838eac128409 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -121,7 +121,7 @@ common_shutdown_1(void *generic_ptr)
 	/* Wait for the secondaries to halt. */
 	set_cpu_present(boot_cpuid, false);
 	set_cpu_possible(boot_cpuid, false);
-	while (cpus_weight(cpu_present_map))
+	while (cpumask_weight(cpu_present_mask))
 		barrier();
 #endif
 
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index edbddcbd5bc6..cc0fd862cf26 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1257,7 +1257,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
 #ifdef CONFIG_SMP
 	seq_printf(f, "cpus active\t\t: %u\n"
 		      "cpu active mask\t\t: %016lx\n",
-		       num_online_cpus(), cpus_addr(cpu_possible_map)[0]);
+		       num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
 #endif
 
 	show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 5a621c6d22ab..d739703608fc 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -451,7 +451,7 @@ setup_smp(void)
 	}
 
 	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
-	       smp_num_probed, cpu_present_map.bits[0]);
+	       smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
 }
 
 /*
@@ -629,8 +629,9 @@ smp_send_reschedule(int cpu)
 void
 smp_send_stop(void)
 {
-	cpumask_t to_whom = cpu_possible_map;
-	cpu_clear(smp_processor_id(), to_whom);
+	cpumask_t to_whom;
+	cpumask_copy(&to_whom, cpu_possible_mask);
+	cpumask_clear_cpu(smp_processor_id(), &to_whom);
 #ifdef DEBUG_IPI_MSG
 	if (hard_smp_processor_id() != boot_cpu_id)
 		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5ac00fd4cd0c..f8856829c22a 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -140,7 +140,7 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 
 	for (cpu = 0; cpu < 4; cpu++) {
 		unsigned long aff = cpu_irq_affinity[cpu];
-		if (cpu_isset(cpu, affinity))
+		if (cpumask_test_cpu(cpu, &affinity))
 			aff |= 1UL << irq;
 		else
 			aff &= ~(1UL << irq);
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index fea0e4620994..6994407e242a 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -65,10 +65,11 @@ titan_update_irq_hw(unsigned long mask)
 	register int bcpu = boot_cpuid;
 
 #ifdef CONFIG_SMP
-	cpumask_t cpm = cpu_present_map;
+	cpumask_t cpm;
 	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
 	unsigned long mask0, mask1, mask2, mask3, dummy;
 
+	cpumask_copy(&cpm, cpu_present_mask);
 	mask &= ~isa_enable;
 	mask0 = mask & titan_cpu_irq_affinity[0];
 	mask1 = mask & titan_cpu_irq_affinity[1];
@@ -84,10 +85,10 @@ titan_update_irq_hw(unsigned long mask)
 	dim1 = &cchip->dim1.csr;
 	dim2 = &cchip->dim2.csr;
 	dim3 = &cchip->dim3.csr;
-	if (!cpu_isset(0, cpm)) dim0 = &dummy;
-	if (!cpu_isset(1, cpm)) dim1 = &dummy;
-	if (!cpu_isset(2, cpm)) dim2 = &dummy;
-	if (!cpu_isset(3, cpm)) dim3 = &dummy;
+	if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy;
+	if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy;
+	if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy;
+	if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy;
 
 	*dim0 = mask0;
 	*dim1 = mask1;
@@ -137,7 +138,7 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 	int cpu;
 
 	for (cpu = 0; cpu < 4; cpu++) {
-		if (cpu_isset(cpu, affinity))
+		if (cpumask_test_cpu(cpu, &affinity))
 			titan_cpu_irq_affinity[cpu] |= 1UL << irq;
 		else
 			titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
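
For reference, a sketch of the cpumask API conversion applied in the alpha hunks above: the old cpu_* macros operated on cpumask_t values and the global cpu_*_map variables, while the new calls take struct cpumask pointers and use the cpu_*_mask accessors. Illustrative pattern only, not code from the patch.

	#include <linux/kernel.h>
	#include <linux/cpumask.h>

	static void example_mask_usage(int cpu)
	{
		cpumask_t tmp;				/* on-stack copy, as in smp_send_stop() */

		cpumask_copy(&tmp, cpu_possible_mask);	/* was: tmp = cpu_possible_map */
		cpumask_clear_cpu(cpu, &tmp);		/* was: cpu_clear(cpu, tmp) */

		if (cpumask_test_cpu(cpu, cpu_present_mask))	/* was: cpu_isset(cpu, cpu_present_map) */
			pr_info("cpu %d present, %u online\n",
				cpu, cpumask_weight(cpu_online_mask));
	}
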
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 86425ab53bf5..69d0c5761e2f 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -32,8 +32,6 @@
32#include <asm/console.h> 32#include <asm/console.h>
33#include <asm/tlb.h> 33#include <asm/tlb.h>
34 34
35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
36
37extern void die_if_kernel(char *,struct pt_regs *,long); 35extern void die_if_kernel(char *,struct pt_regs *,long);
38 36
39static struct pcb_struct original_pcb; 37static struct pcb_struct original_pcb;
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 7b2c56d8f930..3973ae395772 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -313,6 +313,7 @@ void __init paging_init(void)
313 zones_size[ZONE_DMA] = dma_local_pfn; 313 zones_size[ZONE_DMA] = dma_local_pfn;
314 zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn; 314 zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
315 } 315 }
316 node_set_state(nid, N_NORMAL_MEMORY);
316 free_area_init_node(nid, zones_size, start_pfn, NULL); 317 free_area_init_node(nid, zones_size, start_pfn, NULL);
317 } 318 }
318 319
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 03d01d783e3b..81cbe40c159c 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,13 +63,6 @@ config DEBUG_USER
63 8 - SIGSEGV faults 63 8 - SIGSEGV faults
64 16 - SIGBUS faults 64 16 - SIGBUS faults
65 65
66config DEBUG_STACK_USAGE
67 bool "Enable stack utilization instrumentation"
68 depends on DEBUG_KERNEL
69 help
70 Enables the display of the minimum amount of free stack which each
71 task has ever had available in the sysrq-T output.
72
73# These options are only for real kernel hackers who want to get their hands dirty. 66# These options are only for real kernel hackers who want to get their hands dirty.
74config DEBUG_LL 67config DEBUG_LL
75 bool "Kernel low-level debugging functions" 68 bool "Kernel low-level debugging functions"
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 076db52ff672..d5f00d7eb075 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -21,58 +21,22 @@ CONFIG_MODVERSIONS=y
21CONFIG_MODULE_SRCVERSION_ALL=y 21CONFIG_MODULE_SRCVERSION_ALL=y
22# CONFIG_BLK_DEV_BSG is not set 22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_ARCH_OMAP=y 23CONFIG_ARCH_OMAP=y
24CONFIG_ARCH_OMAP2=y
25CONFIG_ARCH_OMAP3=y
26CONFIG_ARCH_OMAP4=y
27CONFIG_OMAP_RESET_CLOCKS=y 24CONFIG_OMAP_RESET_CLOCKS=y
28CONFIG_OMAP_MUX_DEBUG=y 25CONFIG_OMAP_MUX_DEBUG=y
29CONFIG_OMAP_32K_TIMER=y
30CONFIG_MACH_OMAP_GENERIC=y
31CONFIG_ARCH_OMAP2420=y
32CONFIG_ARCH_OMAP2430=y
33CONFIG_ARCH_OMAP3430=y
34CONFIG_MACH_OMAP_H4=y
35CONFIG_MACH_OMAP_APOLLON=y
36CONFIG_MACH_OMAP_2430SDP=y
37CONFIG_MACH_OMAP3_BEAGLE=y
38CONFIG_MACH_DEVKIT8000=y
39CONFIG_MACH_OMAP_LDP=y
40CONFIG_MACH_OVERO=y
41CONFIG_MACH_OMAP3EVM=y
42CONFIG_MACH_OMAP3517EVM=y
43CONFIG_MACH_OMAP3_PANDORA=y
44CONFIG_MACH_OMAP3_TOUCHBOOK=y
45CONFIG_MACH_OMAP_3430SDP=y
46CONFIG_MACH_NOKIA_N8X0=y
47CONFIG_MACH_NOKIA_RX51=y
48CONFIG_MACH_OMAP_ZOOM2=y
49CONFIG_MACH_OMAP_ZOOM3=y
50CONFIG_MACH_CM_T35=y
51CONFIG_MACH_IGEP0020=y
52CONFIG_MACH_SBC3530=y
53CONFIG_MACH_OMAP_3630SDP=y
54CONFIG_MACH_OMAP_4430SDP=y
55CONFIG_ARM_THUMBEE=y 26CONFIG_ARM_THUMBEE=y
56CONFIG_ARM_L1_CACHE_SHIFT=5
57CONFIG_ARM_ERRATA_411920=y 27CONFIG_ARM_ERRATA_411920=y
58CONFIG_NO_HZ=y 28CONFIG_NO_HZ=y
59CONFIG_HIGH_RES_TIMERS=y 29CONFIG_HIGH_RES_TIMERS=y
60CONFIG_SMP=y 30CONFIG_SMP=y
61CONFIG_NR_CPUS=2 31CONFIG_NR_CPUS=2
62# CONFIG_LOCAL_TIMERS is not set
63CONFIG_AEABI=y
64CONFIG_LEDS=y 32CONFIG_LEDS=y
65CONFIG_ZBOOT_ROM_TEXT=0x0 33CONFIG_ZBOOT_ROM_TEXT=0x0
66CONFIG_ZBOOT_ROM_BSS=0x0 34CONFIG_ZBOOT_ROM_BSS=0x0
67CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200" 35CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
68CONFIG_KEXEC=y 36CONFIG_KEXEC=y
69CONFIG_FPE_NWFPE=y 37CONFIG_FPE_NWFPE=y
70CONFIG_VFP=y
71CONFIG_NEON=y
72CONFIG_BINFMT_MISC=y 38CONFIG_BINFMT_MISC=y
73CONFIG_PM=y
74CONFIG_PM_DEBUG=y 39CONFIG_PM_DEBUG=y
75CONFIG_PM_RUNTIME=y
76CONFIG_NET=y 40CONFIG_NET=y
77CONFIG_PACKET=y 41CONFIG_PACKET=y
78CONFIG_UNIX=y 42CONFIG_UNIX=y
@@ -89,14 +53,6 @@ CONFIG_IP_PNP_RARP=y
89# CONFIG_IPV6 is not set 53# CONFIG_IPV6 is not set
90CONFIG_NETFILTER=y 54CONFIG_NETFILTER=y
91CONFIG_BT=m 55CONFIG_BT=m
92CONFIG_BT_L2CAP=m
93CONFIG_BT_SCO=m
94CONFIG_BT_RFCOMM=y
95CONFIG_BT_RFCOMM_TTY=y
96CONFIG_BT_BNEP=m
97CONFIG_BT_BNEP_MC_FILTER=y
98CONFIG_BT_BNEP_PROTO_FILTER=y
99CONFIG_BT_HIDP=m
100CONFIG_BT_HCIUART=m 56CONFIG_BT_HCIUART=m
101CONFIG_BT_HCIUART_H4=y 57CONFIG_BT_HCIUART_H4=y
102CONFIG_BT_HCIUART_BCSP=y 58CONFIG_BT_HCIUART_BCSP=y
@@ -107,11 +63,9 @@ CONFIG_CFG80211=m
107CONFIG_MAC80211=m 63CONFIG_MAC80211=m
108CONFIG_MAC80211_RC_PID=y 64CONFIG_MAC80211_RC_PID=y
109CONFIG_MAC80211_RC_DEFAULT_PID=y 65CONFIG_MAC80211_RC_DEFAULT_PID=y
110CONFIG_MAC80211_LEDS=y
111CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 66CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
112CONFIG_CONNECTOR=y 67CONFIG_CONNECTOR=y
113CONFIG_MTD=y 68CONFIG_MTD=y
114CONFIG_MTD_CONCAT=y
115CONFIG_MTD_CMDLINE_PARTS=y 69CONFIG_MTD_CMDLINE_PARTS=y
116CONFIG_MTD_CHAR=y 70CONFIG_MTD_CHAR=y
117CONFIG_MTD_BLOCK=y 71CONFIG_MTD_BLOCK=y
@@ -127,7 +81,6 @@ CONFIG_MTD_UBI=y
127CONFIG_BLK_DEV_LOOP=y 81CONFIG_BLK_DEV_LOOP=y
128CONFIG_BLK_DEV_RAM=y 82CONFIG_BLK_DEV_RAM=y
129CONFIG_BLK_DEV_RAM_SIZE=16384 83CONFIG_BLK_DEV_RAM_SIZE=16384
130CONFIG_EEPROM_LEGACY=y
131CONFIG_SCSI=y 84CONFIG_SCSI=y
132CONFIG_BLK_DEV_SD=y 85CONFIG_BLK_DEV_SD=y
133CONFIG_SCSI_MULTI_LUN=y 86CONFIG_SCSI_MULTI_LUN=y
@@ -158,19 +111,15 @@ CONFIG_TOUCHSCREEN_ADS7846=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TWL4030_PWRBUTTON=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250_NR_UARTS=32
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
-# CONFIG_LEGACY_PTYS is not set
 CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_OMAP=y
 CONFIG_SPI=y
 CONFIG_SPI_OMAP24XX=y
 CONFIG_DEBUG_GPIO=y
@@ -181,10 +130,6 @@ CONFIG_POWER_SUPPLY=y
181CONFIG_WATCHDOG=y 130CONFIG_WATCHDOG=y
182CONFIG_OMAP_WATCHDOG=y 131CONFIG_OMAP_WATCHDOG=y
183CONFIG_TWL4030_WATCHDOG=y 132CONFIG_TWL4030_WATCHDOG=y
184CONFIG_MENELAUS=y
185CONFIG_TWL4030_CORE=y
186CONFIG_TWL4030_POWER=y
187CONFIG_REGULATOR=y
188CONFIG_REGULATOR_TWL4030=y 133CONFIG_REGULATOR_TWL4030=y
189CONFIG_REGULATOR_TPS65023=y 134CONFIG_REGULATOR_TPS65023=y
190CONFIG_REGULATOR_TPS6507X=y 135CONFIG_REGULATOR_TPS6507X=y
@@ -208,7 +153,6 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
208CONFIG_LCD_CLASS_DEVICE=y 153CONFIG_LCD_CLASS_DEVICE=y
209CONFIG_LCD_PLATFORM=y 154CONFIG_LCD_PLATFORM=y
210CONFIG_DISPLAY_SUPPORT=y 155CONFIG_DISPLAY_SUPPORT=y
211# CONFIG_VGA_CONSOLE is not set
212CONFIG_FRAMEBUFFER_CONSOLE=y 156CONFIG_FRAMEBUFFER_CONSOLE=y
213CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 157CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
214CONFIG_FONTS=y 158CONFIG_FONTS=y
@@ -217,25 +161,20 @@ CONFIG_FONT_8x16=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
 CONFIG_SND_VERBOSE_PRINTK=y
 CONFIG_SND_DEBUG=y
-CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_SOC=y
-CONFIG_SND_OMAP_SOC=y
-CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_SOC=m
+CONFIG_SND_OMAP_SOC=m
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
 CONFIG_USB=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
-# CONFIG_USB_OTG_WHITELIST is not set
 CONFIG_USB_MON=y
-# CONFIG_USB_MUSB_HDRC is not set
-# CONFIG_USB_MUSB_OTG is not set
-# CONFIG_USB_GADGET_MUSB_HDRC is not set
-CONFIG_USB_MUSB_DEBUG=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_LIBUSUAL=y
@@ -250,18 +189,12 @@ CONFIG_MMC_UNSAFE_RESUME=y
250CONFIG_SDIO_UART=y 189CONFIG_SDIO_UART=y
251CONFIG_MMC_OMAP=y 190CONFIG_MMC_OMAP=y
252CONFIG_MMC_OMAP_HS=y 191CONFIG_MMC_OMAP_HS=y
253CONFIG_LEDS_CLASS=y
254CONFIG_LEDS_GPIO=y
255CONFIG_LEDS_TRIGGER_TIMER=y
256CONFIG_LEDS_TRIGGER_HEARTBEAT=y
257CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
258CONFIG_RTC_CLASS=y 192CONFIG_RTC_CLASS=y
259CONFIG_RTC_DRV_TWL92330=y 193CONFIG_RTC_DRV_TWL92330=y
260CONFIG_RTC_DRV_TWL4030=y 194CONFIG_RTC_DRV_TWL4030=y
261CONFIG_EXT2_FS=y 195CONFIG_EXT2_FS=y
262CONFIG_EXT3_FS=y 196CONFIG_EXT3_FS=y
263# CONFIG_EXT3_FS_XATTR is not set 197# CONFIG_EXT3_FS_XATTR is not set
264CONFIG_INOTIFY=y
265CONFIG_QUOTA=y 198CONFIG_QUOTA=y
266CONFIG_QFMT_V2=y 199CONFIG_QFMT_V2=y
267CONFIG_MSDOS_FS=y 200CONFIG_MSDOS_FS=y
@@ -285,12 +218,10 @@ CONFIG_NLS_CODEPAGE_437=y
285CONFIG_NLS_ISO8859_1=y 218CONFIG_NLS_ISO8859_1=y
286CONFIG_PRINTK_TIME=y 219CONFIG_PRINTK_TIME=y
287CONFIG_MAGIC_SYSRQ=y 220CONFIG_MAGIC_SYSRQ=y
288CONFIG_DEBUG_FS=y
289CONFIG_DEBUG_KERNEL=y 221CONFIG_DEBUG_KERNEL=y
290CONFIG_SCHEDSTATS=y 222CONFIG_SCHEDSTATS=y
291CONFIG_TIMER_STATS=y 223CONFIG_TIMER_STATS=y
292CONFIG_PROVE_LOCKING=y 224CONFIG_PROVE_LOCKING=y
293# CONFIG_LOCK_STAT is not set
294CONFIG_DEBUG_SPINLOCK_SLEEP=y 225CONFIG_DEBUG_SPINLOCK_SLEEP=y
295# CONFIG_DEBUG_BUGVERBOSE is not set 226# CONFIG_DEBUG_BUGVERBOSE is not set
296CONFIG_DEBUG_INFO=y 227CONFIG_DEBUG_INFO=y
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 6b7403fd8f54..b4892a06442c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -203,8 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
203#define find_first_bit(p,sz) _find_first_bit_le(p,sz) 203#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
204#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) 204#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off)
205 205
206#define WORD_BITOFF_TO_LE(x) ((x))
207
208#else 206#else
209/* 207/*
210 * These are the big endian, atomic definitions. 208 * These are the big endian, atomic definitions.
@@ -214,8 +212,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
214#define find_first_bit(p,sz) _find_first_bit_be(p,sz) 212#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
215#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) 213#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off)
216 214
217#define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18)
218
219#endif 215#endif
220 216
221#if __LINUX_ARM_ARCH__ < 5 217#if __LINUX_ARM_ARCH__ < 5
@@ -287,55 +283,29 @@ static inline int fls(int x)
287#include <asm-generic/bitops/hweight.h> 283#include <asm-generic/bitops/hweight.h>
288#include <asm-generic/bitops/lock.h> 284#include <asm-generic/bitops/lock.h>
289 285
290static inline void __set_bit_le(int nr, void *addr) 286#ifdef __ARMEB__
291{
292 __set_bit(WORD_BITOFF_TO_LE(nr), addr);
293}
294
295static inline void __clear_bit_le(int nr, void *addr)
296{
297 __clear_bit(WORD_BITOFF_TO_LE(nr), addr);
298}
299
300static inline int __test_and_set_bit_le(int nr, void *addr)
301{
302 return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
303}
304
305static inline int test_and_set_bit_le(int nr, void *addr)
306{
307 return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
308}
309
310static inline int __test_and_clear_bit_le(int nr, void *addr)
311{
312 return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
313}
314
315static inline int test_and_clear_bit_le(int nr, void *addr)
316{
317 return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
318}
319
320static inline int test_bit_le(int nr, const void *addr)
321{
322 return test_bit(WORD_BITOFF_TO_LE(nr), addr);
323}
324 287
325static inline int find_first_zero_bit_le(const void *p, unsigned size) 288static inline int find_first_zero_bit_le(const void *p, unsigned size)
326{ 289{
327 return _find_first_zero_bit_le(p, size); 290 return _find_first_zero_bit_le(p, size);
328} 291}
292#define find_first_zero_bit_le find_first_zero_bit_le
329 293
330static inline int find_next_zero_bit_le(const void *p, int size, int offset) 294static inline int find_next_zero_bit_le(const void *p, int size, int offset)
331{ 295{
332 return _find_next_zero_bit_le(p, size, offset); 296 return _find_next_zero_bit_le(p, size, offset);
333} 297}
298#define find_next_zero_bit_le find_next_zero_bit_le
334 299
335static inline int find_next_bit_le(const void *p, int size, int offset) 300static inline int find_next_bit_le(const void *p, int size, int offset)
336{ 301{
337 return _find_next_bit_le(p, size, offset); 302 return _find_next_bit_le(p, size, offset);
338} 303}
304#define find_next_bit_le find_next_bit_le
305
306#endif
307
308#include <asm-generic/bitops/le.h>
339 309
340/* 310/*
341 * Ext2 is defined to use little-endian byte ordering. 311 * Ext2 is defined to use little-endian byte ordering.
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a87664f54f93..d2b514fd76f4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -20,12 +20,6 @@
20 20
21#define raw_smp_processor_id() (current_thread_info()->cpu) 21#define raw_smp_processor_id() (current_thread_info()->cpu)
22 22
23/*
24 * at the moment, there's not a big penalty for changing CPUs
25 * (the >big< penalty is running SMP in the first place)
26 */
27#define PROC_CHANGE_PENALTY 15
28
29struct seq_file; 23struct seq_file;
30 24
31/* 25/*
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 82dfe5d0c41e..265f908c4a6e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -41,12 +41,12 @@
  */
 #if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
 #define tlb_fast_mode(tlb)	0
-#define FREE_PTE_NR	500
 #else
 #define tlb_fast_mode(tlb)	1
-#define FREE_PTE_NR	0
 #endif
 
+#define MMU_GATHER_BUNDLE	8
+
 /*
  * TLB handling.  This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
@@ -58,7 +58,9 @@ struct mmu_gather {
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
-	struct page		*pages[FREE_PTE_NR];
+	unsigned int		max;
+	struct page		**pages;
+	struct page		*local[MMU_GATHER_BUNDLE];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -97,26 +99,37 @@ static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
 	}
 }
 
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
+{
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(struct page *);
+	}
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
 	if (!tlb_fast_mode(tlb)) {
 		free_pages_and_swap_cache(tlb->pages, tlb->nr);
 		tlb->nr = 0;
+		if (tlb->pages == tlb->local)
+			__tlb_alloc_page(tlb);
 	}
 }
 
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
+	tlb->fullmm = fullmm;
 	tlb->vma = NULL;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	tlb->nr = 0;
-
-	return tlb;
+	__tlb_alloc_page(tlb);
 }
 
 static inline void
@@ -127,7 +140,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
@@ -162,15 +176,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 		tlb_flush(tlb);
 }
 
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
-	} else {
-		tlb->pages[tlb->nr++] = page;
-		if (tlb->nr >= FREE_PTE_NR)
-			tlb_flush_mmu(tlb);
+		return 1; /* avoid calling tlb_flush_mmu */
 	}
+
+	tlb->pages[tlb->nr++] = page;
+	VM_BUG_ON(tlb->nr > tlb->max);
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
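
For reference, a sketch of how callers drive the reworked ARM mmu_gather after this change: the gather is now caller-provided (typically on the stack) and filled in by tlb_gather_mmu(), instead of being returned from a per-CPU variable. Illustrative only; the function and variable names below are made up.

	#include <asm/tlb.h>

	static void example_unmap(struct mm_struct *mm, struct page *page,
				  unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm, 0);	/* was: tlb = tlb_gather_mmu(mm, 0); */
		tlb_remove_page(&tlb, page);	/* batches pages until tlb->max is hit */
		tlb_finish_mmu(&tlb, start, end);
	}
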
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
new file mode 100644
index 000000000000..292d55ed2113
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
@@ -0,0 +1,78 @@
1/*
2 * PTP 1588 clock using the IXP46X
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _IXP46X_TS_H_
22#define _IXP46X_TS_H_
23
24#define DEFAULT_ADDEND 0xF0000029
25#define TICKS_NS_SHIFT 4
26
27struct ixp46x_channel_ctl {
28 u32 ch_control; /* 0x40 Time Synchronization Channel Control */
29 u32 ch_event; /* 0x44 Time Synchronization Channel Event */
30 u32 tx_snap_lo; /* 0x48 Transmit Snapshot Low Register */
31 u32 tx_snap_hi; /* 0x4C Transmit Snapshot High Register */
32 u32 rx_snap_lo; /* 0x50 Receive Snapshot Low Register */
33 u32 rx_snap_hi; /* 0x54 Receive Snapshot High Register */
34 u32 src_uuid_lo; /* 0x58 Source UUID0 Low Register */
35 u32 src_uuid_hi; /* 0x5C Sequence Identifier/Source UUID0 High */
36};
37
38struct ixp46x_ts_regs {
39 u32 control; /* 0x00 Time Sync Control Register */
40 u32 event; /* 0x04 Time Sync Event Register */
41 u32 addend; /* 0x08 Time Sync Addend Register */
42 u32 accum; /* 0x0C Time Sync Accumulator Register */
43 u32 test; /* 0x10 Time Sync Test Register */
44 u32 unused; /* 0x14 */
45 u32 rsystime_lo; /* 0x18 RawSystemTime_Low Register */
46 u32 rsystime_hi; /* 0x1C RawSystemTime_High Register */
47 u32 systime_lo; /* 0x20 SystemTime_Low Register */
48 u32 systime_hi; /* 0x24 SystemTime_High Register */
49 u32 trgt_lo; /* 0x28 TargetTime_Low Register */
50 u32 trgt_hi; /* 0x2C TargetTime_High Register */
51 u32 asms_lo; /* 0x30 Auxiliary Slave Mode Snapshot Low */
52 u32 asms_hi; /* 0x34 Auxiliary Slave Mode Snapshot High */
53 u32 amms_lo; /* 0x38 Auxiliary Master Mode Snapshot Low */
54 u32 amms_hi; /* 0x3C Auxiliary Master Mode Snapshot High */
55
56 struct ixp46x_channel_ctl channel[3];
57};
58
59/* 0x00 Time Sync Control Register Bits */
60#define TSCR_AMM (1<<3)
61#define TSCR_ASM (1<<2)
62#define TSCR_TTM (1<<1)
63#define TSCR_RST (1<<0)
64
65/* 0x04 Time Sync Event Register Bits */
66#define TSER_SNM (1<<3)
67#define TSER_SNS (1<<2)
68#define TTIPEND (1<<1)
69
70/* 0x40 Time Synchronization Channel Control Register Bits */
71#define MASTER_MODE (1<<0)
72#define TIMESTAMP_ALL (1<<1)
73
74/* 0x44 Time Synchronization Channel Event Register Bits */
75#define TX_SNAPSHOT_LOCKED (1<<0)
76#define RX_SNAPSHOT_LOCKED (1<<1)
77
78#endif
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index b997a35830fc..19d5891c48e3 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -288,6 +288,7 @@ config MACH_IGEP0030
288 depends on ARCH_OMAP3 288 depends on ARCH_OMAP3
289 default y 289 default y
290 select OMAP_PACKAGE_CBB 290 select OMAP_PACKAGE_CBB
291 select MACH_IGEP0020
291 292
292config MACH_SBC3530 293config MACH_SBC3530
293 bool "OMAP3 SBC STALKER board" 294 bool "OMAP3 SBC STALKER board"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 66dfbccacd25..b14807794401 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -229,8 +229,6 @@ obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o \
229obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o 229obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
230obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \ 230obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \
231 hsmmc.o 231 hsmmc.o
232obj-$(CONFIG_MACH_IGEP0030) += board-igep0030.o \
233 hsmmc.o
234obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \ 232obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \
235 hsmmc.o 233 hsmmc.o
236obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \ 234obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \
@@ -270,3 +268,5 @@ obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o
270 268
271disp-$(CONFIG_OMAP2_DSS) := display.o 269disp-$(CONFIG_OMAP2_DSS) := display.o
272obj-y += $(disp-m) $(disp-y) 270obj-y += $(disp-m) $(disp-y)
271
272obj-y += common-board-devices.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 1fa6bb896f41..d54969be0a54 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -41,6 +41,7 @@
41 41
42#include "mux.h" 42#include "mux.h"
43#include "hsmmc.h" 43#include "hsmmc.h"
44#include "common-board-devices.h"
44 45
45#define SDP2430_CS0_BASE 0x04000000 46#define SDP2430_CS0_BASE 0x04000000
46#define SECONDARY_LCD_GPIO 147 47#define SECONDARY_LCD_GPIO 147
@@ -180,15 +181,6 @@ static struct twl4030_platform_data sdp2430_twldata = {
180 .vmmc1 = &sdp2430_vmmc1, 181 .vmmc1 = &sdp2430_vmmc1,
181}; 182};
182 183
183static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = {
184 {
185 I2C_BOARD_INFO("twl4030", 0x48),
186 .flags = I2C_CLIENT_WAKE,
187 .irq = INT_24XX_SYS_NIRQ,
188 .platform_data = &sdp2430_twldata,
189 },
190};
191
192static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = { 184static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = {
193 { 185 {
194 I2C_BOARD_INFO("isp1301_omap", 0x2D), 186 I2C_BOARD_INFO("isp1301_omap", 0x2D),
@@ -201,8 +193,7 @@ static int __init omap2430_i2c_init(void)
201{ 193{
202 omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo, 194 omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
203 ARRAY_SIZE(sdp2430_i2c1_boardinfo)); 195 ARRAY_SIZE(sdp2430_i2c1_boardinfo));
204 omap_register_i2c_bus(2, 2600, sdp2430_i2c_boardinfo, 196 omap2_pmic_init("twl4030", &sdp2430_twldata);
205 ARRAY_SIZE(sdp2430_i2c_boardinfo));
206 return 0; 197 return 0;
207} 198}
208 199
@@ -217,11 +208,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
217 {} /* Terminator */ 208 {} /* Terminator */
218}; 209};
219 210
220static struct omap_musb_board_data musb_board_data = {
221 .interface_type = MUSB_INTERFACE_ULPI,
222 .mode = MUSB_OTG,
223 .power = 100,
224};
225static struct omap_usb_config sdp2430_usb_config __initdata = { 211static struct omap_usb_config sdp2430_usb_config __initdata = {
226 .otg = 1, 212 .otg = 1,
227#ifdef CONFIG_USB_GADGET_OMAP 213#ifdef CONFIG_USB_GADGET_OMAP
@@ -240,8 +226,6 @@ static struct omap_board_mux board_mux[] __initdata = {
240 226
241static void __init omap_2430sdp_init(void) 227static void __init omap_2430sdp_init(void)
242{ 228{
243 int ret;
244
245 omap2430_mux_init(board_mux, OMAP_PACKAGE_ZAC); 229 omap2430_mux_init(board_mux, OMAP_PACKAGE_ZAC);
246 230
247 omap_board_config = sdp2430_config; 231 omap_board_config = sdp2430_config;
@@ -255,14 +239,13 @@ static void __init omap_2430sdp_init(void)
 	omap2_usbfs_init(&sdp2430_usb_config);
 
 	omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP);
-	usb_musb_init(&musb_board_data);
+	usb_musb_init(NULL);
 
 	board_smc91x_init();
 
 	/* Turn off secondary LCD backlight */
-	ret = gpio_request(SECONDARY_LCD_GPIO, "Secondary LCD backlight");
-	if (ret == 0)
-		gpio_direction_output(SECONDARY_LCD_GPIO, 0);
+	gpio_request_one(SECONDARY_LCD_GPIO, GPIOF_OUT_INIT_LOW,
+			 "Secondary LCD backlight");
 }
 
 static void __init omap_2430sdp_map_io(void)
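
For reference, the gpio_request_one() conversion used throughout the OMAP board updates in this series (omap_2430sdp_init() above is one instance) collapses the request/configure/initial-value sequence into a single call. A hedged sketch of the equivalence; the GPIO number and "backlight" label are placeholders.

	#include <linux/gpio.h>

	/* old style: request, then set direction and initial level */
	static int backlight_init_old(unsigned gpio)
	{
		int err = gpio_request(gpio, "backlight");

		if (err)
			return err;
		return gpio_direction_output(gpio, 0);
	}

	/* new style, as in the board files converted by this series */
	static int backlight_init_new(unsigned gpio)
	{
		return gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "backlight");
	}
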
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 9afd087cc29c..ae2963a98041 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -19,7 +19,6 @@
19#include <linux/input.h> 19#include <linux/input.h>
20#include <linux/input/matrix_keypad.h> 20#include <linux/input/matrix_keypad.h>
21#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
22#include <linux/spi/ads7846.h>
23#include <linux/i2c/twl.h> 22#include <linux/i2c/twl.h>
24#include <linux/regulator/machine.h> 23#include <linux/regulator/machine.h>
25#include <linux/io.h> 24#include <linux/io.h>
@@ -37,8 +36,8 @@
37#include <plat/common.h> 36#include <plat/common.h>
38#include <plat/dma.h> 37#include <plat/dma.h>
39#include <plat/gpmc.h> 38#include <plat/gpmc.h>
40#include <plat/display.h> 39#include <video/omapdss.h>
41#include <plat/panel-generic-dpi.h> 40#include <video/omap-panel-generic-dpi.h>
42 41
43#include <plat/gpmc-smc91x.h> 42#include <plat/gpmc-smc91x.h>
44 43
@@ -48,6 +47,7 @@
48#include "hsmmc.h" 47#include "hsmmc.h"
49#include "pm.h" 48#include "pm.h"
50#include "control.h" 49#include "control.h"
50#include "common-board-devices.h"
51 51
52#define CONFIG_DISABLE_HFCLK 1 52#define CONFIG_DISABLE_HFCLK 1
53 53
@@ -59,24 +59,6 @@
59 59
60#define TWL4030_MSECURE_GPIO 22 60#define TWL4030_MSECURE_GPIO 22
61 61
62/* FIXME: These values need to be updated based on more profiling on 3430sdp*/
63static struct cpuidle_params omap3_cpuidle_params_table[] = {
64 /* C1 */
65 {1, 2, 2, 5},
66 /* C2 */
67 {1, 10, 10, 30},
68 /* C3 */
69 {1, 50, 50, 300},
70 /* C4 */
71 {1, 1500, 1800, 4000},
72 /* C5 */
73 {1, 2500, 7500, 12000},
74 /* C6 */
75 {1, 3000, 8500, 15000},
76 /* C7 */
77 {1, 10000, 30000, 300000},
78};
79
80static uint32_t board_keymap[] = { 62static uint32_t board_keymap[] = {
81 KEY(0, 0, KEY_LEFT), 63 KEY(0, 0, KEY_LEFT),
82 KEY(0, 1, KEY_RIGHT), 64 KEY(0, 1, KEY_RIGHT),
@@ -123,63 +105,14 @@ static struct twl4030_keypad_data sdp3430_kp_data = {
123 .rep = 1, 105 .rep = 1,
124}; 106};
125 107
126static int ts_gpio; /* Needed for ads7846_get_pendown_state */
127
128/**
129 * @brief ads7846_dev_init : Requests & sets GPIO line for pen-irq
130 *
131 * @return - void. If request gpio fails then Flag KERN_ERR.
132 */
133static void ads7846_dev_init(void)
134{
135 if (gpio_request(ts_gpio, "ADS7846 pendown") < 0) {
136 printk(KERN_ERR "can't get ads746 pen down GPIO\n");
137 return;
138 }
139
140 gpio_direction_input(ts_gpio);
141 gpio_set_debounce(ts_gpio, 310);
142}
143
144static int ads7846_get_pendown_state(void)
145{
146 return !gpio_get_value(ts_gpio);
147}
148
149static struct ads7846_platform_data tsc2046_config __initdata = {
150 .get_pendown_state = ads7846_get_pendown_state,
151 .keep_vref_on = 1,
152 .wakeup = true,
153};
154
155
156static struct omap2_mcspi_device_config tsc2046_mcspi_config = {
157 .turbo_mode = 0,
158 .single_channel = 1, /* 0: slave, 1: master */
159};
160
161static struct spi_board_info sdp3430_spi_board_info[] __initdata = {
162 [0] = {
163 /*
164 * TSC2046 operates at a max freqency of 2MHz, so
165 * operate slightly below at 1.5MHz
166 */
167 .modalias = "ads7846",
168 .bus_num = 1,
169 .chip_select = 0,
170 .max_speed_hz = 1500000,
171 .controller_data = &tsc2046_mcspi_config,
172 .irq = 0,
173 .platform_data = &tsc2046_config,
174 },
175};
176
177
178#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 8 108#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 8
179#define SDP3430_LCD_PANEL_ENABLE_GPIO 5 109#define SDP3430_LCD_PANEL_ENABLE_GPIO 5
180 110
181static unsigned backlight_gpio; 111static struct gpio sdp3430_dss_gpios[] __initdata = {
182static unsigned enable_gpio; 112 {SDP3430_LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW, "LCD reset" },
113 {SDP3430_LCD_PANEL_BACKLIGHT_GPIO, GPIOF_OUT_INIT_LOW, "LCD Backlight"},
114};
115
183static int lcd_enabled; 116static int lcd_enabled;
184static int dvi_enabled; 117static int dvi_enabled;
185 118
@@ -187,29 +120,11 @@ static void __init sdp3430_display_init(void)
187{ 120{
188 int r; 121 int r;
189 122
190 enable_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO; 123 r = gpio_request_array(sdp3430_dss_gpios,
191 backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO; 124 ARRAY_SIZE(sdp3430_dss_gpios));
192 125 if (r)
193 r = gpio_request(enable_gpio, "LCD reset"); 126 printk(KERN_ERR "failed to get LCD control GPIOs\n");
194 if (r) {
195 printk(KERN_ERR "failed to get LCD reset GPIO\n");
196 goto err0;
197 }
198
199 r = gpio_request(backlight_gpio, "LCD Backlight");
200 if (r) {
201 printk(KERN_ERR "failed to get LCD backlight GPIO\n");
202 goto err1;
203 }
204
205 gpio_direction_output(enable_gpio, 0);
206 gpio_direction_output(backlight_gpio, 0);
207 127
208 return;
209err1:
210 gpio_free(enable_gpio);
211err0:
212 return;
213} 128}
214 129
215static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev) 130static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev)
@@ -219,8 +134,8 @@ static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev)
219 return -EINVAL; 134 return -EINVAL;
220 } 135 }
221 136
222 gpio_direction_output(enable_gpio, 1); 137 gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 1);
223 gpio_direction_output(backlight_gpio, 1); 138 gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 1);
224 139
225 lcd_enabled = 1; 140 lcd_enabled = 1;
226 141
@@ -231,8 +146,8 @@ static void sdp3430_panel_disable_lcd(struct omap_dss_device *dssdev)
231{ 146{
232 lcd_enabled = 0; 147 lcd_enabled = 0;
233 148
234 gpio_direction_output(enable_gpio, 0); 149 gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 0);
235 gpio_direction_output(backlight_gpio, 0); 150 gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 0);
236} 151}
237 152
238static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev) 153static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev)
@@ -360,12 +275,10 @@ static int sdp3430_twl_gpio_setup(struct device *dev,
360 omap2_hsmmc_init(mmc); 275 omap2_hsmmc_init(mmc);
361 276
362 /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */ 277 /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */
363 gpio_request(gpio + 7, "sub_lcd_en_bkl"); 278 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "sub_lcd_en_bkl");
364 gpio_direction_output(gpio + 7, 0);
365 279
366 /* gpio + 15 is "sub_lcd_nRST" (output) */ 280 /* gpio + 15 is "sub_lcd_nRST" (output) */
367 gpio_request(gpio + 15, "sub_lcd_nRST"); 281 gpio_request_one(gpio + 15, GPIOF_OUT_INIT_LOW, "sub_lcd_nRST");
368 gpio_direction_output(gpio + 15, 0);
369 282
370 return 0; 283 return 0;
371} 284}
@@ -580,20 +493,10 @@ static struct twl4030_platform_data sdp3430_twldata = {
580 .vpll2 = &sdp3430_vpll2, 493 .vpll2 = &sdp3430_vpll2,
581}; 494};
582 495
583static struct i2c_board_info __initdata sdp3430_i2c_boardinfo[] = {
584 {
585 I2C_BOARD_INFO("twl4030", 0x48),
586 .flags = I2C_CLIENT_WAKE,
587 .irq = INT_34XX_SYS_NIRQ,
588 .platform_data = &sdp3430_twldata,
589 },
590};
591
592static int __init omap3430_i2c_init(void) 496static int __init omap3430_i2c_init(void)
593{ 497{
594 /* i2c1 for PMIC only */ 498 /* i2c1 for PMIC only */
595 omap_register_i2c_bus(1, 2600, sdp3430_i2c_boardinfo, 499 omap3_pmic_init("twl4030", &sdp3430_twldata);
596 ARRAY_SIZE(sdp3430_i2c_boardinfo));
597 /* i2c2 on camera connector (for sensor control) and optional isp1301 */ 500 /* i2c2 on camera connector (for sensor control) and optional isp1301 */
598 omap_register_i2c_bus(2, 400, NULL, 0); 501 omap_register_i2c_bus(2, 400, NULL, 0);
599 /* i2c3 on display connector (for DVI, tfp410) */ 502 /* i2c3 on display connector (for DVI, tfp410) */
@@ -872,30 +775,22 @@ static struct flash_partitions sdp_flash_partitions[] = {
872 }, 775 },
873}; 776};
874 777
875static struct omap_musb_board_data musb_board_data = {
876 .interface_type = MUSB_INTERFACE_ULPI,
877 .mode = MUSB_OTG,
878 .power = 100,
879};
880
881static void __init omap_3430sdp_init(void) 778static void __init omap_3430sdp_init(void)
882{ 779{
780 int gpio_pendown;
781
883 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 782 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
884 omap_board_config = sdp3430_config; 783 omap_board_config = sdp3430_config;
885 omap_board_config_size = ARRAY_SIZE(sdp3430_config); 784 omap_board_config_size = ARRAY_SIZE(sdp3430_config);
886 omap3_pm_init_cpuidle(omap3_cpuidle_params_table);
887 omap3430_i2c_init(); 785 omap3430_i2c_init();
888 omap_display_init(&sdp3430_dss_data); 786 omap_display_init(&sdp3430_dss_data);
889 if (omap_rev() > OMAP3430_REV_ES1_0) 787 if (omap_rev() > OMAP3430_REV_ES1_0)
890 ts_gpio = SDP3430_TS_GPIO_IRQ_SDPV2; 788 gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV2;
891 else 789 else
892 ts_gpio = SDP3430_TS_GPIO_IRQ_SDPV1; 790 gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1;
893 sdp3430_spi_board_info[0].irq = gpio_to_irq(ts_gpio); 791 omap_ads7846_init(1, gpio_pendown, 310, NULL);
894 spi_register_board_info(sdp3430_spi_board_info,
895 ARRAY_SIZE(sdp3430_spi_board_info));
896 ads7846_dev_init();
897 board_serial_init(); 792 board_serial_init();
898 usb_musb_init(&musb_board_data); 793 usb_musb_init(NULL);
899 board_smc91x_init(); 794 board_smc91x_init();
900 board_flash_init(sdp_flash_partitions, chip_sel_3430, 0); 795 board_flash_init(sdp_flash_partitions, chip_sel_3430, 0);
901 sdp3430_display_init(); 796 sdp3430_display_init();
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 56702c5e577f..73fa90bb6953 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -36,12 +36,13 @@
36#include <plat/usb.h> 36#include <plat/usb.h>
37#include <plat/mmc.h> 37#include <plat/mmc.h>
38#include <plat/omap4-keypad.h> 38#include <plat/omap4-keypad.h>
39#include <plat/display.h> 39#include <video/omapdss.h>
40 40
41#include "mux.h" 41#include "mux.h"
42#include "hsmmc.h" 42#include "hsmmc.h"
43#include "timer-gp.h" 43#include "timer-gp.h"
44#include "control.h" 44#include "control.h"
45#include "common-board-devices.h"
45 46
46#define ETH_KS8851_IRQ 34 47#define ETH_KS8851_IRQ 34
47#define ETH_KS8851_POWER_ON 48 48#define ETH_KS8851_POWER_ON 48
@@ -251,58 +252,22 @@ static struct spi_board_info sdp4430_spi_board_info[] __initdata = {
251 }, 252 },
252}; 253};
253 254
255static struct gpio sdp4430_eth_gpios[] __initdata = {
256 { ETH_KS8851_POWER_ON, GPIOF_OUT_INIT_HIGH, "eth_power" },
257 { ETH_KS8851_QUART, GPIOF_OUT_INIT_HIGH, "quart" },
258 { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" },
259};
260
254static int omap_ethernet_init(void) 261static int omap_ethernet_init(void)
255{ 262{
256 int status; 263 int status;
257 264
258 /* Request of GPIO lines */ 265 /* Request of GPIO lines */
266 status = gpio_request_array(sdp4430_eth_gpios,
267 ARRAY_SIZE(sdp4430_eth_gpios));
268 if (status)
269 pr_err("Cannot request ETH GPIOs\n");
259 270
260 status = gpio_request(ETH_KS8851_POWER_ON, "eth_power");
261 if (status) {
262 pr_err("Cannot request GPIO %d\n", ETH_KS8851_POWER_ON);
263 return status;
264 }
265
266 status = gpio_request(ETH_KS8851_QUART, "quart");
267 if (status) {
268 pr_err("Cannot request GPIO %d\n", ETH_KS8851_QUART);
269 goto error1;
270 }
271
272 status = gpio_request(ETH_KS8851_IRQ, "eth_irq");
273 if (status) {
274 pr_err("Cannot request GPIO %d\n", ETH_KS8851_IRQ);
275 goto error2;
276 }
277
278 /* Configuration of requested GPIO lines */
279
280 status = gpio_direction_output(ETH_KS8851_POWER_ON, 1);
281 if (status) {
282 pr_err("Cannot set output GPIO %d\n", ETH_KS8851_IRQ);
283 goto error3;
284 }
285
286 status = gpio_direction_output(ETH_KS8851_QUART, 1);
287 if (status) {
288 pr_err("Cannot set output GPIO %d\n", ETH_KS8851_QUART);
289 goto error3;
290 }
291
292 status = gpio_direction_input(ETH_KS8851_IRQ);
293 if (status) {
294 pr_err("Cannot set input GPIO %d\n", ETH_KS8851_IRQ);
295 goto error3;
296 }
297
298 return 0;
299
300error3:
301 gpio_free(ETH_KS8851_IRQ);
302error2:
303 gpio_free(ETH_KS8851_QUART);
304error1:
305 gpio_free(ETH_KS8851_POWER_ON);
306 return status; 271 return status;
307} 272}
308 273
@@ -575,14 +540,6 @@ static struct twl4030_platform_data sdp4430_twldata = {
575 .usb = &omap4_usbphy_data 540 .usb = &omap4_usbphy_data
576}; 541};
577 542
578static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
579 {
580 I2C_BOARD_INFO("twl6030", 0x48),
581 .flags = I2C_CLIENT_WAKE,
582 .irq = OMAP44XX_IRQ_SYS_1N,
583 .platform_data = &sdp4430_twldata,
584 },
585};
586static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = { 543static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = {
587 { 544 {
588 I2C_BOARD_INFO("tmp105", 0x48), 545 I2C_BOARD_INFO("tmp105", 0x48),
@@ -598,12 +555,7 @@ static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = {
598}; 555};
599static int __init omap4_i2c_init(void) 556static int __init omap4_i2c_init(void)
600{ 557{
601 /* 558 omap4_pmic_init("twl6030", &sdp4430_twldata);
602 * Phoenix Audio IC needs I2C1 to
603 * start with 400 KHz or less
604 */
605 omap_register_i2c_bus(1, 400, sdp4430_i2c_boardinfo,
606 ARRAY_SIZE(sdp4430_i2c_boardinfo));
607 omap_register_i2c_bus(2, 400, NULL, 0); 559 omap_register_i2c_bus(2, 400, NULL, 0);
608 omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo, 560 omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo,
609 ARRAY_SIZE(sdp4430_i2c_3_boardinfo)); 561 ARRAY_SIZE(sdp4430_i2c_3_boardinfo));
@@ -614,21 +566,13 @@ static int __init omap4_i2c_init(void)
614 566
615static void __init omap_sfh7741prox_init(void) 567static void __init omap_sfh7741prox_init(void)
616{ 568{
617 int error; 569 int error;
618 570
619 error = gpio_request(OMAP4_SFH7741_ENABLE_GPIO, "sfh7741"); 571 error = gpio_request_one(OMAP4_SFH7741_ENABLE_GPIO,
620 if (error < 0) { 572 GPIOF_OUT_INIT_LOW, "sfh7741");
573 if (error < 0)
621 pr_err("%s:failed to request GPIO %d, error %d\n", 574 pr_err("%s:failed to request GPIO %d, error %d\n",
622 __func__, OMAP4_SFH7741_ENABLE_GPIO, error); 575 __func__, OMAP4_SFH7741_ENABLE_GPIO, error);
623 return;
624 }
625
626 error = gpio_direction_output(OMAP4_SFH7741_ENABLE_GPIO , 0);
627 if (error < 0) {
628 pr_err("%s: GPIO configuration failed: GPIO %d,error %d\n",
629 __func__, OMAP4_SFH7741_ENABLE_GPIO, error);
630 gpio_free(OMAP4_SFH7741_ENABLE_GPIO);
631 }
632} 576}
633 577
634static void sdp4430_hdmi_mux_init(void) 578static void sdp4430_hdmi_mux_init(void)
@@ -645,27 +589,19 @@ static void sdp4430_hdmi_mux_init(void)
645 OMAP_PIN_INPUT_PULLUP); 589 OMAP_PIN_INPUT_PULLUP);
646} 590}
647 591
592static struct gpio sdp4430_hdmi_gpios[] = {
593 { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" },
594 { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
595};
596
648static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) 597static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
649{ 598{
650 int status; 599 int status;
651 600
652 status = gpio_request_one(HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, 601 status = gpio_request_array(sdp4430_hdmi_gpios,
653 "hdmi_gpio_hpd"); 602 ARRAY_SIZE(sdp4430_hdmi_gpios));
654 if (status) { 603 if (status)
655 pr_err("Cannot request GPIO %d\n", HDMI_GPIO_HPD); 604 pr_err("%s: Cannot request HDMI GPIOs\n", __func__);
656 return status;
657 }
658 status = gpio_request_one(HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH,
659 "hdmi_gpio_ls_oe");
660 if (status) {
661 pr_err("Cannot request GPIO %d\n", HDMI_GPIO_LS_OE);
662 goto error1;
663 }
664
665 return 0;
666
667error1:
668 gpio_free(HDMI_GPIO_HPD);
669 605
670 return status; 606 return status;
671} 607}
@@ -680,6 +616,15 @@ static struct omap_dss_device sdp4430_hdmi_device = {
680 .name = "hdmi", 616 .name = "hdmi",
681 .driver_name = "hdmi_panel", 617 .driver_name = "hdmi_panel",
682 .type = OMAP_DISPLAY_TYPE_HDMI, 618 .type = OMAP_DISPLAY_TYPE_HDMI,
619 .clocks = {
620 .dispc = {
621 .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK,
622 },
623 .hdmi = {
624 .regn = 15,
625 .regm2 = 1,
626 },
627 },
683 .platform_enable = sdp4430_panel_enable_hdmi, 628 .platform_enable = sdp4430_panel_enable_hdmi,
684 .platform_disable = sdp4430_panel_disable_hdmi, 629 .platform_disable = sdp4430_panel_disable_hdmi,
685 .channel = OMAP_DSS_CHANNEL_DIGIT, 630 .channel = OMAP_DSS_CHANNEL_DIGIT,
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index a890d244fec6..5e438a77cd72 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -89,19 +89,13 @@ static void __init am3517_crane_init(void)
 		return;
 	}
 
-	ret = gpio_request(GPIO_USB_POWER, "usb_ehci_enable");
+	ret = gpio_request_one(GPIO_USB_POWER, GPIOF_OUT_INIT_HIGH,
+			       "usb_ehci_enable");
 	if (ret < 0) {
 		pr_err("Can not request GPIO %d\n", GPIO_USB_POWER);
 		return;
 	}
 
-	ret = gpio_direction_output(GPIO_USB_POWER, 1);
-	if (ret < 0) {
-		gpio_free(GPIO_USB_POWER);
-		pr_err("Unable to initialize EHCI power\n");
-		return;
-	}
-
 	usbhs_init(&usbhs_bdata);
 }
 
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index ce7d5e6e4150..63af4171c043 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -34,8 +34,8 @@
34#include <plat/board.h> 34#include <plat/board.h>
35#include <plat/common.h> 35#include <plat/common.h>
36#include <plat/usb.h> 36#include <plat/usb.h>
37#include <plat/display.h> 37#include <video/omapdss.h>
38#include <plat/panel-generic-dpi.h> 38#include <video/omap-panel-generic-dpi.h>
39 39
40#include "mux.h" 40#include "mux.h"
41#include "control.h" 41#include "control.h"
@@ -174,19 +174,14 @@ static void __init am3517_evm_rtc_init(void)
174 int r; 174 int r;
175 175
176 omap_mux_init_gpio(GPIO_RTCS35390A_IRQ, OMAP_PIN_INPUT_PULLUP); 176 omap_mux_init_gpio(GPIO_RTCS35390A_IRQ, OMAP_PIN_INPUT_PULLUP);
177 r = gpio_request(GPIO_RTCS35390A_IRQ, "rtcs35390a-irq"); 177
178 r = gpio_request_one(GPIO_RTCS35390A_IRQ, GPIOF_IN, "rtcs35390a-irq");
178 if (r < 0) { 179 if (r < 0) {
179 printk(KERN_WARNING "failed to request GPIO#%d\n", 180 printk(KERN_WARNING "failed to request GPIO#%d\n",
180 GPIO_RTCS35390A_IRQ); 181 GPIO_RTCS35390A_IRQ);
181 return; 182 return;
182 } 183 }
183 r = gpio_direction_input(GPIO_RTCS35390A_IRQ); 184
184 if (r < 0) {
185 printk(KERN_WARNING "GPIO#%d cannot be configured as input\n",
186 GPIO_RTCS35390A_IRQ);
187 gpio_free(GPIO_RTCS35390A_IRQ);
188 return;
189 }
190 am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ); 185 am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
191} 186}
192 187
@@ -242,6 +237,15 @@ static int dvi_enabled;
242 237
243#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \ 238#if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
244 defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE) 239 defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
240static struct gpio am3517_evm_dss_gpios[] __initdata = {
241 /* GPIO 182 = LCD Backlight Power */
242 { LCD_PANEL_BKLIGHT_PWR, GPIOF_OUT_INIT_HIGH, "lcd_backlight_pwr" },
243 /* GPIO 181 = LCD Panel PWM */
244 { LCD_PANEL_PWM, GPIOF_OUT_INIT_HIGH, "lcd bl enable" },
245 /* GPIO 176 = LCD Panel Power enable pin */
246 { LCD_PANEL_PWR, GPIOF_OUT_INIT_HIGH, "dvi enable" },
247};
248
245static void __init am3517_evm_display_init(void) 249static void __init am3517_evm_display_init(void)
246{ 250{
247 int r; 251 int r;
@@ -249,41 +253,15 @@ static void __init am3517_evm_display_init(void)
249 omap_mux_init_gpio(LCD_PANEL_PWR, OMAP_PIN_INPUT_PULLUP); 253 omap_mux_init_gpio(LCD_PANEL_PWR, OMAP_PIN_INPUT_PULLUP);
250 omap_mux_init_gpio(LCD_PANEL_BKLIGHT_PWR, OMAP_PIN_INPUT_PULLDOWN); 254 omap_mux_init_gpio(LCD_PANEL_BKLIGHT_PWR, OMAP_PIN_INPUT_PULLDOWN);
251 omap_mux_init_gpio(LCD_PANEL_PWM, OMAP_PIN_INPUT_PULLDOWN); 255 omap_mux_init_gpio(LCD_PANEL_PWM, OMAP_PIN_INPUT_PULLDOWN);
252 /* 256
253 * Enable GPIO 182 = LCD Backlight Power 257 r = gpio_request_array(am3517_evm_dss_gpios,
254 */ 258 ARRAY_SIZE(am3517_evm_dss_gpios));
255 r = gpio_request(LCD_PANEL_BKLIGHT_PWR, "lcd_backlight_pwr");
256 if (r) { 259 if (r) {
257 printk(KERN_ERR "failed to get lcd_backlight_pwr\n"); 260 printk(KERN_ERR "failed to get DSS panel control GPIOs\n");
258 return; 261 return;
259 } 262 }
260 gpio_direction_output(LCD_PANEL_BKLIGHT_PWR, 1);
261 /*
262 * Enable GPIO 181 = LCD Panel PWM
263 */
264 r = gpio_request(LCD_PANEL_PWM, "lcd_pwm");
265 if (r) {
266 printk(KERN_ERR "failed to get lcd_pwm\n");
267 goto err_1;
268 }
269 gpio_direction_output(LCD_PANEL_PWM, 1);
270 /*
271 * Enable GPIO 176 = LCD Panel Power enable pin
272 */
273 r = gpio_request(LCD_PANEL_PWR, "lcd_panel_pwr");
274 if (r) {
275 printk(KERN_ERR "failed to get lcd_panel_pwr\n");
276 goto err_2;
277 }
278 gpio_direction_output(LCD_PANEL_PWR, 1);
279 263
280 printk(KERN_INFO "Display initialized successfully\n"); 264 printk(KERN_INFO "Display initialized successfully\n");
281 return;
282
283err_2:
284 gpio_free(LCD_PANEL_PWM);
285err_1:
286 gpio_free(LCD_PANEL_BKLIGHT_PWR);
287} 265}
288#else 266#else
289static void __init am3517_evm_display_init(void) {} 267static void __init am3517_evm_display_init(void) {}
@@ -396,7 +374,7 @@ static struct omap_musb_board_data musb_board_data = {
396 .power = 500, 374 .power = 500,
397 .set_phy_power = am35x_musb_phy_power, 375 .set_phy_power = am35x_musb_phy_power,
398 .clear_irq = am35x_musb_clear_irq, 376 .clear_irq = am35x_musb_clear_irq,
399 .set_mode = am35x_musb_set_mode, 377 .set_mode = am35x_set_mode,
400 .reset = am35x_musb_reset, 378 .reset = am35x_musb_reset,
401}; 379};
402 380
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index f4f8374a0298..f3beb8eeef77 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -202,6 +202,7 @@ static inline void __init apollon_init_smc91x(void)
202 unsigned int rate; 202 unsigned int rate;
203 struct clk *gpmc_fck; 203 struct clk *gpmc_fck;
204 int eth_cs; 204 int eth_cs;
205 int err;
205 206
206 gpmc_fck = clk_get(NULL, "gpmc_fck"); /* Always on ENABLE_ON_INIT */ 207 gpmc_fck = clk_get(NULL, "gpmc_fck"); /* Always on ENABLE_ON_INIT */
207 if (IS_ERR(gpmc_fck)) { 208 if (IS_ERR(gpmc_fck)) {
@@ -245,15 +246,13 @@ static inline void __init apollon_init_smc91x(void)
245 apollon_smc91x_resources[0].end = base + 0x30f; 246 apollon_smc91x_resources[0].end = base + 0x30f;
246 udelay(100); 247 udelay(100);
247 248
248 omap_mux_init_gpio(74, 0); 249 omap_mux_init_gpio(APOLLON_ETHR_GPIO_IRQ, 0);
249 if (gpio_request(APOLLON_ETHR_GPIO_IRQ, "SMC91x irq") < 0) { 250 err = gpio_request_one(APOLLON_ETHR_GPIO_IRQ, GPIOF_IN, "SMC91x irq");
251 if (err) {
250 printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n", 252 printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n",
251 APOLLON_ETHR_GPIO_IRQ); 253 APOLLON_ETHR_GPIO_IRQ);
252 gpmc_cs_free(APOLLON_ETH_CS); 254 gpmc_cs_free(APOLLON_ETH_CS);
253 goto out;
254 } 255 }
255 gpio_direction_input(APOLLON_ETHR_GPIO_IRQ);
256
257out: 256out:
258 clk_disable(gpmc_fck); 257 clk_disable(gpmc_fck);
259 clk_put(gpmc_fck); 258 clk_put(gpmc_fck);
@@ -280,20 +279,19 @@ static void __init omap_apollon_init_early(void)
280 omap2_init_common_devices(NULL, NULL); 279 omap2_init_common_devices(NULL, NULL);
281} 280}
282 281
282static struct gpio apollon_gpio_leds[] __initdata = {
283 { LED0_GPIO13, GPIOF_OUT_INIT_LOW, "LED0" }, /* LED0 - AA10 */
284 { LED1_GPIO14, GPIOF_OUT_INIT_LOW, "LED1" }, /* LED1 - AA6 */
285 { LED2_GPIO15, GPIOF_OUT_INIT_LOW, "LED2" }, /* LED2 - AA4 */
286};
287
283static void __init apollon_led_init(void) 288static void __init apollon_led_init(void)
284{ 289{
285 /* LED0 - AA10 */
286 omap_mux_init_signal("vlynq_clk.gpio_13", 0); 290 omap_mux_init_signal("vlynq_clk.gpio_13", 0);
287 gpio_request(LED0_GPIO13, "LED0");
288 gpio_direction_output(LED0_GPIO13, 0);
289 /* LED1 - AA6 */
290 omap_mux_init_signal("vlynq_rx1.gpio_14", 0); 291 omap_mux_init_signal("vlynq_rx1.gpio_14", 0);
291 gpio_request(LED1_GPIO14, "LED1");
292 gpio_direction_output(LED1_GPIO14, 0);
293 /* LED2 - AA4 */
294 omap_mux_init_signal("vlynq_rx0.gpio_15", 0); 292 omap_mux_init_signal("vlynq_rx0.gpio_15", 0);
295 gpio_request(LED2_GPIO15, "LED2"); 293
296 gpio_direction_output(LED2_GPIO15, 0); 294 gpio_request_array(apollon_gpio_leds, ARRAY_SIZE(apollon_gpio_leds));
297} 295}
298 296
299static void __init apollon_usb_init(void) 297static void __init apollon_usb_init(void)
@@ -301,8 +299,7 @@ static void __init apollon_usb_init(void)
301 /* USB device */ 299 /* USB device */
302 /* DEVICE_SUSPEND */ 300 /* DEVICE_SUSPEND */
303 omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0); 301 omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0);
304 gpio_request(12, "USB suspend"); 302 gpio_request_one(12, GPIOF_OUT_INIT_LOW, "USB suspend");
305 gpio_direction_output(12, 0);
306 omap2_usbfs_init(&apollon_usb_config); 303 omap2_usbfs_init(&apollon_usb_config);
307} 304}
308 305
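board-apollon.c applies the single-pin variant of the same cleanup: gpio_request() followed by gpio_direction_input() or gpio_direction_output() becomes one gpio_request_one() call whose flag argument carries both the direction and the initial level. A minimal sketch, assuming a hypothetical claim_board_gpios() wrapper; the pins come from the hunks above (APOLLON_ETHR_GPIO_IRQ is a board define, GPIO 12 is the USB suspend line):

#include <linux/gpio.h>

static int __init claim_board_gpios(void)
{
	int err;

	/* ethernet IRQ line: input only */
	err = gpio_request_one(APOLLON_ETHR_GPIO_IRQ, GPIOF_IN, "SMC91x irq");
	if (err)
		return err;

	/* USB suspend line: output, driven low as soon as it is claimed */
	return gpio_request_one(12, GPIOF_OUT_INIT_LOW, "USB suspend");
}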
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 02a12b41c0ff..c63115bc1536 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -45,8 +45,8 @@
45#include <plat/nand.h> 45#include <plat/nand.h>
46#include <plat/gpmc.h> 46#include <plat/gpmc.h>
47#include <plat/usb.h> 47#include <plat/usb.h>
48#include <plat/display.h> 48#include <video/omapdss.h>
49#include <plat/panel-generic-dpi.h> 49#include <video/omap-panel-generic-dpi.h>
50#include <plat/mcspi.h> 50#include <plat/mcspi.h>
51 51
52#include <mach/hardware.h> 52#include <mach/hardware.h>
@@ -54,6 +54,7 @@
54#include "mux.h" 54#include "mux.h"
55#include "sdram-micron-mt46h32m32lf-6.h" 55#include "sdram-micron-mt46h32m32lf-6.h"
56#include "hsmmc.h" 56#include "hsmmc.h"
57#include "common-board-devices.h"
57 58
58#define CM_T35_GPIO_PENDOWN 57 59#define CM_T35_GPIO_PENDOWN 57
59 60
@@ -66,86 +67,28 @@
66 67
67#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 68#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
68#include <linux/smsc911x.h> 69#include <linux/smsc911x.h>
70#include <plat/gpmc-smsc911x.h>
69 71
70static struct smsc911x_platform_config cm_t35_smsc911x_config = { 72static struct omap_smsc911x_platform_data cm_t35_smsc911x_cfg = {
71 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
72 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
73 .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
74 .phy_interface = PHY_INTERFACE_MODE_MII,
75};
76
77static struct resource cm_t35_smsc911x_resources[] = {
78 {
79 .flags = IORESOURCE_MEM,
80 },
81 {
82 .start = OMAP_GPIO_IRQ(CM_T35_SMSC911X_GPIO),
83 .end = OMAP_GPIO_IRQ(CM_T35_SMSC911X_GPIO),
84 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
85 },
86};
87
88static struct platform_device cm_t35_smsc911x_device = {
89 .name = "smsc911x",
90 .id = 0, 73 .id = 0,
91 .num_resources = ARRAY_SIZE(cm_t35_smsc911x_resources), 74 .cs = CM_T35_SMSC911X_CS,
92 .resource = cm_t35_smsc911x_resources, 75 .gpio_irq = CM_T35_SMSC911X_GPIO,
93 .dev = { 76 .gpio_reset = -EINVAL,
94 .platform_data = &cm_t35_smsc911x_config, 77 .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
95 },
96};
97
98static struct resource sb_t35_smsc911x_resources[] = {
99 {
100 .flags = IORESOURCE_MEM,
101 },
102 {
103 .start = OMAP_GPIO_IRQ(SB_T35_SMSC911X_GPIO),
104 .end = OMAP_GPIO_IRQ(SB_T35_SMSC911X_GPIO),
105 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
106 },
107}; 78};
108 79
109static struct platform_device sb_t35_smsc911x_device = { 80static struct omap_smsc911x_platform_data sb_t35_smsc911x_cfg = {
110 .name = "smsc911x",
111 .id = 1, 81 .id = 1,
112 .num_resources = ARRAY_SIZE(sb_t35_smsc911x_resources), 82 .cs = SB_T35_SMSC911X_CS,
113 .resource = sb_t35_smsc911x_resources, 83 .gpio_irq = SB_T35_SMSC911X_GPIO,
114 .dev = { 84 .gpio_reset = -EINVAL,
115 .platform_data = &cm_t35_smsc911x_config, 85 .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
116 },
117}; 86};
118 87
119static void __init cm_t35_init_smsc911x(struct platform_device *dev,
120 int cs, int irq_gpio)
121{
122 unsigned long cs_mem_base;
123
124 if (gpmc_cs_request(cs, SZ_16M, &cs_mem_base) < 0) {
125 pr_err("CM-T35: Failed request for GPMC mem for smsc911x\n");
126 return;
127 }
128
129 dev->resource[0].start = cs_mem_base + 0x0;
130 dev->resource[0].end = cs_mem_base + 0xff;
131
132 if ((gpio_request(irq_gpio, "ETH IRQ") == 0) &&
133 (gpio_direction_input(irq_gpio) == 0)) {
134 gpio_export(irq_gpio, 0);
135 } else {
136 pr_err("CM-T35: could not obtain gpio for SMSC911X IRQ\n");
137 return;
138 }
139
140 platform_device_register(dev);
141}
142
143static void __init cm_t35_init_ethernet(void) 88static void __init cm_t35_init_ethernet(void)
144{ 89{
145 cm_t35_init_smsc911x(&cm_t35_smsc911x_device, 90 gpmc_smsc911x_init(&cm_t35_smsc911x_cfg);
146 CM_T35_SMSC911X_CS, CM_T35_SMSC911X_GPIO); 91 gpmc_smsc911x_init(&sb_t35_smsc911x_cfg);
147 cm_t35_init_smsc911x(&sb_t35_smsc911x_device,
148 SB_T35_SMSC911X_CS, SB_T35_SMSC911X_GPIO);
149} 92}
150#else 93#else
151static inline void __init cm_t35_init_ethernet(void) { return; } 94static inline void __init cm_t35_init_ethernet(void) { return; }
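For the cm-t35 ethernet ports, the open-coded smsc911x platform_device, its memory/IRQ resources and the GPMC chip-select plus IRQ-GPIO bring-up all move behind gpmc_smsc911x_init(), which is fed a small omap_smsc911x_platform_data (chip-select, IRQ GPIO, gpio_reset = -EINVAL meaning "no reset line wired", and the smsc911x flags). Roughly what the removed cm_t35_init_smsc911x() did by hand, and what the helper is assumed to take over, as a sketch with a made-up function name:

static void __init smsc911x_bringup(int cs, int irq_gpio)
{
	unsigned long cs_mem_base;

	/* claim a 16M GPMC window for the chip */
	if (gpmc_cs_request(cs, SZ_16M, &cs_mem_base) < 0) {
		pr_err("Failed request for GPMC mem for smsc911x\n");
		return;
	}

	/* the IRQ pin is a plain input, exported for inspection via sysfs */
	if (gpio_request_one(irq_gpio, GPIOF_IN, "ETH IRQ")) {
		gpmc_cs_free(cs);
		return;
	}
	gpio_export(irq_gpio, 0);

	/*
	 * ...then point the device's MEM resource at cs_mem_base and
	 * register the platform_device, as the removed code did.
	 */
}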
@@ -235,69 +178,10 @@ static void __init cm_t35_init_nand(void)
235static inline void cm_t35_init_nand(void) {} 178static inline void cm_t35_init_nand(void) {}
236#endif 179#endif
237 180
238#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
239 defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
240#include <linux/spi/ads7846.h>
241
242static struct omap2_mcspi_device_config ads7846_mcspi_config = {
243 .turbo_mode = 0,
244 .single_channel = 1, /* 0: slave, 1: master */
245};
246
247static int ads7846_get_pendown_state(void)
248{
249 return !gpio_get_value(CM_T35_GPIO_PENDOWN);
250}
251
252static struct ads7846_platform_data ads7846_config = {
253 .x_max = 0x0fff,
254 .y_max = 0x0fff,
255 .x_plate_ohms = 180,
256 .pressure_max = 255,
257 .debounce_max = 10,
258 .debounce_tol = 3,
259 .debounce_rep = 1,
260 .get_pendown_state = ads7846_get_pendown_state,
261 .keep_vref_on = 1,
262};
263
264static struct spi_board_info cm_t35_spi_board_info[] __initdata = {
265 {
266 .modalias = "ads7846",
267 .bus_num = 1,
268 .chip_select = 0,
269 .max_speed_hz = 1500000,
270 .controller_data = &ads7846_mcspi_config,
271 .irq = OMAP_GPIO_IRQ(CM_T35_GPIO_PENDOWN),
272 .platform_data = &ads7846_config,
273 },
274};
275
276static void __init cm_t35_init_ads7846(void)
277{
278 if ((gpio_request(CM_T35_GPIO_PENDOWN, "ADS7846_PENDOWN") == 0) &&
279 (gpio_direction_input(CM_T35_GPIO_PENDOWN) == 0)) {
280 gpio_export(CM_T35_GPIO_PENDOWN, 0);
281 } else {
282 pr_err("CM-T35: could not obtain gpio for ADS7846_PENDOWN\n");
283 return;
284 }
285
286 spi_register_board_info(cm_t35_spi_board_info,
287 ARRAY_SIZE(cm_t35_spi_board_info));
288}
289#else
290static inline void cm_t35_init_ads7846(void) {}
291#endif
292
293#define CM_T35_LCD_EN_GPIO 157 181#define CM_T35_LCD_EN_GPIO 157
294#define CM_T35_LCD_BL_GPIO 58 182#define CM_T35_LCD_BL_GPIO 58
295#define CM_T35_DVI_EN_GPIO 54 183#define CM_T35_DVI_EN_GPIO 54
296 184
297static int lcd_bl_gpio;
298static int lcd_en_gpio;
299static int dvi_en_gpio;
300
301static int lcd_enabled; 185static int lcd_enabled;
302static int dvi_enabled; 186static int dvi_enabled;
303 187
@@ -308,8 +192,8 @@ static int cm_t35_panel_enable_lcd(struct omap_dss_device *dssdev)
308 return -EINVAL; 192 return -EINVAL;
309 } 193 }
310 194
311 gpio_set_value(lcd_en_gpio, 1); 195 gpio_set_value(CM_T35_LCD_EN_GPIO, 1);
312 gpio_set_value(lcd_bl_gpio, 1); 196 gpio_set_value(CM_T35_LCD_BL_GPIO, 1);
313 197
314 lcd_enabled = 1; 198 lcd_enabled = 1;
315 199
@@ -320,8 +204,8 @@ static void cm_t35_panel_disable_lcd(struct omap_dss_device *dssdev)
320{ 204{
321 lcd_enabled = 0; 205 lcd_enabled = 0;
322 206
323 gpio_set_value(lcd_bl_gpio, 0); 207 gpio_set_value(CM_T35_LCD_BL_GPIO, 0);
324 gpio_set_value(lcd_en_gpio, 0); 208 gpio_set_value(CM_T35_LCD_EN_GPIO, 0);
325} 209}
326 210
327static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev) 211static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev)
@@ -331,7 +215,7 @@ static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev)
331 return -EINVAL; 215 return -EINVAL;
332 } 216 }
333 217
334 gpio_set_value(dvi_en_gpio, 0); 218 gpio_set_value(CM_T35_DVI_EN_GPIO, 0);
335 dvi_enabled = 1; 219 dvi_enabled = 1;
336 220
337 return 0; 221 return 0;
@@ -339,7 +223,7 @@ static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev)
339 223
340static void cm_t35_panel_disable_dvi(struct omap_dss_device *dssdev) 224static void cm_t35_panel_disable_dvi(struct omap_dss_device *dssdev)
341{ 225{
342 gpio_set_value(dvi_en_gpio, 1); 226 gpio_set_value(CM_T35_DVI_EN_GPIO, 1);
343 dvi_enabled = 0; 227 dvi_enabled = 0;
344} 228}
345 229
@@ -421,62 +305,38 @@ static struct spi_board_info cm_t35_lcd_spi_board_info[] __initdata = {
421 }, 305 },
422}; 306};
423 307
308static struct gpio cm_t35_dss_gpios[] __initdata = {
309 { CM_T35_LCD_EN_GPIO, GPIOF_OUT_INIT_LOW, "lcd enable" },
310 { CM_T35_LCD_BL_GPIO, GPIOF_OUT_INIT_LOW, "lcd bl enable" },
311 { CM_T35_DVI_EN_GPIO, GPIOF_OUT_INIT_HIGH, "dvi enable" },
312};
313
424static void __init cm_t35_init_display(void) 314static void __init cm_t35_init_display(void)
425{ 315{
426 int err; 316 int err;
427 317
428 lcd_en_gpio = CM_T35_LCD_EN_GPIO;
429 lcd_bl_gpio = CM_T35_LCD_BL_GPIO;
430 dvi_en_gpio = CM_T35_DVI_EN_GPIO;
431
432 spi_register_board_info(cm_t35_lcd_spi_board_info, 318 spi_register_board_info(cm_t35_lcd_spi_board_info,
433 ARRAY_SIZE(cm_t35_lcd_spi_board_info)); 319 ARRAY_SIZE(cm_t35_lcd_spi_board_info));
434 320
435 err = gpio_request(lcd_en_gpio, "LCD RST"); 321 err = gpio_request_array(cm_t35_dss_gpios,
436 if (err) { 322 ARRAY_SIZE(cm_t35_dss_gpios));
437 pr_err("CM-T35: failed to get LCD reset GPIO\n");
438 goto out;
439 }
440
441 err = gpio_request(lcd_bl_gpio, "LCD BL");
442 if (err) { 323 if (err) {
443 pr_err("CM-T35: failed to get LCD backlight control GPIO\n"); 324 pr_err("CM-T35: failed to request DSS control GPIOs\n");
444 goto err_lcd_bl; 325 return;
445 }
446
447 err = gpio_request(dvi_en_gpio, "DVI EN");
448 if (err) {
449 pr_err("CM-T35: failed to get DVI reset GPIO\n");
450 goto err_dvi_en;
451 } 326 }
452 327
453 gpio_export(lcd_en_gpio, 0); 328 gpio_export(CM_T35_LCD_EN_GPIO, 0);
454 gpio_export(lcd_bl_gpio, 0); 329 gpio_export(CM_T35_LCD_BL_GPIO, 0);
455 gpio_export(dvi_en_gpio, 0); 330 gpio_export(CM_T35_DVI_EN_GPIO, 0);
456 gpio_direction_output(lcd_en_gpio, 0);
457 gpio_direction_output(lcd_bl_gpio, 0);
458 gpio_direction_output(dvi_en_gpio, 1);
459 331
460 msleep(50); 332 msleep(50);
461 gpio_set_value(lcd_en_gpio, 1); 333 gpio_set_value(CM_T35_LCD_EN_GPIO, 1);
462 334
463 err = omap_display_init(&cm_t35_dss_data); 335 err = omap_display_init(&cm_t35_dss_data);
464 if (err) { 336 if (err) {
465 pr_err("CM-T35: failed to register DSS device\n"); 337 pr_err("CM-T35: failed to register DSS device\n");
466 goto err_dev_reg; 338 gpio_free_array(cm_t35_dss_gpios, ARRAY_SIZE(cm_t35_dss_gpios));
467 } 339 }
468
469 return;
470
471err_dev_reg:
472 gpio_free(dvi_en_gpio);
473err_dvi_en:
474 gpio_free(lcd_bl_gpio);
475err_lcd_bl:
476 gpio_free(lcd_en_gpio);
477out:
478
479 return;
480} 340}
481 341
482static struct regulator_consumer_supply cm_t35_vmmc1_supply = { 342static struct regulator_consumer_supply cm_t35_vmmc1_supply = {
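In cm_t35_init_display() the three DSS control pins (LCD enable, backlight enable, DVI enable) now come from one __initdata table, so the old err_dev_reg/err_dvi_en/err_lcd_bl goto ladder shrinks to a single undo step: if gpio_request_array() itself fails nothing has to be released, and if omap_display_init() fails afterwards one gpio_free_array() call puts everything back. The shape of that pairing, condensed from the new code above:

	err = gpio_request_array(cm_t35_dss_gpios,
				 ARRAY_SIZE(cm_t35_dss_gpios));
	if (err)
		return;		/* the helper already freed what it had claimed */

	if (omap_display_init(&cm_t35_dss_data))
		gpio_free_array(cm_t35_dss_gpios,
				ARRAY_SIZE(cm_t35_dss_gpios));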
@@ -609,10 +469,8 @@ static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio,
609{ 469{
610 int wlan_rst = gpio + 2; 470 int wlan_rst = gpio + 2;
611 471
612 if ((gpio_request(wlan_rst, "WLAN RST") == 0) && 472 if (gpio_request_one(wlan_rst, GPIOF_OUT_INIT_HIGH, "WLAN RST") == 0) {
613 (gpio_direction_output(wlan_rst, 1) == 0)) {
614 gpio_export(wlan_rst, 0); 473 gpio_export(wlan_rst, 0);
615
616 udelay(10); 474 udelay(10);
617 gpio_set_value(wlan_rst, 0); 475 gpio_set_value(wlan_rst, 0);
618 udelay(10); 476 udelay(10);
@@ -653,19 +511,9 @@ static struct twl4030_platform_data cm_t35_twldata = {
653 .vpll2 = &cm_t35_vpll2, 511 .vpll2 = &cm_t35_vpll2,
654}; 512};
655 513
656static struct i2c_board_info __initdata cm_t35_i2c_boardinfo[] = {
657 {
658 I2C_BOARD_INFO("tps65930", 0x48),
659 .flags = I2C_CLIENT_WAKE,
660 .irq = INT_34XX_SYS_NIRQ,
661 .platform_data = &cm_t35_twldata,
662 },
663};
664
665static void __init cm_t35_init_i2c(void) 514static void __init cm_t35_init_i2c(void)
666{ 515{
667 omap_register_i2c_bus(1, 2600, cm_t35_i2c_boardinfo, 516 omap3_pmic_init("tps65930", &cm_t35_twldata);
668 ARRAY_SIZE(cm_t35_i2c_boardinfo));
669} 517}
670 518
671static void __init cm_t35_init_early(void) 519static void __init cm_t35_init_early(void)
@@ -775,12 +623,6 @@ static struct omap_board_mux board_mux[] __initdata = {
775}; 623};
776#endif 624#endif
777 625
778static struct omap_musb_board_data musb_board_data = {
779 .interface_type = MUSB_INTERFACE_ULPI,
780 .mode = MUSB_OTG,
781 .power = 100,
782};
783
784static struct omap_board_config_kernel cm_t35_config[] __initdata = { 626static struct omap_board_config_kernel cm_t35_config[] __initdata = {
785}; 627};
786 628
@@ -792,12 +634,12 @@ static void __init cm_t35_init(void)
792 omap_serial_init(); 634 omap_serial_init();
793 cm_t35_init_i2c(); 635 cm_t35_init_i2c();
794 cm_t35_init_nand(); 636 cm_t35_init_nand();
795 cm_t35_init_ads7846(); 637 omap_ads7846_init(1, CM_T35_GPIO_PENDOWN, 0, NULL);
796 cm_t35_init_ethernet(); 638 cm_t35_init_ethernet();
797 cm_t35_init_led(); 639 cm_t35_init_led();
798 cm_t35_init_display(); 640 cm_t35_init_display();
799 641
800 usb_musb_init(&musb_board_data); 642 usb_musb_init(NULL);
801 usbhs_init(&usbhs_bdata); 643 usbhs_init(&usbhs_bdata);
802} 644}
803 645
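The remaining cm-t35 hunks lean on the new common-board-devices helpers instead of per-board boilerplate: the tps65930 i2c_board_info block and its omap_register_i2c_bus(1, 2600, ...) call become omap3_pmic_init("tps65930", &cm_t35_twldata), the hand-rolled ads7846 pendown/SPI setup becomes omap_ads7846_init(1, CM_T35_GPIO_PENDOWN, 0, NULL), and usb_musb_init(NULL) is assumed to fall back to a default MUSB configuration in place of the deleted local musb_board_data. A sketch of how the calls sit together, with a hypothetical wrapper name and the argument meanings read off the deleted code (the third ads7846 argument is taken here to be a debounce setting):

static void __init cm_t35_peripherals_init(void)
{
	/* PMIC at 0x48 on I2C1, wake-capable, wired to SYS_NIRQ */
	omap3_pmic_init("tps65930", &cm_t35_twldata);

	/* touchscreen on SPI bus 1, pendown on GPIO 57, no extra debounce */
	omap_ads7846_init(1, CM_T35_GPIO_PENDOWN, 0, NULL);

	/* NULL: use the default board data rather than a local copy */
	usb_musb_init(NULL);
}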
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index a27e3eee8292..08f08e812492 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -148,14 +148,13 @@ static void __init cm_t3517_init_rtc(void)
148{ 148{
149 int err; 149 int err;
150 150
151 err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en"); 151 err = gpio_request_one(RTC_CS_EN_GPIO, GPIOF_OUT_INIT_HIGH,
152 "rtc cs en");
152 if (err) { 153 if (err) {
153 pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err); 154 pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err);
154 return; 155 return;
155 } 156 }
156 157
157 gpio_direction_output(RTC_CS_EN_GPIO, 1);
158
159 platform_device_register(&cm_t3517_rtc_device); 158 platform_device_register(&cm_t3517_rtc_device);
160} 159}
161#else 160#else
@@ -182,11 +181,11 @@ static int cm_t3517_init_usbh(void)
182{ 181{
183 int err; 182 int err;
184 183
185 err = gpio_request(USB_HUB_RESET_GPIO, "usb hub rst"); 184 err = gpio_request_one(USB_HUB_RESET_GPIO, GPIOF_OUT_INIT_LOW,
185 "usb hub rst");
186 if (err) { 186 if (err) {
187 pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err); 187 pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err);
188 } else { 188 } else {
189 gpio_direction_output(USB_HUB_RESET_GPIO, 0);
190 udelay(10); 189 udelay(10);
191 gpio_set_value(USB_HUB_RESET_GPIO, 1); 190 gpio_set_value(USB_HUB_RESET_GPIO, 1);
192 msleep(1); 191 msleep(1);
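board-cm-t3517.c keeps its reset sequencing but lets gpio_request_one() establish the starting level: the RTC chip-select enable is claimed already high, and the USB hub reset line is claimed low and then pulsed. The new hub-reset body with the timing spelled out (USB_HUB_RESET_GPIO is a board define outside this hunk):

	err = gpio_request_one(USB_HUB_RESET_GPIO, GPIOF_OUT_INIT_LOW,
			       "usb hub rst");
	if (err) {
		pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err);
	} else {
		udelay(10);				/* hold reset low */
		gpio_set_value(USB_HUB_RESET_GPIO, 1);	/* release the hub */
		msleep(1);
	}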
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 65f9fde2c567..cf520d7dd614 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -45,13 +45,12 @@
45#include <plat/gpmc.h> 45#include <plat/gpmc.h>
46#include <plat/nand.h> 46#include <plat/nand.h>
47#include <plat/usb.h> 47#include <plat/usb.h>
48#include <plat/display.h> 48#include <video/omapdss.h>
49#include <plat/panel-generic-dpi.h> 49#include <video/omap-panel-generic-dpi.h>
50 50
51#include <plat/mcspi.h> 51#include <plat/mcspi.h>
52#include <linux/input/matrix_keypad.h> 52#include <linux/input/matrix_keypad.h>
53#include <linux/spi/spi.h> 53#include <linux/spi/spi.h>
54#include <linux/spi/ads7846.h>
55#include <linux/dm9000.h> 54#include <linux/dm9000.h>
56#include <linux/interrupt.h> 55#include <linux/interrupt.h>
57 56
@@ -60,6 +59,7 @@
60#include "mux.h" 59#include "mux.h"
61#include "hsmmc.h" 60#include "hsmmc.h"
62#include "timer-gp.h" 61#include "timer-gp.h"
62#include "common-board-devices.h"
63 63
64#define NAND_BLOCK_SIZE SZ_128K 64#define NAND_BLOCK_SIZE SZ_128K
65 65
@@ -97,13 +97,6 @@ static struct mtd_partition devkit8000_nand_partitions[] = {
97 }, 97 },
98}; 98};
99 99
100static struct omap_nand_platform_data devkit8000_nand_data = {
101 .options = NAND_BUSWIDTH_16,
102 .parts = devkit8000_nand_partitions,
103 .nr_parts = ARRAY_SIZE(devkit8000_nand_partitions),
104 .dma_channel = -1, /* disable DMA in OMAP NAND driver */
105};
106
107static struct omap2_hsmmc_info mmc[] = { 100static struct omap2_hsmmc_info mmc[] = {
108 { 101 {
109 .mmc = 1, 102 .mmc = 1,
@@ -249,7 +242,7 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
249 /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */ 242 /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
250 devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0; 243 devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
251 ret = gpio_request_one(devkit8000_lcd_device.reset_gpio, 244 ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
252 GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN"); 245 GPIOF_OUT_INIT_LOW, "LCD_PWREN");
253 if (ret < 0) { 246 if (ret < 0) {
254 devkit8000_lcd_device.reset_gpio = -EINVAL; 247 devkit8000_lcd_device.reset_gpio = -EINVAL;
255 printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n"); 248 printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n");
@@ -258,7 +251,7 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
258 /* gpio + 7 is "DVI_PD" (out, active low) */ 251 /* gpio + 7 is "DVI_PD" (out, active low) */
259 devkit8000_dvi_device.reset_gpio = gpio + 7; 252 devkit8000_dvi_device.reset_gpio = gpio + 7;
260 ret = gpio_request_one(devkit8000_dvi_device.reset_gpio, 253 ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
261 GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown"); 254 GPIOF_OUT_INIT_LOW, "DVI PowerDown");
262 if (ret < 0) { 255 if (ret < 0) {
263 devkit8000_dvi_device.reset_gpio = -EINVAL; 256 devkit8000_dvi_device.reset_gpio = -EINVAL;
264 printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n"); 257 printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
@@ -366,19 +359,9 @@ static struct twl4030_platform_data devkit8000_twldata = {
366 .keypad = &devkit8000_kp_data, 359 .keypad = &devkit8000_kp_data,
367}; 360};
368 361
369static struct i2c_board_info __initdata devkit8000_i2c_boardinfo[] = {
370 {
371 I2C_BOARD_INFO("tps65930", 0x48),
372 .flags = I2C_CLIENT_WAKE,
373 .irq = INT_34XX_SYS_NIRQ,
374 .platform_data = &devkit8000_twldata,
375 },
376};
377
378static int __init devkit8000_i2c_init(void) 362static int __init devkit8000_i2c_init(void)
379{ 363{
380 omap_register_i2c_bus(1, 2600, devkit8000_i2c_boardinfo, 364 omap3_pmic_init("tps65930", &devkit8000_twldata);
381 ARRAY_SIZE(devkit8000_i2c_boardinfo));
382 /* Bus 3 is attached to the DVI port where devices like the pico DLP 365 /* Bus 3 is attached to the DVI port where devices like the pico DLP
383 * projector don't work reliably with 400kHz */ 366 * projector don't work reliably with 400kHz */
384 omap_register_i2c_bus(3, 400, NULL, 0); 367 omap_register_i2c_bus(3, 400, NULL, 0);
@@ -463,56 +446,6 @@ static void __init devkit8000_init_irq(void)
463#endif 446#endif
464} 447}
465 448
466static void __init devkit8000_ads7846_init(void)
467{
468 int gpio = OMAP3_DEVKIT_TS_GPIO;
469 int ret;
470
471 ret = gpio_request(gpio, "ads7846_pen_down");
472 if (ret < 0) {
473 printk(KERN_ERR "Failed to request GPIO %d for "
474 "ads7846 pen down IRQ\n", gpio);
475 return;
476 }
477
478 gpio_direction_input(gpio);
479}
480
481static int ads7846_get_pendown_state(void)
482{
483 return !gpio_get_value(OMAP3_DEVKIT_TS_GPIO);
484}
485
486static struct ads7846_platform_data ads7846_config = {
487 .x_max = 0x0fff,
488 .y_max = 0x0fff,
489 .x_plate_ohms = 180,
490 .pressure_max = 255,
491 .debounce_max = 10,
492 .debounce_tol = 5,
493 .debounce_rep = 1,
494 .get_pendown_state = ads7846_get_pendown_state,
495 .keep_vref_on = 1,
496 .settle_delay_usecs = 150,
497};
498
499static struct omap2_mcspi_device_config ads7846_mcspi_config = {
500 .turbo_mode = 0,
501 .single_channel = 1, /* 0: slave, 1: master */
502};
503
504static struct spi_board_info devkit8000_spi_board_info[] __initdata = {
505 {
506 .modalias = "ads7846",
507 .bus_num = 2,
508 .chip_select = 0,
509 .max_speed_hz = 1500000,
510 .controller_data = &ads7846_mcspi_config,
511 .irq = OMAP_GPIO_IRQ(OMAP3_DEVKIT_TS_GPIO),
512 .platform_data = &ads7846_config,
513 }
514};
515
516#define OMAP_DM9000_BASE 0x2c000000 449#define OMAP_DM9000_BASE 0x2c000000
517 450
518static struct resource omap_dm9000_resources[] = { 451static struct resource omap_dm9000_resources[] = {
@@ -550,14 +483,14 @@ static void __init omap_dm9000_init(void)
550{ 483{
551 unsigned char *eth_addr = omap_dm9000_platdata.dev_addr; 484 unsigned char *eth_addr = omap_dm9000_platdata.dev_addr;
552 struct omap_die_id odi; 485 struct omap_die_id odi;
486 int ret;
553 487
554 if (gpio_request(OMAP_DM9000_GPIO_IRQ, "dm9000 irq") < 0) { 488 ret = gpio_request_one(OMAP_DM9000_GPIO_IRQ, GPIOF_IN, "dm9000 irq");
489 if (ret < 0) {
555 printk(KERN_ERR "Failed to request GPIO%d for dm9000 IRQ\n", 490 printk(KERN_ERR "Failed to request GPIO%d for dm9000 IRQ\n",
556 OMAP_DM9000_GPIO_IRQ); 491 OMAP_DM9000_GPIO_IRQ);
557 return; 492 return;
558 } 493 }
559
560 gpio_direction_input(OMAP_DM9000_GPIO_IRQ);
561 494
562 /* init the mac address using DIE id */ 495 /* init the mac address using DIE id */
563 omap_get_die_id(&odi); 496 omap_get_die_id(&odi);
@@ -576,45 +509,6 @@ static struct platform_device *devkit8000_devices[] __initdata = {
576 &omap_dm9000_dev, 509 &omap_dm9000_dev,
577}; 510};
578 511
579static void __init devkit8000_flash_init(void)
580{
581 u8 cs = 0;
582 u8 nandcs = GPMC_CS_NUM + 1;
583
584 /* find out the chip-select on which NAND exists */
585 while (cs < GPMC_CS_NUM) {
586 u32 ret = 0;
587 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
588
589 if ((ret & 0xC00) == 0x800) {
590 printk(KERN_INFO "Found NAND on CS%d\n", cs);
591 if (nandcs > GPMC_CS_NUM)
592 nandcs = cs;
593 }
594 cs++;
595 }
596
597 if (nandcs > GPMC_CS_NUM) {
598 printk(KERN_INFO "NAND: Unable to find configuration "
599 "in GPMC\n ");
600 return;
601 }
602
603 if (nandcs < GPMC_CS_NUM) {
604 devkit8000_nand_data.cs = nandcs;
605
606 printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
607 if (gpmc_nand_init(&devkit8000_nand_data) < 0)
608 printk(KERN_ERR "Unable to register NAND device\n");
609 }
610}
611
612static struct omap_musb_board_data musb_board_data = {
613 .interface_type = MUSB_INTERFACE_ULPI,
614 .mode = MUSB_OTG,
615 .power = 100,
616};
617
618static const struct usbhs_omap_board_data usbhs_bdata __initconst = { 512static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
619 513
620 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, 514 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
@@ -795,14 +689,13 @@ static void __init devkit8000_init(void)
795 ARRAY_SIZE(devkit8000_devices)); 689 ARRAY_SIZE(devkit8000_devices));
796 690
797 omap_display_init(&devkit8000_dss_data); 691 omap_display_init(&devkit8000_dss_data);
798 spi_register_board_info(devkit8000_spi_board_info,
799 ARRAY_SIZE(devkit8000_spi_board_info));
800 692
801 devkit8000_ads7846_init(); 693 omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL);
802 694
803 usb_musb_init(&musb_board_data); 695 usb_musb_init(NULL);
804 usbhs_init(&usbhs_bdata); 696 usbhs_init(&usbhs_bdata);
805 devkit8000_flash_init(); 697 omap_nand_flash_init(NAND_BUSWIDTH_16, devkit8000_nand_partitions,
698 ARRAY_SIZE(devkit8000_nand_partitions));
806 699
807 /* Ensure SDRC pins are mux'd for self-refresh */ 700 /* Ensure SDRC pins are mux'd for self-refresh */
808 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); 701 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
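The devkit8000 conversion adds two variations on the theme: the GPIOF_DIR_OUT | GPIOF_INIT_LOW spelling is replaced by the single combined GPIOF_OUT_INIT_LOW macro (same request, just the canonical flag name), and the local NAND chip-select probe loop disappears in favour of omap_nand_flash_init(), which takes the bus-width option and the partition table directly. Roughly what the deleted devkit8000_flash_init() did and the helper is assumed to fold in, condensed into a sketch (devkit8000_nand_data is the platform-data struct this patch also removes):

	u8 cs, nandcs = GPMC_CS_NUM + 1;

	/* scan the GPMC chip-selects for one configured as NAND */
	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
		u32 cfg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);

		if ((cfg & 0xC00) == 0x800) {	/* NAND found on this CS */
			nandcs = cs;
			break;
		}
	}

	if (nandcs < GPMC_CS_NUM) {
		devkit8000_nand_data.cs = nandcs;
		if (gpmc_nand_init(&devkit8000_nand_data) < 0)
			printk(KERN_ERR "Unable to register NAND device\n");
	}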
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 34cf982b9679..0c1bfca3f731 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -31,13 +31,14 @@
31#include <plat/common.h> 31#include <plat/common.h>
32#include <plat/gpmc.h> 32#include <plat/gpmc.h>
33#include <plat/usb.h> 33#include <plat/usb.h>
34#include <plat/display.h> 34#include <video/omapdss.h>
35#include <plat/panel-generic-dpi.h> 35#include <video/omap-panel-generic-dpi.h>
36#include <plat/onenand.h> 36#include <plat/onenand.h>
37 37
38#include "mux.h" 38#include "mux.h"
39#include "hsmmc.h" 39#include "hsmmc.h"
40#include "sdram-numonyx-m65kxxxxam.h" 40#include "sdram-numonyx-m65kxxxxam.h"
41#include "common-board-devices.h"
41 42
42#define IGEP2_SMSC911X_CS 5 43#define IGEP2_SMSC911X_CS 5
43#define IGEP2_SMSC911X_GPIO 176 44#define IGEP2_SMSC911X_GPIO 176
@@ -54,6 +55,11 @@
54#define IGEP2_RC_GPIO_WIFI_NRESET 139 55#define IGEP2_RC_GPIO_WIFI_NRESET 139
55#define IGEP2_RC_GPIO_BT_NRESET 137 56#define IGEP2_RC_GPIO_BT_NRESET 137
56 57
58#define IGEP3_GPIO_LED0_GREEN 54
59#define IGEP3_GPIO_LED0_RED 53
60#define IGEP3_GPIO_LED1_RED 16
61#define IGEP3_GPIO_USBH_NRESET 183
62
57/* 63/*
58 * IGEP2 Hardware Revision Table 64 * IGEP2 Hardware Revision Table
59 * 65 *
@@ -68,6 +74,7 @@
68 74
69#define IGEP2_BOARD_HWREV_B 0 75#define IGEP2_BOARD_HWREV_B 0
70#define IGEP2_BOARD_HWREV_C 1 76#define IGEP2_BOARD_HWREV_C 1
77#define IGEP3_BOARD_HWREV 2
71 78
72static u8 hwrev; 79static u8 hwrev;
73 80
@@ -75,24 +82,29 @@ static void __init igep2_get_revision(void)
75{ 82{
76 u8 ret; 83 u8 ret;
77 84
85 if (machine_is_igep0030()) {
86 hwrev = IGEP3_BOARD_HWREV;
87 return;
88 }
89
78 omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT); 90 omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT);
79 91
80 if ((gpio_request(IGEP2_GPIO_LED1_RED, "GPIO_HW0_REV") == 0) && 92 if (gpio_request_one(IGEP2_GPIO_LED1_RED, GPIOF_IN, "GPIO_HW0_REV")) {
81 (gpio_direction_input(IGEP2_GPIO_LED1_RED) == 0)) {
82 ret = gpio_get_value(IGEP2_GPIO_LED1_RED);
83 if (ret == 0) {
84 pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n");
85 hwrev = IGEP2_BOARD_HWREV_C;
86 } else if (ret == 1) {
87 pr_info("IGEP2: Hardware Revision B/C (B compatible)\n");
88 hwrev = IGEP2_BOARD_HWREV_B;
89 } else {
90 pr_err("IGEP2: Unknown Hardware Revision\n");
91 hwrev = -1;
92 }
93 } else {
94 pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n"); 93 pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n");
95 pr_err("IGEP2: Unknown Hardware Revision\n"); 94 pr_err("IGEP2: Unknown Hardware Revision\n");
95 return;
96 }
97
98 ret = gpio_get_value(IGEP2_GPIO_LED1_RED);
99 if (ret == 0) {
100 pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n");
101 hwrev = IGEP2_BOARD_HWREV_C;
102 } else if (ret == 1) {
103 pr_info("IGEP2: Hardware Revision B/C (B compatible)\n");
104 hwrev = IGEP2_BOARD_HWREV_B;
105 } else {
106 pr_err("IGEP2: Unknown Hardware Revision\n");
107 hwrev = -1;
96 } 108 }
97 109
98 gpio_free(IGEP2_GPIO_LED1_RED); 110 gpio_free(IGEP2_GPIO_LED1_RED);
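igep2_get_revision() is restructured around early returns: an IGEP0030 machine short-circuits to IGEP3_BOARD_HWREV, a failed gpio_request_one() warns and bails out, and only then is the LED1 pin sampled to tell revision B from C before being freed again. Compressed to its essence as a sketch (the real code also reports an "unknown revision" case):

	if (gpio_request_one(IGEP2_GPIO_LED1_RED, GPIOF_IN, "GPIO_HW0_REV")) {
		pr_err("IGEP2: Unknown Hardware Revision\n");
		return;
	}
	/* strap reads 0 on rev C boards, 1 on rev B-compatible boards */
	hwrev = gpio_get_value(IGEP2_GPIO_LED1_RED) ?
			IGEP2_BOARD_HWREV_B : IGEP2_BOARD_HWREV_C;
	gpio_free(IGEP2_GPIO_LED1_RED);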
@@ -111,7 +123,7 @@ static void __init igep2_get_revision(void)
111 * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048) 123 * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048)
112 */ 124 */
113 125
114static struct mtd_partition igep2_onenand_partitions[] = { 126static struct mtd_partition igep_onenand_partitions[] = {
115 { 127 {
116 .name = "X-Loader", 128 .name = "X-Loader",
117 .offset = 0, 129 .offset = 0,
@@ -139,21 +151,21 @@ static struct mtd_partition igep2_onenand_partitions[] = {
139 }, 151 },
140}; 152};
141 153
142static struct omap_onenand_platform_data igep2_onenand_data = { 154static struct omap_onenand_platform_data igep_onenand_data = {
143 .parts = igep2_onenand_partitions, 155 .parts = igep_onenand_partitions,
144 .nr_parts = ARRAY_SIZE(igep2_onenand_partitions), 156 .nr_parts = ARRAY_SIZE(igep_onenand_partitions),
145 .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */ 157 .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */
146}; 158};
147 159
148static struct platform_device igep2_onenand_device = { 160static struct platform_device igep_onenand_device = {
149 .name = "omap2-onenand", 161 .name = "omap2-onenand",
150 .id = -1, 162 .id = -1,
151 .dev = { 163 .dev = {
152 .platform_data = &igep2_onenand_data, 164 .platform_data = &igep_onenand_data,
153 }, 165 },
154}; 166};
155 167
156static void __init igep2_flash_init(void) 168static void __init igep_flash_init(void)
157{ 169{
158 u8 cs = 0; 170 u8 cs = 0;
159 u8 onenandcs = GPMC_CS_NUM + 1; 171 u8 onenandcs = GPMC_CS_NUM + 1;
@@ -165,7 +177,7 @@ static void __init igep2_flash_init(void)
165 /* Check if NAND/oneNAND is configured */ 177 /* Check if NAND/oneNAND is configured */
166 if ((ret & 0xC00) == 0x800) 178 if ((ret & 0xC00) == 0x800)
167 /* NAND found */ 179 /* NAND found */
168 pr_err("IGEP2: Unsupported NAND found\n"); 180 pr_err("IGEP: Unsupported NAND found\n");
169 else { 181 else {
170 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); 182 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
171 if ((ret & 0x3F) == (ONENAND_MAP >> 24)) 183 if ((ret & 0x3F) == (ONENAND_MAP >> 24))
@@ -175,85 +187,46 @@ static void __init igep2_flash_init(void)
175 } 187 }
176 188
177 if (onenandcs > GPMC_CS_NUM) { 189 if (onenandcs > GPMC_CS_NUM) {
178 pr_err("IGEP2: Unable to find configuration in GPMC\n"); 190 pr_err("IGEP: Unable to find configuration in GPMC\n");
179 return; 191 return;
180 } 192 }
181 193
182 igep2_onenand_data.cs = onenandcs; 194 igep_onenand_data.cs = onenandcs;
183 195
184 if (platform_device_register(&igep2_onenand_device) < 0) 196 if (platform_device_register(&igep_onenand_device) < 0)
185 pr_err("IGEP2: Unable to register OneNAND device\n"); 197 pr_err("IGEP: Unable to register OneNAND device\n");
186} 198}
187 199
188#else 200#else
189static void __init igep2_flash_init(void) {} 201static void __init igep_flash_init(void) {}
190#endif 202#endif
191 203
192#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 204#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
193 205
194#include <linux/smsc911x.h> 206#include <linux/smsc911x.h>
207#include <plat/gpmc-smsc911x.h>
195 208
196static struct smsc911x_platform_config igep2_smsc911x_config = { 209static struct omap_smsc911x_platform_data smsc911x_cfg = {
197 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, 210 .cs = IGEP2_SMSC911X_CS,
198 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, 211 .gpio_irq = IGEP2_SMSC911X_GPIO,
199 .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS , 212 .gpio_reset = -EINVAL,
200 .phy_interface = PHY_INTERFACE_MODE_MII, 213 .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
201};
202
203static struct resource igep2_smsc911x_resources[] = {
204 {
205 .flags = IORESOURCE_MEM,
206 },
207 {
208 .start = OMAP_GPIO_IRQ(IGEP2_SMSC911X_GPIO),
209 .end = OMAP_GPIO_IRQ(IGEP2_SMSC911X_GPIO),
210 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
211 },
212};
213
214static struct platform_device igep2_smsc911x_device = {
215 .name = "smsc911x",
216 .id = 0,
217 .num_resources = ARRAY_SIZE(igep2_smsc911x_resources),
218 .resource = igep2_smsc911x_resources,
219 .dev = {
220 .platform_data = &igep2_smsc911x_config,
221 },
222}; 214};
223 215
224static inline void __init igep2_init_smsc911x(void) 216static inline void __init igep2_init_smsc911x(void)
225{ 217{
226 unsigned long cs_mem_base; 218 gpmc_smsc911x_init(&smsc911x_cfg);
227
228 if (gpmc_cs_request(IGEP2_SMSC911X_CS, SZ_16M, &cs_mem_base) < 0) {
229 pr_err("IGEP v2: Failed request for GPMC mem for smsc911x\n");
230 gpmc_cs_free(IGEP2_SMSC911X_CS);
231 return;
232 }
233
234 igep2_smsc911x_resources[0].start = cs_mem_base + 0x0;
235 igep2_smsc911x_resources[0].end = cs_mem_base + 0xff;
236
237 if ((gpio_request(IGEP2_SMSC911X_GPIO, "SMSC911X IRQ") == 0) &&
238 (gpio_direction_input(IGEP2_SMSC911X_GPIO) == 0)) {
239 gpio_export(IGEP2_SMSC911X_GPIO, 0);
240 } else {
241 pr_err("IGEP v2: Could not obtain gpio for for SMSC911X IRQ\n");
242 return;
243 }
244
245 platform_device_register(&igep2_smsc911x_device);
246} 219}
247 220
248#else 221#else
249static inline void __init igep2_init_smsc911x(void) { } 222static inline void __init igep2_init_smsc911x(void) { }
250#endif 223#endif
251 224
252static struct regulator_consumer_supply igep2_vmmc1_supply = 225static struct regulator_consumer_supply igep_vmmc1_supply =
253 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"); 226 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
254 227
255/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ 228/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
256static struct regulator_init_data igep2_vmmc1 = { 229static struct regulator_init_data igep_vmmc1 = {
257 .constraints = { 230 .constraints = {
258 .min_uV = 1850000, 231 .min_uV = 1850000,
259 .max_uV = 3150000, 232 .max_uV = 3150000,
@@ -264,13 +237,13 @@ static struct regulator_init_data igep2_vmmc1 = {
264 | REGULATOR_CHANGE_STATUS, 237 | REGULATOR_CHANGE_STATUS,
265 }, 238 },
266 .num_consumer_supplies = 1, 239 .num_consumer_supplies = 1,
267 .consumer_supplies = &igep2_vmmc1_supply, 240 .consumer_supplies = &igep_vmmc1_supply,
268}; 241};
269 242
270static struct regulator_consumer_supply igep2_vio_supply = 243static struct regulator_consumer_supply igep_vio_supply =
271 REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"); 244 REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
272 245
273static struct regulator_init_data igep2_vio = { 246static struct regulator_init_data igep_vio = {
274 .constraints = { 247 .constraints = {
275 .min_uV = 1800000, 248 .min_uV = 1800000,
276 .max_uV = 1800000, 249 .max_uV = 1800000,
@@ -282,34 +255,34 @@ static struct regulator_init_data igep2_vio = {
282 | REGULATOR_CHANGE_STATUS, 255 | REGULATOR_CHANGE_STATUS,
283 }, 256 },
284 .num_consumer_supplies = 1, 257 .num_consumer_supplies = 1,
285 .consumer_supplies = &igep2_vio_supply, 258 .consumer_supplies = &igep_vio_supply,
286}; 259};
287 260
288static struct regulator_consumer_supply igep2_vmmc2_supply = 261static struct regulator_consumer_supply igep_vmmc2_supply =
289 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"); 262 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
290 263
291static struct regulator_init_data igep2_vmmc2 = { 264static struct regulator_init_data igep_vmmc2 = {
292 .constraints = { 265 .constraints = {
293 .valid_modes_mask = REGULATOR_MODE_NORMAL, 266 .valid_modes_mask = REGULATOR_MODE_NORMAL,
294 .always_on = 1, 267 .always_on = 1,
295 }, 268 },
296 .num_consumer_supplies = 1, 269 .num_consumer_supplies = 1,
297 .consumer_supplies = &igep2_vmmc2_supply, 270 .consumer_supplies = &igep_vmmc2_supply,
298}; 271};
299 272
300static struct fixed_voltage_config igep2_vwlan = { 273static struct fixed_voltage_config igep_vwlan = {
301 .supply_name = "vwlan", 274 .supply_name = "vwlan",
302 .microvolts = 3300000, 275 .microvolts = 3300000,
303 .gpio = -EINVAL, 276 .gpio = -EINVAL,
304 .enabled_at_boot = 1, 277 .enabled_at_boot = 1,
305 .init_data = &igep2_vmmc2, 278 .init_data = &igep_vmmc2,
306}; 279};
307 280
308static struct platform_device igep2_vwlan_device = { 281static struct platform_device igep_vwlan_device = {
309 .name = "reg-fixed-voltage", 282 .name = "reg-fixed-voltage",
310 .id = 0, 283 .id = 0,
311 .dev = { 284 .dev = {
312 .platform_data = &igep2_vwlan, 285 .platform_data = &igep_vwlan,
313 }, 286 },
314}; 287};
315 288
@@ -334,20 +307,17 @@ static struct omap2_hsmmc_info mmc[] = {
334#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) 307#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
335#include <linux/leds.h> 308#include <linux/leds.h>
336 309
337static struct gpio_led igep2_gpio_leds[] = { 310static struct gpio_led igep_gpio_leds[] = {
338 [0] = { 311 [0] = {
339 .name = "gpio-led:red:d0", 312 .name = "gpio-led:red:d0",
340 .gpio = IGEP2_GPIO_LED0_RED,
341 .default_trigger = "default-off" 313 .default_trigger = "default-off"
342 }, 314 },
343 [1] = { 315 [1] = {
344 .name = "gpio-led:green:d0", 316 .name = "gpio-led:green:d0",
345 .gpio = IGEP2_GPIO_LED0_GREEN,
346 .default_trigger = "default-off", 317 .default_trigger = "default-off",
347 }, 318 },
348 [2] = { 319 [2] = {
349 .name = "gpio-led:red:d1", 320 .name = "gpio-led:red:d1",
350 .gpio = IGEP2_GPIO_LED1_RED,
351 .default_trigger = "default-off", 321 .default_trigger = "default-off",
352 }, 322 },
353 [3] = { 323 [3] = {
@@ -358,94 +328,119 @@ static struct gpio_led igep2_gpio_leds[] = {
358 }, 328 },
359}; 329};
360 330
361static struct gpio_led_platform_data igep2_led_pdata = { 331static struct gpio_led_platform_data igep_led_pdata = {
362 .leds = igep2_gpio_leds, 332 .leds = igep_gpio_leds,
363 .num_leds = ARRAY_SIZE(igep2_gpio_leds), 333 .num_leds = ARRAY_SIZE(igep_gpio_leds),
364}; 334};
365 335
366static struct platform_device igep2_led_device = { 336static struct platform_device igep_led_device = {
367 .name = "leds-gpio", 337 .name = "leds-gpio",
368 .id = -1, 338 .id = -1,
369 .dev = { 339 .dev = {
370 .platform_data = &igep2_led_pdata, 340 .platform_data = &igep_led_pdata,
371 }, 341 },
372}; 342};
373 343
374static void __init igep2_leds_init(void) 344static void __init igep_leds_init(void)
375{ 345{
376 platform_device_register(&igep2_led_device); 346 if (machine_is_igep0020()) {
347 igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED;
348 igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN;
349 igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED;
350 } else {
351 igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED;
352 igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN;
353 igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED;
354 }
355
356 platform_device_register(&igep_led_device);
377} 357}
378 358
379#else 359#else
380static inline void igep2_leds_init(void) 360static struct gpio igep_gpio_leds[] __initdata = {
361 { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d0" },
362 { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:green:d0" },
363 { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d1" },
364};
365
366static inline void igep_leds_init(void)
381{ 367{
382 if ((gpio_request(IGEP2_GPIO_LED0_RED, "gpio-led:red:d0") == 0) && 368 int i;
383 (gpio_direction_output(IGEP2_GPIO_LED0_RED, 0) == 0))
384 gpio_export(IGEP2_GPIO_LED0_RED, 0);
385 else
386 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n");
387 369
388 if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) && 370 if (machine_is_igep0020()) {
389 (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 0) == 0)) 371 igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED;
390 gpio_export(IGEP2_GPIO_LED0_GREEN, 0); 372 igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN;
391 else 373 igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED;
392 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n"); 374 } else {
375 igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED;
376 igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN;
377 igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED;
378 }
393 379
394 if ((gpio_request(IGEP2_GPIO_LED1_RED, "gpio-led:red:d1") == 0) && 380 if (gpio_request_array(igep_gpio_leds, ARRAY_SIZE(igep_gpio_leds))) {
395 (gpio_direction_output(IGEP2_GPIO_LED1_RED, 0) == 0)) 381 pr_warning("IGEP v2: Could not obtain leds gpios\n");
396 gpio_export(IGEP2_GPIO_LED1_RED, 0); 382 return;
397 else 383 }
398 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n");
399 384
385 for (i = 0; i < ARRAY_SIZE(igep_gpio_leds); i++)
386 gpio_export(igep_gpio_leds[i].gpio, 0);
400} 387}
401#endif 388#endif
402 389
403static int igep2_twl_gpio_setup(struct device *dev, 390static struct gpio igep2_twl_gpios[] = {
391 { -EINVAL, GPIOF_IN, "GPIO_EHCI_NOC" },
392 { -EINVAL, GPIOF_OUT_INIT_LOW, "GPIO_USBH_CPEN" },
393};
394
395static int igep_twl_gpio_setup(struct device *dev,
404 unsigned gpio, unsigned ngpio) 396 unsigned gpio, unsigned ngpio)
405{ 397{
398 int ret;
399
406 /* gpio + 0 is "mmc0_cd" (input/IRQ) */ 400 /* gpio + 0 is "mmc0_cd" (input/IRQ) */
407 mmc[0].gpio_cd = gpio + 0; 401 mmc[0].gpio_cd = gpio + 0;
408 omap2_hsmmc_init(mmc); 402 omap2_hsmmc_init(mmc);
409 403
410 /*
411 * REVISIT: need ehci-omap hooks for external VBUS
412 * power switch and overcurrent detect
413 */
414 if ((gpio_request(gpio + 1, "GPIO_EHCI_NOC") < 0) ||
415 (gpio_direction_input(gpio + 1) < 0))
416 pr_err("IGEP2: Could not obtain gpio for EHCI NOC");
417
418 /*
419 * TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN
420 * (out, active low)
421 */
422 if ((gpio_request(gpio + TWL4030_GPIO_MAX, "GPIO_USBH_CPEN") < 0) ||
423 (gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0) < 0))
424 pr_err("IGEP2: Could not obtain gpio for USBH_CPEN");
425
426 /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ 404 /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
427#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) 405#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
428 if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0) 406 ret = gpio_request_one(gpio + TWL4030_GPIO_MAX + 1, GPIOF_OUT_INIT_HIGH,
429 && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) 407 "gpio-led:green:d1");
408 if (ret == 0)
430 gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0); 409 gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
431 else 410 else
432 pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_GREEN\n"); 411 pr_warning("IGEP: Could not obtain gpio GPIO_LED1_GREEN\n");
433#else 412#else
434 igep2_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1; 413 igep_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
435#endif 414#endif
436 415
416 if (machine_is_igep0030())
417 return 0;
418
419 /*
420 * REVISIT: need ehci-omap hooks for external VBUS
421 * power switch and overcurrent detect
422 */
423 igep2_twl_gpios[0].gpio = gpio + 1;
424
425 /* TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN (out, active low) */
426 igep2_twl_gpios[1].gpio = gpio + TWL4030_GPIO_MAX;
427
428 ret = gpio_request_array(igep2_twl_gpios, ARRAY_SIZE(igep2_twl_gpios));
429 if (ret < 0)
430 pr_err("IGEP2: Could not obtain gpio for USBH_CPEN");
431
437 return 0; 432 return 0;
438}; 433};
439 434
440static struct twl4030_gpio_platform_data igep2_twl4030_gpio_pdata = { 435static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = {
441 .gpio_base = OMAP_MAX_GPIO_LINES, 436 .gpio_base = OMAP_MAX_GPIO_LINES,
442 .irq_base = TWL4030_GPIO_IRQ_BASE, 437 .irq_base = TWL4030_GPIO_IRQ_BASE,
443 .irq_end = TWL4030_GPIO_IRQ_END, 438 .irq_end = TWL4030_GPIO_IRQ_END,
444 .use_leds = true, 439 .use_leds = true,
445 .setup = igep2_twl_gpio_setup, 440 .setup = igep_twl_gpio_setup,
446}; 441};
447 442
448static struct twl4030_usb_data igep2_usb_data = { 443static struct twl4030_usb_data igep_usb_data = {
449 .usb_mode = T2_USB_MODE_ULPI, 444 .usb_mode = T2_USB_MODE_ULPI,
450}; 445};
451 446
@@ -507,16 +502,17 @@ static struct regulator_init_data igep2_vpll2 = {
507 502
508static void __init igep2_display_init(void) 503static void __init igep2_display_init(void)
509{ 504{
510 if (gpio_request(IGEP2_GPIO_DVI_PUP, "GPIO_DVI_PUP") && 505 int err = gpio_request_one(IGEP2_GPIO_DVI_PUP, GPIOF_OUT_INIT_HIGH,
511 gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1)) 506 "GPIO_DVI_PUP");
507 if (err)
512 pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n"); 508 pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n");
513} 509}
514 510
515static struct platform_device *igep2_devices[] __initdata = { 511static struct platform_device *igep_devices[] __initdata = {
516 &igep2_vwlan_device, 512 &igep_vwlan_device,
517}; 513};
518 514
519static void __init igep2_init_early(void) 515static void __init igep_init_early(void)
520{ 516{
521 omap2_init_common_infrastructure(); 517 omap2_init_common_infrastructure();
522 omap2_init_common_devices(m65kxxxxam_sdrc_params, 518 omap2_init_common_devices(m65kxxxxam_sdrc_params,
@@ -561,27 +557,15 @@ static struct twl4030_keypad_data igep2_keypad_pdata = {
561 .rep = 1, 557 .rep = 1,
562}; 558};
563 559
564static struct twl4030_platform_data igep2_twldata = { 560static struct twl4030_platform_data igep_twldata = {
565 .irq_base = TWL4030_IRQ_BASE, 561 .irq_base = TWL4030_IRQ_BASE,
566 .irq_end = TWL4030_IRQ_END, 562 .irq_end = TWL4030_IRQ_END,
567 563
568 /* platform_data for children goes here */ 564 /* platform_data for children goes here */
569 .usb = &igep2_usb_data, 565 .usb = &igep_usb_data,
570 .codec = &igep2_codec_data, 566 .gpio = &igep_twl4030_gpio_pdata,
571 .gpio = &igep2_twl4030_gpio_pdata, 567 .vmmc1 = &igep_vmmc1,
572 .keypad = &igep2_keypad_pdata, 568 .vio = &igep_vio,
573 .vmmc1 = &igep2_vmmc1,
574 .vpll2 = &igep2_vpll2,
575 .vio = &igep2_vio,
576};
577
578static struct i2c_board_info __initdata igep2_i2c1_boardinfo[] = {
579 {
580 I2C_BOARD_INFO("twl4030", 0x48),
581 .flags = I2C_CLIENT_WAKE,
582 .irq = INT_34XX_SYS_NIRQ,
583 .platform_data = &igep2_twldata,
584 },
585}; 569};
586 570
587static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = { 571static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = {
@@ -590,32 +574,29 @@ static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = {
590 }, 574 },
591}; 575};
592 576
593static void __init igep2_i2c_init(void) 577static void __init igep_i2c_init(void)
594{ 578{
595 int ret; 579 int ret;
596 580
597 ret = omap_register_i2c_bus(1, 2600, igep2_i2c1_boardinfo, 581 if (machine_is_igep0020()) {
598 ARRAY_SIZE(igep2_i2c1_boardinfo)); 582 /*
599 if (ret) 583 * Bus 3 is attached to the DVI port where devices like the
600 pr_warning("IGEP2: Could not register I2C1 bus (%d)\n", ret); 584 * pico DLP projector don't work reliably with 400kHz
585 */
586 ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo,
587 ARRAY_SIZE(igep2_i2c3_boardinfo));
588 if (ret)
589 pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
590
591 igep_twldata.codec = &igep2_codec_data;
592 igep_twldata.keypad = &igep2_keypad_pdata;
593 igep_twldata.vpll2 = &igep2_vpll2;
594 }
601 595
602 /* 596 omap3_pmic_init("twl4030", &igep_twldata);
603 * Bus 3 is attached to the DVI port where devices like the pico DLP
604 * projector don't work reliably with 400kHz
605 */
606 ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo,
607 ARRAY_SIZE(igep2_i2c3_boardinfo));
608 if (ret)
609 pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
610} 597}
611 598
612static struct omap_musb_board_data musb_board_data = { 599static const struct usbhs_omap_board_data igep2_usbhs_bdata __initconst = {
613 .interface_type = MUSB_INTERFACE_ULPI,
614 .mode = MUSB_OTG,
615 .power = 100,
616};
617
618static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
619 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, 600 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
620 .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, 601 .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
621 .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, 602 .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
@@ -626,6 +607,17 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
626 .reset_gpio_port[2] = -EINVAL, 607 .reset_gpio_port[2] = -EINVAL,
627}; 608};
628 609
610static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
611 .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
612 .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
613 .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
614
615 .phy_reset = true,
616 .reset_gpio_port[0] = -EINVAL,
617 .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET,
618 .reset_gpio_port[2] = -EINVAL,
619};
620
629#ifdef CONFIG_OMAP_MUX 621#ifdef CONFIG_OMAP_MUX
630static struct omap_board_mux board_mux[] __initdata = { 622static struct omap_board_mux board_mux[] __initdata = {
631 { .reg_offset = OMAP_MUX_TERMINATOR }, 623 { .reg_offset = OMAP_MUX_TERMINATOR },
@@ -633,82 +625,95 @@ static struct omap_board_mux board_mux[] __initdata = {
633#endif 625#endif
634 626
635#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) 627#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
628static struct gpio igep_wlan_bt_gpios[] __initdata = {
629 { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NPD" },
630 { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NRESET" },
631 { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_BT_NRESET" },
632};
636 633
637static void __init igep2_wlan_bt_init(void) 634static void __init igep_wlan_bt_init(void)
638{ 635{
639 unsigned npd, wreset, btreset; 636 int err;
640 637
641 /* GPIO's for WLAN-BT combo depends on hardware revision */ 638 /* GPIO's for WLAN-BT combo depends on hardware revision */
642 if (hwrev == IGEP2_BOARD_HWREV_B) { 639 if (hwrev == IGEP2_BOARD_HWREV_B) {
643 npd = IGEP2_RB_GPIO_WIFI_NPD; 640 igep_wlan_bt_gpios[0].gpio = IGEP2_RB_GPIO_WIFI_NPD;
644 wreset = IGEP2_RB_GPIO_WIFI_NRESET; 641 igep_wlan_bt_gpios[1].gpio = IGEP2_RB_GPIO_WIFI_NRESET;
645 btreset = IGEP2_RB_GPIO_BT_NRESET; 642 igep_wlan_bt_gpios[2].gpio = IGEP2_RB_GPIO_BT_NRESET;
646 } else if (hwrev == IGEP2_BOARD_HWREV_C) { 643 } else if (hwrev == IGEP2_BOARD_HWREV_C || machine_is_igep0030()) {
647 npd = IGEP2_RC_GPIO_WIFI_NPD; 644 igep_wlan_bt_gpios[0].gpio = IGEP2_RC_GPIO_WIFI_NPD;
648 wreset = IGEP2_RC_GPIO_WIFI_NRESET; 645 igep_wlan_bt_gpios[1].gpio = IGEP2_RC_GPIO_WIFI_NRESET;
649 btreset = IGEP2_RC_GPIO_BT_NRESET; 646 igep_wlan_bt_gpios[2].gpio = IGEP2_RC_GPIO_BT_NRESET;
650 } else 647 } else
651 return; 648 return;
652 649
653 /* Set GPIO's for WLAN-BT combo module */ 650 err = gpio_request_array(igep_wlan_bt_gpios,
654 if ((gpio_request(npd, "GPIO_WIFI_NPD") == 0) && 651 ARRAY_SIZE(igep_wlan_bt_gpios));
655 (gpio_direction_output(npd, 1) == 0)) { 652 if (err) {
656 gpio_export(npd, 0); 653 pr_warning("IGEP2: Could not obtain WIFI/BT gpios\n");
657 } else 654 return;
658 pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NPD\n"); 655 }
659 656
660 if ((gpio_request(wreset, "GPIO_WIFI_NRESET") == 0) && 657 gpio_export(igep_wlan_bt_gpios[0].gpio, 0);
661 (gpio_direction_output(wreset, 1) == 0)) { 658 gpio_export(igep_wlan_bt_gpios[1].gpio, 0);
662 gpio_export(wreset, 0); 659 gpio_export(igep_wlan_bt_gpios[2].gpio, 0);
663 gpio_set_value(wreset, 0); 660
664 udelay(10); 661 gpio_set_value(igep_wlan_bt_gpios[1].gpio, 0);
665 gpio_set_value(wreset, 1); 662 udelay(10);
666 } else 663 gpio_set_value(igep_wlan_bt_gpios[1].gpio, 1);
667 pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NRESET\n");
668 664
669 if ((gpio_request(btreset, "GPIO_BT_NRESET") == 0) &&
670 (gpio_direction_output(btreset, 1) == 0)) {
671 gpio_export(btreset, 0);
672 } else
673 pr_warning("IGEP2: Could not obtain gpio GPIO_BT_NRESET\n");
674} 665}
675#else 666#else
676static inline void __init igep2_wlan_bt_init(void) { } 667static inline void __init igep_wlan_bt_init(void) { }
677#endif 668#endif
678 669
679static void __init igep2_init(void) 670static void __init igep_init(void)
680{ 671{
681 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 672 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
682 673
683 /* Get IGEP2 hardware revision */ 674 /* Get IGEP2 hardware revision */
684 igep2_get_revision(); 675 igep2_get_revision();
685 /* Register I2C busses and drivers */ 676 /* Register I2C busses and drivers */
686 igep2_i2c_init(); 677 igep_i2c_init();
687 platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices)); 678 platform_add_devices(igep_devices, ARRAY_SIZE(igep_devices));
688 omap_display_init(&igep2_dss_data);
689 omap_serial_init(); 679 omap_serial_init();
690 usb_musb_init(&musb_board_data); 680 usb_musb_init(NULL);
691 usbhs_init(&usbhs_bdata);
692 681
693 igep2_flash_init(); 682 igep_flash_init();
694 igep2_leds_init(); 683 igep_leds_init();
695 igep2_display_init();
696 igep2_init_smsc911x();
697 684
698 /* 685 /*
699 * WLAN-BT combo module from MuRata which has a Marvell WLAN 686 * WLAN-BT combo module from MuRata which has a Marvell WLAN
700 * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface. 687 * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface.
701 */ 688 */
702 igep2_wlan_bt_init(); 689 igep_wlan_bt_init();
703 690
691 if (machine_is_igep0020()) {
692 omap_display_init(&igep2_dss_data);
693 igep2_display_init();
694 igep2_init_smsc911x();
695 usbhs_init(&igep2_usbhs_bdata);
696 } else {
697 usbhs_init(&igep3_usbhs_bdata);
698 }
704} 699}
705 700
706MACHINE_START(IGEP0020, "IGEP v2 board") 701MACHINE_START(IGEP0020, "IGEP v2 board")
707 .boot_params = 0x80000100, 702 .boot_params = 0x80000100,
708 .reserve = omap_reserve, 703 .reserve = omap_reserve,
709 .map_io = omap3_map_io, 704 .map_io = omap3_map_io,
710 .init_early = igep2_init_early, 705 .init_early = igep_init_early,
706 .init_irq = omap_init_irq,
707 .init_machine = igep_init,
708 .timer = &omap_timer,
709MACHINE_END
710
711MACHINE_START(IGEP0030, "IGEP OMAP3 module")
712 .boot_params = 0x80000100,
713 .reserve = omap_reserve,
714 .map_io = omap3_map_io,
715 .init_early = igep_init_early,
711 .init_irq = omap_init_irq, 716 .init_irq = omap_init_irq,
712 .init_machine = igep2_init, 717 .init_machine = igep_init,
713 .timer = &omap_timer, 718 .timer = &omap_timer,
714MACHINE_END 719MACHINE_END
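With board-igep0030.c deleted below, the IGEP v2 board and the IGEP OMAP3 module now live in one board file: both MACHINE_START entries point at the same igep_init()/igep_init_early() pair, and the per-board differences (LED and USB-hub reset GPIOs, DSS and smsc911x presence, which EHCI port carries the PHY) are chosen at run time via machine_is_igep0020()/machine_is_igep0030(). The dispatch shape, reduced to a sketch (most MACHINE_START fields omitted; the port comments follow the two usbhs board-data blocks above):

static void __init igep_init(void)
{
	/* ...bring-up shared by both boards... */

	if (machine_is_igep0020()) {
		/* IGEP v2: DSS, on-board smsc911x, EHCI PHY on the first port */
		usbhs_init(&igep2_usbhs_bdata);
	} else {
		/* IGEP OMAP3 module: EHCI PHY on the second port instead */
		usbhs_init(&igep3_usbhs_bdata);
	}
}

MACHINE_START(IGEP0020, "IGEP v2 board")
	.init_machine	= igep_init,
	/* ... */
MACHINE_END

MACHINE_START(IGEP0030, "IGEP OMAP3 module")
	.init_machine	= igep_init,
	/* ... */
MACHINE_END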
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
deleted file mode 100644
index 2cf86c3cb1a3..000000000000
--- a/arch/arm/mach-omap2/board-igep0030.c
+++ /dev/null
@@ -1,458 +0,0 @@
1/*
2 * Copyright (C) 2010 - ISEE 2007 SL
3 *
4 * Modified from mach-omap2/board-generic.c
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/platform_device.h>
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/clk.h>
17#include <linux/io.h>
18#include <linux/gpio.h>
19#include <linux/interrupt.h>
20
21#include <linux/regulator/machine.h>
22#include <linux/regulator/fixed.h>
23#include <linux/i2c/twl.h>
24#include <linux/mmc/host.h>
25
26#include <asm/mach-types.h>
27#include <asm/mach/arch.h>
28
29#include <plat/board.h>
30#include <plat/common.h>
31#include <plat/gpmc.h>
32#include <plat/usb.h>
33#include <plat/onenand.h>
34
35#include "mux.h"
36#include "hsmmc.h"
37#include "sdram-numonyx-m65kxxxxam.h"
38
39#define IGEP3_GPIO_LED0_GREEN 54
40#define IGEP3_GPIO_LED0_RED 53
41#define IGEP3_GPIO_LED1_RED 16
42
43#define IGEP3_GPIO_WIFI_NPD 138
44#define IGEP3_GPIO_WIFI_NRESET 139
45#define IGEP3_GPIO_BT_NRESET 137
46
47#define IGEP3_GPIO_USBH_NRESET 183
48
49
50#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
51 defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
52
53#define ONENAND_MAP 0x20000000
54
55/*
56 * x2 Flash built-in COMBO POP MEMORY
57 * Since the device is equipped with two DataRAMs, and two-plane NAND
58 * Flash memory array, these two component enables simultaneous program
59 * of 4KiB. Plane1 has only even blocks such as block0, block2, block4
60 * while Plane2 has only odd blocks such as block1, block3, block5.
61 * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048)
62 */
63
64static struct mtd_partition igep3_onenand_partitions[] = {
65 {
66 .name = "X-Loader",
67 .offset = 0,
68 .size = 2 * (64*(2*2048))
69 },
70 {
71 .name = "U-Boot",
72 .offset = MTDPART_OFS_APPEND,
73 .size = 6 * (64*(2*2048)),
74 },
75 {
76 .name = "Environment",
77 .offset = MTDPART_OFS_APPEND,
78 .size = 2 * (64*(2*2048)),
79 },
80 {
81 .name = "Kernel",
82 .offset = MTDPART_OFS_APPEND,
83 .size = 12 * (64*(2*2048)),
84 },
85 {
86 .name = "File System",
87 .offset = MTDPART_OFS_APPEND,
88 .size = MTDPART_SIZ_FULL,
89 },
90};
91
92static struct omap_onenand_platform_data igep3_onenand_pdata = {
93 .parts = igep3_onenand_partitions,
94 .nr_parts = ARRAY_SIZE(igep3_onenand_partitions),
95 .onenand_setup = NULL,
96 .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */
97};
98
99static struct platform_device igep3_onenand_device = {
100 .name = "omap2-onenand",
101 .id = -1,
102 .dev = {
103 .platform_data = &igep3_onenand_pdata,
104 },
105};
106
107static void __init igep3_flash_init(void)
108{
109 u8 cs = 0;
110 u8 onenandcs = GPMC_CS_NUM + 1;
111
112 for (cs = 0; cs < GPMC_CS_NUM; cs++) {
113 u32 ret;
114 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
115
116 /* Check if NAND/oneNAND is configured */
117 if ((ret & 0xC00) == 0x800)
118 /* NAND found */
119 pr_err("IGEP3: Unsupported NAND found\n");
120 else {
121 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
122
123 if ((ret & 0x3F) == (ONENAND_MAP >> 24))
124 /* OneNAND found */
125 onenandcs = cs;
126 }
127 }
128
129 if (onenandcs > GPMC_CS_NUM) {
130 pr_err("IGEP3: Unable to find configuration in GPMC\n");
131 return;
132 }
133
134 igep3_onenand_pdata.cs = onenandcs;
135
136 if (platform_device_register(&igep3_onenand_device) < 0)
137 pr_err("IGEP3: Unable to register OneNAND device\n");
138}
139
140#else
141static void __init igep3_flash_init(void) {}
142#endif
143
144static struct regulator_consumer_supply igep3_vmmc1_supply =
145 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
146
147/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
148static struct regulator_init_data igep3_vmmc1 = {
149 .constraints = {
150 .min_uV = 1850000,
151 .max_uV = 3150000,
152 .valid_modes_mask = REGULATOR_MODE_NORMAL
153 | REGULATOR_MODE_STANDBY,
154 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
155 | REGULATOR_CHANGE_MODE
156 | REGULATOR_CHANGE_STATUS,
157 },
158 .num_consumer_supplies = 1,
159 .consumer_supplies = &igep3_vmmc1_supply,
160};
161
162static struct regulator_consumer_supply igep3_vio_supply =
163 REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
164
165static struct regulator_init_data igep3_vio = {
166 .constraints = {
167 .min_uV = 1800000,
168 .max_uV = 1800000,
169 .apply_uV = 1,
170 .valid_modes_mask = REGULATOR_MODE_NORMAL
171 | REGULATOR_MODE_STANDBY,
172 .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
173 | REGULATOR_CHANGE_MODE
174 | REGULATOR_CHANGE_STATUS,
175 },
176 .num_consumer_supplies = 1,
177 .consumer_supplies = &igep3_vio_supply,
178};
179
180static struct regulator_consumer_supply igep3_vmmc2_supply =
181 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
182
183static struct regulator_init_data igep3_vmmc2 = {
184 .constraints = {
185 .valid_modes_mask = REGULATOR_MODE_NORMAL,
186 .always_on = 1,
187 },
188 .num_consumer_supplies = 1,
189 .consumer_supplies = &igep3_vmmc2_supply,
190};
191
192static struct fixed_voltage_config igep3_vwlan = {
193 .supply_name = "vwlan",
194 .microvolts = 3300000,
195 .gpio = -EINVAL,
196 .enabled_at_boot = 1,
197 .init_data = &igep3_vmmc2,
198};
199
200static struct platform_device igep3_vwlan_device = {
201 .name = "reg-fixed-voltage",
202 .id = 0,
203 .dev = {
204 .platform_data = &igep3_vwlan,
205 },
206};
207
208static struct omap2_hsmmc_info mmc[] = {
209 [0] = {
210 .mmc = 1,
211 .caps = MMC_CAP_4_BIT_DATA,
212 .gpio_cd = -EINVAL,
213 .gpio_wp = -EINVAL,
214 },
215#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
216 [1] = {
217 .mmc = 2,
218 .caps = MMC_CAP_4_BIT_DATA,
219 .gpio_cd = -EINVAL,
220 .gpio_wp = -EINVAL,
221 },
222#endif
223 {} /* Terminator */
224};
225
226#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
227#include <linux/leds.h>
228
229static struct gpio_led igep3_gpio_leds[] = {
230 [0] = {
231 .name = "gpio-led:red:d0",
232 .gpio = IGEP3_GPIO_LED0_RED,
233 .default_trigger = "default-off"
234 },
235 [1] = {
236 .name = "gpio-led:green:d0",
237 .gpio = IGEP3_GPIO_LED0_GREEN,
238 .default_trigger = "default-off",
239 },
240 [2] = {
241 .name = "gpio-led:red:d1",
242 .gpio = IGEP3_GPIO_LED1_RED,
243 .default_trigger = "default-off",
244 },
245 [3] = {
246 .name = "gpio-led:green:d1",
247 .default_trigger = "heartbeat",
248 .gpio = -EINVAL, /* gets replaced */
249 },
250};
251
252static struct gpio_led_platform_data igep3_led_pdata = {
253 .leds = igep3_gpio_leds,
254 .num_leds = ARRAY_SIZE(igep3_gpio_leds),
255};
256
257static struct platform_device igep3_led_device = {
258 .name = "leds-gpio",
259 .id = -1,
260 .dev = {
261 .platform_data = &igep3_led_pdata,
262 },
263};
264
265static void __init igep3_leds_init(void)
266{
267 platform_device_register(&igep3_led_device);
268}
269
270#else
271static inline void igep3_leds_init(void)
272{
273 if ((gpio_request(IGEP3_GPIO_LED0_RED, "gpio-led:red:d0") == 0) &&
274 (gpio_direction_output(IGEP3_GPIO_LED0_RED, 1) == 0)) {
275 gpio_export(IGEP3_GPIO_LED0_RED, 0);
276 gpio_set_value(IGEP3_GPIO_LED0_RED, 1);
277 } else
278 pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_RED\n");
279
280 if ((gpio_request(IGEP3_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) &&
281 (gpio_direction_output(IGEP3_GPIO_LED0_GREEN, 1) == 0)) {
282 gpio_export(IGEP3_GPIO_LED0_GREEN, 0);
283 gpio_set_value(IGEP3_GPIO_LED0_GREEN, 1);
284 } else
285 pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_GREEN\n");
286
287 if ((gpio_request(IGEP3_GPIO_LED1_RED, "gpio-led:red:d1") == 0) &&
288 (gpio_direction_output(IGEP3_GPIO_LED1_RED, 1) == 0)) {
289 gpio_export(IGEP3_GPIO_LED1_RED, 0);
290 gpio_set_value(IGEP3_GPIO_LED1_RED, 1);
291 } else
292 pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_RED\n");
293}
294#endif
295
296static int igep3_twl4030_gpio_setup(struct device *dev,
297 unsigned gpio, unsigned ngpio)
298{
299 /* gpio + 0 is "mmc0_cd" (input/IRQ) */
300 mmc[0].gpio_cd = gpio + 0;
301 omap2_hsmmc_init(mmc);
302
303 /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
304#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
305 if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
306 && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) {
307 gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
308 gpio_set_value(gpio + TWL4030_GPIO_MAX + 1, 0);
309 } else
310 pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_GREEN\n");
311#else
312 igep3_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
313#endif
314
315 return 0;
316};
317
318static struct twl4030_gpio_platform_data igep3_twl4030_gpio_pdata = {
319 .gpio_base = OMAP_MAX_GPIO_LINES,
320 .irq_base = TWL4030_GPIO_IRQ_BASE,
321 .irq_end = TWL4030_GPIO_IRQ_END,
322 .use_leds = true,
323 .setup = igep3_twl4030_gpio_setup,
324};
325
326static struct twl4030_usb_data igep3_twl4030_usb_data = {
327 .usb_mode = T2_USB_MODE_ULPI,
328};
329
330static struct platform_device *igep3_devices[] __initdata = {
331 &igep3_vwlan_device,
332};
333
334static void __init igep3_init_early(void)
335{
336 omap2_init_common_infrastructure();
337 omap2_init_common_devices(m65kxxxxam_sdrc_params,
338 m65kxxxxam_sdrc_params);
339}
340
341static struct twl4030_platform_data igep3_twl4030_pdata = {
342 .irq_base = TWL4030_IRQ_BASE,
343 .irq_end = TWL4030_IRQ_END,
344
345 /* platform_data for children goes here */
346 .usb = &igep3_twl4030_usb_data,
347 .gpio = &igep3_twl4030_gpio_pdata,
348 .vmmc1 = &igep3_vmmc1,
349 .vio = &igep3_vio,
350};
351
352static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = {
353 {
354 I2C_BOARD_INFO("twl4030", 0x48),
355 .flags = I2C_CLIENT_WAKE,
356 .irq = INT_34XX_SYS_NIRQ,
357 .platform_data = &igep3_twl4030_pdata,
358 },
359};
360
361static int __init igep3_i2c_init(void)
362{
363 omap_register_i2c_bus(1, 2600, igep3_i2c_boardinfo,
364 ARRAY_SIZE(igep3_i2c_boardinfo));
365
366 return 0;
367}
368
369static struct omap_musb_board_data musb_board_data = {
370 .interface_type = MUSB_INTERFACE_ULPI,
371 .mode = MUSB_OTG,
372 .power = 100,
373};
374
375#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
376
377static void __init igep3_wifi_bt_init(void)
378{
379 /* Configure MUX values for W-LAN + Bluetooth GPIO's */
380 omap_mux_init_gpio(IGEP3_GPIO_WIFI_NPD, OMAP_PIN_OUTPUT);
381 omap_mux_init_gpio(IGEP3_GPIO_WIFI_NRESET, OMAP_PIN_OUTPUT);
382 omap_mux_init_gpio(IGEP3_GPIO_BT_NRESET, OMAP_PIN_OUTPUT);
383
384 /* Set GPIO's for W-LAN + Bluetooth combo module */
385 if ((gpio_request(IGEP3_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) &&
386 (gpio_direction_output(IGEP3_GPIO_WIFI_NPD, 1) == 0)) {
387 gpio_export(IGEP3_GPIO_WIFI_NPD, 0);
388 } else
389 pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NPD\n");
390
391 if ((gpio_request(IGEP3_GPIO_WIFI_NRESET, "GPIO_WIFI_NRESET") == 0) &&
392 (gpio_direction_output(IGEP3_GPIO_WIFI_NRESET, 1) == 0)) {
393 gpio_export(IGEP3_GPIO_WIFI_NRESET, 0);
394 gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 0);
395 udelay(10);
396 gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 1);
397 } else
398 pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NRESET\n");
399
400 if ((gpio_request(IGEP3_GPIO_BT_NRESET, "GPIO_BT_NRESET") == 0) &&
401 (gpio_direction_output(IGEP3_GPIO_BT_NRESET, 1) == 0)) {
402 gpio_export(IGEP3_GPIO_BT_NRESET, 0);
403 } else
404 pr_warning("IGEP3: Could not obtain gpio GPIO_BT_NRESET\n");
405}
406#else
407void __init igep3_wifi_bt_init(void) {}
408#endif
409
410static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
411 .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
412 .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
413 .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
414
415 .phy_reset = true,
416 .reset_gpio_port[0] = -EINVAL,
417 .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET,
418 .reset_gpio_port[2] = -EINVAL,
419};
420
421#ifdef CONFIG_OMAP_MUX
422static struct omap_board_mux board_mux[] __initdata = {
423 OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
424 { .reg_offset = OMAP_MUX_TERMINATOR },
425};
426#endif
427
428static void __init igep3_init(void)
429{
430 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
431
432 /* Register I2C busses and drivers */
433 igep3_i2c_init();
434 platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices));
435 omap_serial_init();
436 usb_musb_init(&musb_board_data);
437 usbhs_init(&usbhs_bdata);
438
439 igep3_flash_init();
440 igep3_leds_init();
441
442 /*
443 * WLAN-BT combo module from MuRata which has a Marvell WLAN
444 * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface.
445 */
446 igep3_wifi_bt_init();
447
448}
449
450MACHINE_START(IGEP0030, "IGEP OMAP3 module")
451 .boot_params = 0x80000100,
452 .reserve = omap_reserve,
453 .map_io = omap3_map_io,
454 .init_early = igep3_init_early,
455 .init_irq = omap_init_irq,
456 .init_machine = igep3_init,
457 .timer = &omap_timer,
458MACHINE_END
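
A note on the OneNAND geometry behind the partition table in the deleted board-igep0030.c above: the erase-block figure 64*(2*2048) comes from two 2048-byte planes programmed together (a 4 KiB logical page) times 64 pages per block. A short sketch spelling out the resulting partition sizes, assuming exactly that geometry:

/* Geometry per the comment in the deleted file: 2 planes x 2048 B = 4 KiB
 * logical page, 64 pages per erase block = 256 KiB per block. */
#define EXAMPLE_ONENAND_BLOCK_SIZE	(64 * (2 * 2048))	/* 262144 B = 256 KiB */

/*
 * Partition sizes from the table, expressed in bytes:
 *   X-Loader      2 blocks =  512 KiB
 *   U-Boot        6 blocks =  1.5 MiB
 *   Environment   2 blocks =  512 KiB
 *   Kernel       12 blocks =    3 MiB
 *   File System   MTDPART_SIZ_FULL (the rest of the device)
 */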
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index e2ba77957a8c..f7d6038075f0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -22,7 +22,6 @@
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25#include <linux/spi/ads7846.h>
26#include <linux/regulator/machine.h> 25#include <linux/regulator/machine.h>
27#include <linux/i2c/twl.h> 26#include <linux/i2c/twl.h>
28#include <linux/io.h> 27#include <linux/io.h>
@@ -43,47 +42,19 @@
43 42
44#include <asm/delay.h> 43#include <asm/delay.h>
45#include <plat/usb.h> 44#include <plat/usb.h>
45#include <plat/gpmc-smsc911x.h>
46 46
47#include "board-flash.h" 47#include "board-flash.h"
48#include "mux.h" 48#include "mux.h"
49#include "hsmmc.h" 49#include "hsmmc.h"
50#include "control.h" 50#include "control.h"
51#include "common-board-devices.h"
51 52
52#define LDP_SMSC911X_CS 1 53#define LDP_SMSC911X_CS 1
53#define LDP_SMSC911X_GPIO 152 54#define LDP_SMSC911X_GPIO 152
54#define DEBUG_BASE 0x08000000 55#define DEBUG_BASE 0x08000000
55#define LDP_ETHR_START DEBUG_BASE 56#define LDP_ETHR_START DEBUG_BASE
56 57
57static struct resource ldp_smsc911x_resources[] = {
58 [0] = {
59 .start = LDP_ETHR_START,
60 .end = LDP_ETHR_START + SZ_4K,
61 .flags = IORESOURCE_MEM,
62 },
63 [1] = {
64 .start = 0,
65 .end = 0,
66 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
67 },
68};
69
70static struct smsc911x_platform_config ldp_smsc911x_config = {
71 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
72 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
73 .flags = SMSC911X_USE_32BIT,
74 .phy_interface = PHY_INTERFACE_MODE_MII,
75};
76
77static struct platform_device ldp_smsc911x_device = {
78 .name = "smsc911x",
79 .id = -1,
80 .num_resources = ARRAY_SIZE(ldp_smsc911x_resources),
81 .resource = ldp_smsc911x_resources,
82 .dev = {
83 .platform_data = &ldp_smsc911x_config,
84 },
85};
86
87static uint32_t board_keymap[] = { 58static uint32_t board_keymap[] = {
88 KEY(0, 0, KEY_1), 59 KEY(0, 0, KEY_1),
89 KEY(1, 0, KEY_2), 60 KEY(1, 0, KEY_2),
@@ -197,82 +168,16 @@ static struct platform_device ldp_gpio_keys_device = {
197 }, 168 },
198}; 169};
199 170
200static int ts_gpio; 171static struct omap_smsc911x_platform_data smsc911x_cfg = {
201 172 .cs = LDP_SMSC911X_CS,
202/** 173 .gpio_irq = LDP_SMSC911X_GPIO,
203 * @brief ads7846_dev_init : Requests & sets GPIO line for pen-irq 174 .gpio_reset = -EINVAL,
204 * 175 .flags = SMSC911X_USE_32BIT,
205 * @return - void. If request gpio fails then Flag KERN_ERR.
206 */
207static void ads7846_dev_init(void)
208{
209 if (gpio_request(ts_gpio, "ads7846 irq") < 0) {
210 printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
211 return;
212 }
213
214 gpio_direction_input(ts_gpio);
215 gpio_set_debounce(ts_gpio, 310);
216}
217
218static int ads7846_get_pendown_state(void)
219{
220 return !gpio_get_value(ts_gpio);
221}
222
223static struct ads7846_platform_data tsc2046_config __initdata = {
224 .get_pendown_state = ads7846_get_pendown_state,
225 .keep_vref_on = 1,
226};
227
228static struct omap2_mcspi_device_config tsc2046_mcspi_config = {
229 .turbo_mode = 0,
230 .single_channel = 1, /* 0: slave, 1: master */
231};
232
233static struct spi_board_info ldp_spi_board_info[] __initdata = {
234 [0] = {
235 /*
236 * TSC2046 operates at a max frequency of 2MHz, so
237 * operate slightly below at 1.5MHz
238 */
239 .modalias = "ads7846",
240 .bus_num = 1,
241 .chip_select = 0,
242 .max_speed_hz = 1500000,
243 .controller_data = &tsc2046_mcspi_config,
244 .irq = 0,
245 .platform_data = &tsc2046_config,
246 },
247}; 176};
248 177
249static inline void __init ldp_init_smsc911x(void) 178static inline void __init ldp_init_smsc911x(void)
250{ 179{
251 int eth_cs; 180 gpmc_smsc911x_init(&smsc911x_cfg);
252 unsigned long cs_mem_base;
253 int eth_gpio = 0;
254
255 eth_cs = LDP_SMSC911X_CS;
256
257 if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) {
258 printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n");
259 return;
260 }
261
262 ldp_smsc911x_resources[0].start = cs_mem_base + 0x0;
263 ldp_smsc911x_resources[0].end = cs_mem_base + 0xff;
264 udelay(100);
265
266 eth_gpio = LDP_SMSC911X_GPIO;
267
268 ldp_smsc911x_resources[1].start = OMAP_GPIO_IRQ(eth_gpio);
269
270 if (gpio_request(eth_gpio, "smsc911x irq") < 0) {
271 printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
272 eth_gpio);
273 return;
274 }
275 gpio_direction_input(eth_gpio);
276} 181}
277 182
278static struct platform_device ldp_lcd_device = { 183static struct platform_device ldp_lcd_device = {
@@ -360,19 +265,9 @@ static struct twl4030_platform_data ldp_twldata = {
360 .keypad = &ldp_kp_twl4030_data, 265 .keypad = &ldp_kp_twl4030_data,
361}; 266};
362 267
363static struct i2c_board_info __initdata ldp_i2c_boardinfo[] = {
364 {
365 I2C_BOARD_INFO("twl4030", 0x48),
366 .flags = I2C_CLIENT_WAKE,
367 .irq = INT_34XX_SYS_NIRQ,
368 .platform_data = &ldp_twldata,
369 },
370};
371
372static int __init omap_i2c_init(void) 268static int __init omap_i2c_init(void)
373{ 269{
374 omap_register_i2c_bus(1, 2600, ldp_i2c_boardinfo, 270 omap3_pmic_init("twl4030", &ldp_twldata);
375 ARRAY_SIZE(ldp_i2c_boardinfo));
376 omap_register_i2c_bus(2, 400, NULL, 0); 271 omap_register_i2c_bus(2, 400, NULL, 0);
377 omap_register_i2c_bus(3, 400, NULL, 0); 272 omap_register_i2c_bus(3, 400, NULL, 0);
378 return 0; 273 return 0;
@@ -389,7 +284,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
389}; 284};
390 285
391static struct platform_device *ldp_devices[] __initdata = { 286static struct platform_device *ldp_devices[] __initdata = {
392 &ldp_smsc911x_device,
393 &ldp_lcd_device, 287 &ldp_lcd_device,
394 &ldp_gpio_keys_device, 288 &ldp_gpio_keys_device,
395}; 289};
@@ -400,12 +294,6 @@ static struct omap_board_mux board_mux[] __initdata = {
400}; 294};
401#endif 295#endif
402 296
403static struct omap_musb_board_data musb_board_data = {
404 .interface_type = MUSB_INTERFACE_ULPI,
405 .mode = MUSB_OTG,
406 .power = 100,
407};
408
409static struct mtd_partition ldp_nand_partitions[] = { 297static struct mtd_partition ldp_nand_partitions[] = {
410 /* All the partition sizes are listed in terms of NAND block size */ 298 /* All the partition sizes are listed in terms of NAND block size */
411 { 299 {
@@ -446,13 +334,9 @@ static void __init omap_ldp_init(void)
446 ldp_init_smsc911x(); 334 ldp_init_smsc911x();
447 omap_i2c_init(); 335 omap_i2c_init();
448 platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices)); 336 platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices));
449 ts_gpio = 54; 337 omap_ads7846_init(1, 54, 310, NULL);
450 ldp_spi_board_info[0].irq = gpio_to_irq(ts_gpio);
451 spi_register_board_info(ldp_spi_board_info,
452 ARRAY_SIZE(ldp_spi_board_info));
453 ads7846_dev_init();
454 omap_serial_init(); 338 omap_serial_init();
455 usb_musb_init(&musb_board_data); 339 usb_musb_init(NULL);
456 board_nand_init(ldp_nand_partitions, 340 board_nand_init(ldp_nand_partitions,
457 ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0); 341 ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);
458 342
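
The board-ldp.c conversion above moves the SMSC911x wiring behind gpmc_smsc911x_init(). As a reference for the pattern, a minimal sketch of a board using the helper; the chip-select and interrupt GPIO numbers here are purely illustrative, and SMSC911X_USE_32BIT comes from <linux/smsc911x.h>:

#include <linux/smsc911x.h>
#include <plat/gpmc-smsc911x.h>

#define EXAMPLE_SMSC911X_CS		5	/* GPMC chip-select wired to the NIC */
#define EXAMPLE_SMSC911X_IRQ_GPIO	152	/* GPIO acting as the interrupt line */

static struct omap_smsc911x_platform_data example_smsc911x_cfg = {
	.cs		= EXAMPLE_SMSC911X_CS,
	.gpio_irq	= EXAMPLE_SMSC911X_IRQ_GPIO,
	.gpio_reset	= -EINVAL,	/* no reset GPIO on this hypothetical board */
	.flags		= SMSC911X_USE_32BIT,
};

static void __init example_init_smsc911x(void)
{
	/* Claims the GPMC memory window and the IRQ GPIO, then registers
	 * the "smsc911x" platform device on the board's behalf. */
	gpmc_smsc911x_init(&example_smsc911x_cfg);
}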
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index e710cd9e079b..8d74318ed495 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -106,14 +106,13 @@ static void __init n8x0_usb_init(void)
106 static char announce[] __initdata = KERN_INFO "TUSB 6010\n"; 106 static char announce[] __initdata = KERN_INFO "TUSB 6010\n";
107 107
108 /* PM companion chip power control pin */ 108 /* PM companion chip power control pin */
109 ret = gpio_request(TUSB6010_GPIO_ENABLE, "TUSB6010 enable"); 109 ret = gpio_request_one(TUSB6010_GPIO_ENABLE, GPIOF_OUT_INIT_LOW,
110 "TUSB6010 enable");
110 if (ret != 0) { 111 if (ret != 0) {
111 printk(KERN_ERR "Could not get TUSB power GPIO%i\n", 112 printk(KERN_ERR "Could not get TUSB power GPIO%i\n",
112 TUSB6010_GPIO_ENABLE); 113 TUSB6010_GPIO_ENABLE);
113 return; 114 return;
114 } 115 }
115 gpio_direction_output(TUSB6010_GPIO_ENABLE, 0);
116
117 tusb_set_power(0); 116 tusb_set_power(0);
118 117
119 ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2, 118 ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2,
@@ -494,8 +493,12 @@ static struct omap_mmc_platform_data mmc1_data = {
494 493
495static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC]; 494static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
496 495
497static void __init n8x0_mmc_init(void) 496static struct gpio n810_emmc_gpios[] __initdata = {
497 { N810_EMMC_VSD_GPIO, GPIOF_OUT_INIT_LOW, "MMC slot 2 Vddf" },
498 { N810_EMMC_VIO_GPIO, GPIOF_OUT_INIT_LOW, "MMC slot 2 Vdd" },
499};
498 500
501static void __init n8x0_mmc_init(void)
499{ 502{
500 int err; 503 int err;
501 504
@@ -512,27 +515,18 @@ static void __init n8x0_mmc_init(void)
512 mmc1_data.slots[1].ban_openended = 1; 515 mmc1_data.slots[1].ban_openended = 1;
513 } 516 }
514 517
515 err = gpio_request(N8X0_SLOT_SWITCH_GPIO, "MMC slot switch"); 518 err = gpio_request_one(N8X0_SLOT_SWITCH_GPIO, GPIOF_OUT_INIT_LOW,
519 "MMC slot switch");
516 if (err) 520 if (err)
517 return; 521 return;
518 522
519 gpio_direction_output(N8X0_SLOT_SWITCH_GPIO, 0);
520
521 if (machine_is_nokia_n810()) { 523 if (machine_is_nokia_n810()) {
522 err = gpio_request(N810_EMMC_VSD_GPIO, "MMC slot 2 Vddf"); 524 err = gpio_request_array(n810_emmc_gpios,
523 if (err) { 525 ARRAY_SIZE(n810_emmc_gpios));
524 gpio_free(N8X0_SLOT_SWITCH_GPIO);
525 return;
526 }
527 gpio_direction_output(N810_EMMC_VSD_GPIO, 0);
528
529 err = gpio_request(N810_EMMC_VIO_GPIO, "MMC slot 2 Vdd");
530 if (err) { 526 if (err) {
531 gpio_free(N8X0_SLOT_SWITCH_GPIO); 527 gpio_free(N8X0_SLOT_SWITCH_GPIO);
532 gpio_free(N810_EMMC_VSD_GPIO);
533 return; 528 return;
534 } 529 }
535 gpio_direction_output(N810_EMMC_VIO_GPIO, 0);
536 } 530 }
537 531
538 mmc_data[0] = &mmc1_data; 532 mmc_data[0] = &mmc1_data;
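
The n8x0 hunks above fold gpio_request() plus gpio_direction_output() into single gpio_request_one() / gpio_request_array() calls. A minimal sketch of both helpers, using made-up pin numbers and labels; gpio_request_array() releases any pins it already claimed if one entry fails:

#include <linux/kernel.h>
#include <linux/gpio.h>

/* Hypothetical pins, for illustration only. */
static struct gpio example_gpios[] __initdata = {
	{ 23, GPIOF_OUT_INIT_LOW,  "example enable" },
	{ 24, GPIOF_OUT_INIT_HIGH, "example reset"  },
};

static int __init example_gpio_setup(void)
{
	int err;

	/* Request the pin and set it as an output driven low, in one call. */
	err = gpio_request_one(42, GPIOF_OUT_INIT_LOW, "example slot switch");
	if (err)
		return err;

	/* Same idea for a whole table of pins. */
	err = gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
	if (err) {
		gpio_free(42);
		return err;
	}

	return 0;
}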
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 33007fd4a083..be71426359f2 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -41,8 +41,8 @@
41 41
42#include <plat/board.h> 42#include <plat/board.h>
43#include <plat/common.h> 43#include <plat/common.h>
44#include <plat/display.h> 44#include <video/omapdss.h>
45#include <plat/panel-generic-dpi.h> 45#include <video/omap-panel-generic-dpi.h>
46#include <plat/gpmc.h> 46#include <plat/gpmc.h>
47#include <plat/nand.h> 47#include <plat/nand.h>
48#include <plat/usb.h> 48#include <plat/usb.h>
@@ -52,6 +52,7 @@
52#include "hsmmc.h" 52#include "hsmmc.h"
53#include "timer-gp.h" 53#include "timer-gp.h"
54#include "pm.h" 54#include "pm.h"
55#include "common-board-devices.h"
55 56
56#define NAND_BLOCK_SIZE SZ_128K 57#define NAND_BLOCK_SIZE SZ_128K
57 58
@@ -79,6 +80,12 @@ static u8 omap3_beagle_get_rev(void)
79 return omap3_beagle_version; 80 return omap3_beagle_version;
80} 81}
81 82
83static struct gpio omap3_beagle_rev_gpios[] __initdata = {
84 { 171, GPIOF_IN, "rev_id_0" },
85 { 172, GPIOF_IN, "rev_id_1" },
86 { 173, GPIOF_IN, "rev_id_2" },
87};
88
82static void __init omap3_beagle_init_rev(void) 89static void __init omap3_beagle_init_rev(void)
83{ 90{
84 int ret; 91 int ret;
@@ -88,21 +95,13 @@ static void __init omap3_beagle_init_rev(void)
88 omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP); 95 omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP);
89 omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP); 96 omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP);
90 97
91 ret = gpio_request(171, "rev_id_0"); 98 ret = gpio_request_array(omap3_beagle_rev_gpios,
92 if (ret < 0) 99 ARRAY_SIZE(omap3_beagle_rev_gpios));
93 goto fail0; 100 if (ret < 0) {
94 101 printk(KERN_ERR "Unable to get revision detection GPIO pins\n");
95 ret = gpio_request(172, "rev_id_1"); 102 omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
96 if (ret < 0) 103 return;
97 goto fail1; 104 }
98
99 ret = gpio_request(173, "rev_id_2");
100 if (ret < 0)
101 goto fail2;
102
103 gpio_direction_input(171);
104 gpio_direction_input(172);
105 gpio_direction_input(173);
106 105
107 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1) 106 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
108 | (gpio_get_value(173) << 2); 107 | (gpio_get_value(173) << 2);
@@ -128,18 +127,6 @@ static void __init omap3_beagle_init_rev(void)
128 printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev); 127 printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev);
129 omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN; 128 omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
130 } 129 }
131
132 return;
133
134fail2:
135 gpio_free(172);
136fail1:
137 gpio_free(171);
138fail0:
139 printk(KERN_ERR "Unable to get revision detection GPIO pins\n");
140 omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
141
142 return;
143} 130}
144 131
145static struct mtd_partition omap3beagle_nand_partitions[] = { 132static struct mtd_partition omap3beagle_nand_partitions[] = {
@@ -173,15 +160,6 @@ static struct mtd_partition omap3beagle_nand_partitions[] = {
173 }, 160 },
174}; 161};
175 162
176static struct omap_nand_platform_data omap3beagle_nand_data = {
177 .options = NAND_BUSWIDTH_16,
178 .parts = omap3beagle_nand_partitions,
179 .nr_parts = ARRAY_SIZE(omap3beagle_nand_partitions),
180 .dma_channel = -1, /* disable DMA in OMAP NAND driver */
181 .nand_setup = NULL,
182 .dev_ready = NULL,
183};
184
185/* DSS */ 163/* DSS */
186 164
187static int beagle_enable_dvi(struct omap_dss_device *dssdev) 165static int beagle_enable_dvi(struct omap_dss_device *dssdev)
@@ -243,13 +221,10 @@ static void __init beagle_display_init(void)
243{ 221{
244 int r; 222 int r;
245 223
246 r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset"); 224 r = gpio_request_one(beagle_dvi_device.reset_gpio, GPIOF_OUT_INIT_LOW,
247 if (r < 0) { 225 "DVI reset");
226 if (r < 0)
248 printk(KERN_ERR "Unable to get DVI reset GPIO\n"); 227 printk(KERN_ERR "Unable to get DVI reset GPIO\n");
249 return;
250 }
251
252 gpio_direction_output(beagle_dvi_device.reset_gpio, 0);
253} 228}
254 229
255#include "sdram-micron-mt46h32m32lf-6.h" 230#include "sdram-micron-mt46h32m32lf-6.h"
@@ -276,7 +251,7 @@ static struct gpio_led gpio_leds[];
276static int beagle_twl_gpio_setup(struct device *dev, 251static int beagle_twl_gpio_setup(struct device *dev,
277 unsigned gpio, unsigned ngpio) 252 unsigned gpio, unsigned ngpio)
278{ 253{
279 int r; 254 int r, usb_pwr_level;
280 255
281 if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { 256 if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
282 mmc[0].gpio_wp = -EINVAL; 257 mmc[0].gpio_wp = -EINVAL;
@@ -295,66 +270,46 @@ static int beagle_twl_gpio_setup(struct device *dev,
295 beagle_vmmc1_supply.dev = mmc[0].dev; 270 beagle_vmmc1_supply.dev = mmc[0].dev;
296 beagle_vsim_supply.dev = mmc[0].dev; 271 beagle_vsim_supply.dev = mmc[0].dev;
297 272
298 /* REVISIT: need ehci-omap hooks for external VBUS
299 * power switch and overcurrent detect
300 */
301 if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM) {
302 r = gpio_request(gpio + 1, "EHCI_nOC");
303 if (!r) {
304 r = gpio_direction_input(gpio + 1);
305 if (r)
306 gpio_free(gpio + 1);
307 }
308 if (r)
309 pr_err("%s: unable to configure EHCI_nOC\n", __func__);
310 }
311
312 /* 273 /*
313 * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active 274 * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
314 * high / others active low) 275 * high / others active low)
315 */ 276 * DVI reset GPIO is different between beagle revisions
316 gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
317 if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
318 gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
319 else
320 gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
321
322 /* DVI reset GPIO is different between beagle revisions */
323 if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
324 beagle_dvi_device.reset_gpio = 129;
325 else
326 beagle_dvi_device.reset_gpio = 170;
327
328 /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
329 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
330
331 /*
332 * gpio + 1 on Xm controls the TFP410's enable line (active low)
333 * gpio + 2 control varies depending on the board rev as follows:
334 * P7/P8 revisions(prototype): Camera EN
335 * A2+ revisions (production): LDO (supplies DVI, serial, led blocks)
336 */ 277 */
337 if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { 278 if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
338 r = gpio_request(gpio + 1, "nDVI_PWR_EN"); 279 usb_pwr_level = GPIOF_OUT_INIT_HIGH;
339 if (!r) { 280 beagle_dvi_device.reset_gpio = 129;
340 r = gpio_direction_output(gpio + 1, 0); 281 /*
341 if (r) 282 * gpio + 1 on Xm controls the TFP410's enable line (active low)
342 gpio_free(gpio + 1); 283 * gpio + 2 control varies depending on the board rev as below:
343 } 284 * P7/P8 revisions(prototype): Camera EN
285 * A2+ revisions (production): LDO (DVI, serial, led blocks)
286 */
287 r = gpio_request_one(gpio + 1, GPIOF_OUT_INIT_LOW,
288 "nDVI_PWR_EN");
344 if (r) 289 if (r)
345 pr_err("%s: unable to configure nDVI_PWR_EN\n", 290 pr_err("%s: unable to configure nDVI_PWR_EN\n",
346 __func__); 291 __func__);
347 r = gpio_request(gpio + 2, "DVI_LDO_EN"); 292 r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
348 if (!r) { 293 "DVI_LDO_EN");
349 r = gpio_direction_output(gpio + 2, 1);
350 if (r)
351 gpio_free(gpio + 2);
352 }
353 if (r) 294 if (r)
354 pr_err("%s: unable to configure DVI_LDO_EN\n", 295 pr_err("%s: unable to configure DVI_LDO_EN\n",
355 __func__); 296 __func__);
297 } else {
298 usb_pwr_level = GPIOF_OUT_INIT_LOW;
299 beagle_dvi_device.reset_gpio = 170;
300 /*
301 * REVISIT: need ehci-omap hooks for external VBUS
302 * power switch and overcurrent detect
303 */
304 if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
305 pr_err("%s: unable to configure EHCI_nOC\n", __func__);
356 } 306 }
357 307
308 gpio_request_one(gpio + TWL4030_GPIO_MAX, usb_pwr_level, "nEN_USB_PWR");
309
310 /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
311 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
312
358 return 0; 313 return 0;
359} 314}
360 315
@@ -453,15 +408,6 @@ static struct twl4030_platform_data beagle_twldata = {
453 .vpll2 = &beagle_vpll2, 408 .vpll2 = &beagle_vpll2,
454}; 409};
455 410
456static struct i2c_board_info __initdata beagle_i2c_boardinfo[] = {
457 {
458 I2C_BOARD_INFO("twl4030", 0x48),
459 .flags = I2C_CLIENT_WAKE,
460 .irq = INT_34XX_SYS_NIRQ,
461 .platform_data = &beagle_twldata,
462 },
463};
464
465static struct i2c_board_info __initdata beagle_i2c_eeprom[] = { 411static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
466 { 412 {
467 I2C_BOARD_INFO("eeprom", 0x50), 413 I2C_BOARD_INFO("eeprom", 0x50),
@@ -470,8 +416,7 @@ static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
470 416
471static int __init omap3_beagle_i2c_init(void) 417static int __init omap3_beagle_i2c_init(void)
472{ 418{
473 omap_register_i2c_bus(1, 2600, beagle_i2c_boardinfo, 419 omap3_pmic_init("twl4030", &beagle_twldata);
474 ARRAY_SIZE(beagle_i2c_boardinfo));
475 /* Bus 3 is attached to the DVI port where devices like the pico DLP 420 /* Bus 3 is attached to the DVI port where devices like the pico DLP
476 * projector don't work reliably with 400kHz */ 421 * projector don't work reliably with 400kHz */
477 omap_register_i2c_bus(3, 100, beagle_i2c_eeprom, ARRAY_SIZE(beagle_i2c_eeprom)); 422 omap_register_i2c_bus(3, 100, beagle_i2c_eeprom, ARRAY_SIZE(beagle_i2c_eeprom));
@@ -551,39 +496,6 @@ static struct platform_device *omap3_beagle_devices[] __initdata = {
551 &keys_gpio, 496 &keys_gpio,
552}; 497};
553 498
554static void __init omap3beagle_flash_init(void)
555{
556 u8 cs = 0;
557 u8 nandcs = GPMC_CS_NUM + 1;
558
559 /* find out the chip-select on which NAND exists */
560 while (cs < GPMC_CS_NUM) {
561 u32 ret = 0;
562 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
563
564 if ((ret & 0xC00) == 0x800) {
565 printk(KERN_INFO "Found NAND on CS%d\n", cs);
566 if (nandcs > GPMC_CS_NUM)
567 nandcs = cs;
568 }
569 cs++;
570 }
571
572 if (nandcs > GPMC_CS_NUM) {
573 printk(KERN_INFO "NAND: Unable to find configuration "
574 "in GPMC\n ");
575 return;
576 }
577
578 if (nandcs < GPMC_CS_NUM) {
579 omap3beagle_nand_data.cs = nandcs;
580
581 printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
582 if (gpmc_nand_init(&omap3beagle_nand_data) < 0)
583 printk(KERN_ERR "Unable to register NAND device\n");
584 }
585}
586
587static const struct usbhs_omap_board_data usbhs_bdata __initconst = { 499static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
588 500
589 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, 501 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
@@ -602,12 +514,6 @@ static struct omap_board_mux board_mux[] __initdata = {
602}; 514};
603#endif 515#endif
604 516
605static struct omap_musb_board_data musb_board_data = {
606 .interface_type = MUSB_INTERFACE_ULPI,
607 .mode = MUSB_OTG,
608 .power = 100,
609};
610
611static void __init beagle_opp_init(void) 517static void __init beagle_opp_init(void)
612{ 518{
613 int r = 0; 519 int r = 0;
@@ -665,13 +571,13 @@ static void __init omap3_beagle_init(void)
665 omap_serial_init(); 571 omap_serial_init();
666 572
667 omap_mux_init_gpio(170, OMAP_PIN_INPUT); 573 omap_mux_init_gpio(170, OMAP_PIN_INPUT);
668 gpio_request(170, "DVI_nPD");
669 /* REVISIT leave DVI powered down until it's needed ... */ 574 /* REVISIT leave DVI powered down until it's needed ... */
670 gpio_direction_output(170, true); 575 gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
671 576
672 usb_musb_init(&musb_board_data); 577 usb_musb_init(NULL);
673 usbhs_init(&usbhs_bdata); 578 usbhs_init(&usbhs_bdata);
674 omap3beagle_flash_init(); 579 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
580 ARRAY_SIZE(omap3beagle_nand_partitions));
675 581
676 /* Ensure SDRC pins are mux'd for self-refresh */ 582 /* Ensure SDRC pins are mux'd for self-refresh */
677 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); 583 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
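
Several of the boards in this series, beagle included, replace their private twl4030 i2c_board_info with the common-board-devices helper. A sketch of the resulting pattern; the platform-data contents are board-specific, and the fields shown are only placeholders:

#include <linux/i2c/twl.h>
#include <plat/i2c.h>
#include "common-board-devices.h"

static struct twl4030_platform_data example_twldata = {
	.irq_base	= TWL4030_IRQ_BASE,
	.irq_end	= TWL4030_IRQ_END,
	/* .gpio, .usb, regulator and codec entries are filled in per board */
};

static int __init example_i2c_init(void)
{
	/*
	 * Registers I2C bus 1 at 2600 kHz with a single "twl4030" device
	 * wired to the SYS_NIRQ line -- the boilerplate every board used
	 * to open-code as an i2c_board_info array.
	 */
	omap3_pmic_init("twl4030", &example_twldata);

	/* Other buses on the board are still registered explicitly. */
	omap_register_i2c_bus(2, 400, NULL, 0);

	return 0;
}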
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 5a1a916e5cc8..b4d43464a303 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -44,12 +44,13 @@
44#include <plat/usb.h> 44#include <plat/usb.h>
45#include <plat/common.h> 45#include <plat/common.h>
46#include <plat/mcspi.h> 46#include <plat/mcspi.h>
47#include <plat/display.h> 47#include <video/omapdss.h>
48#include <plat/panel-generic-dpi.h> 48#include <video/omap-panel-generic-dpi.h>
49 49
50#include "mux.h" 50#include "mux.h"
51#include "sdram-micron-mt46h32m32lf-6.h" 51#include "sdram-micron-mt46h32m32lf-6.h"
52#include "hsmmc.h" 52#include "hsmmc.h"
53#include "common-board-devices.h"
53 54
54#define OMAP3_EVM_TS_GPIO 175 55#define OMAP3_EVM_TS_GPIO 175
55#define OMAP3_EVM_EHCI_VBUS 22 56#define OMAP3_EVM_EHCI_VBUS 22
@@ -101,49 +102,20 @@ static void __init omap3_evm_get_revision(void)
101} 102}
102 103
103#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 104#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
104static struct resource omap3evm_smsc911x_resources[] = { 105#include <plat/gpmc-smsc911x.h>
105 [0] = {
106 .start = OMAP3EVM_ETHR_START,
107 .end = (OMAP3EVM_ETHR_START + OMAP3EVM_ETHR_SIZE - 1),
108 .flags = IORESOURCE_MEM,
109 },
110 [1] = {
111 .start = OMAP_GPIO_IRQ(OMAP3EVM_ETHR_GPIO_IRQ),
112 .end = OMAP_GPIO_IRQ(OMAP3EVM_ETHR_GPIO_IRQ),
113 .flags = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW),
114 },
115};
116 106
117static struct smsc911x_platform_config smsc911x_config = { 107static struct omap_smsc911x_platform_data smsc911x_cfg = {
118 .phy_interface = PHY_INTERFACE_MODE_MII, 108 .cs = OMAP3EVM_SMSC911X_CS,
119 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, 109 .gpio_irq = OMAP3EVM_ETHR_GPIO_IRQ,
120 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, 110 .gpio_reset = -EINVAL,
121 .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS), 111 .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
122};
123
124static struct platform_device omap3evm_smsc911x_device = {
125 .name = "smsc911x",
126 .id = -1,
127 .num_resources = ARRAY_SIZE(omap3evm_smsc911x_resources),
128 .resource = &omap3evm_smsc911x_resources[0],
129 .dev = {
130 .platform_data = &smsc911x_config,
131 },
132}; 112};
133 113
134static inline void __init omap3evm_init_smsc911x(void) 114static inline void __init omap3evm_init_smsc911x(void)
135{ 115{
136 int eth_cs, eth_rst;
137 struct clk *l3ck; 116 struct clk *l3ck;
138 unsigned int rate; 117 unsigned int rate;
139 118
140 if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
141 eth_rst = OMAP3EVM_GEN1_ETHR_GPIO_RST;
142 else
143 eth_rst = OMAP3EVM_GEN2_ETHR_GPIO_RST;
144
145 eth_cs = OMAP3EVM_SMSC911X_CS;
146
147 l3ck = clk_get(NULL, "l3_ck"); 119 l3ck = clk_get(NULL, "l3_ck");
148 if (IS_ERR(l3ck)) 120 if (IS_ERR(l3ck))
149 rate = 100000000; 121 rate = 100000000;
@@ -152,33 +124,13 @@ static inline void __init omap3evm_init_smsc911x(void)
152 124
153 /* Configure ethernet controller reset gpio */ 125 /* Configure ethernet controller reset gpio */
154 if (cpu_is_omap3430()) { 126 if (cpu_is_omap3430()) {
155 if (gpio_request(eth_rst, "SMSC911x gpio") < 0) { 127 if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
156 pr_err(KERN_ERR "Failed to request %d for smsc911x\n", 128 smsc911x_cfg.gpio_reset = OMAP3EVM_GEN1_ETHR_GPIO_RST;
157 eth_rst); 129 else
158 return; 130 smsc911x_cfg.gpio_reset = OMAP3EVM_GEN2_ETHR_GPIO_RST;
159 }
160
161 if (gpio_direction_output(eth_rst, 1) < 0) {
162 pr_err(KERN_ERR "Failed to set direction of %d for" \
163 " smsc911x\n", eth_rst);
164 return;
165 }
166 /* reset pulse to ethernet controller*/
167 usleep_range(150, 220);
168 gpio_set_value(eth_rst, 0);
169 usleep_range(150, 220);
170 gpio_set_value(eth_rst, 1);
171 usleep_range(1, 2);
172 }
173
174 if (gpio_request(OMAP3EVM_ETHR_GPIO_IRQ, "SMSC911x irq") < 0) {
175 printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
176 OMAP3EVM_ETHR_GPIO_IRQ);
177 return;
178 } 131 }
179 132
180 gpio_direction_input(OMAP3EVM_ETHR_GPIO_IRQ); 133 gpmc_smsc911x_init(&smsc911x_cfg);
181 platform_device_register(&omap3evm_smsc911x_device);
182} 134}
183 135
184#else 136#else
@@ -197,6 +149,15 @@ static inline void __init omap3evm_init_smsc911x(void) { return; }
197#define OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO 210 149#define OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO 210
198#define OMAP3EVM_DVI_PANEL_EN_GPIO 199 150#define OMAP3EVM_DVI_PANEL_EN_GPIO 199
199 151
152static struct gpio omap3_evm_dss_gpios[] __initdata = {
153 { OMAP3EVM_LCD_PANEL_RESB, GPIOF_OUT_INIT_HIGH, "lcd_panel_resb" },
154 { OMAP3EVM_LCD_PANEL_INI, GPIOF_OUT_INIT_HIGH, "lcd_panel_ini" },
155 { OMAP3EVM_LCD_PANEL_QVGA, GPIOF_OUT_INIT_LOW, "lcd_panel_qvga" },
156 { OMAP3EVM_LCD_PANEL_LR, GPIOF_OUT_INIT_HIGH, "lcd_panel_lr" },
157 { OMAP3EVM_LCD_PANEL_UD, GPIOF_OUT_INIT_HIGH, "lcd_panel_ud" },
158 { OMAP3EVM_LCD_PANEL_ENVDD, GPIOF_OUT_INIT_LOW, "lcd_panel_envdd" },
159};
160
200static int lcd_enabled; 161static int lcd_enabled;
201static int dvi_enabled; 162static int dvi_enabled;
202 163
@@ -204,61 +165,10 @@ static void __init omap3_evm_display_init(void)
204{ 165{
205 int r; 166 int r;
206 167
207 r = gpio_request(OMAP3EVM_LCD_PANEL_RESB, "lcd_panel_resb"); 168 r = gpio_request_array(omap3_evm_dss_gpios,
208 if (r) { 169 ARRAY_SIZE(omap3_evm_dss_gpios));
209 printk(KERN_ERR "failed to get lcd_panel_resb\n"); 170 if (r)
210 return; 171 printk(KERN_ERR "failed to get lcd_panel_* gpios\n");
211 }
212 gpio_direction_output(OMAP3EVM_LCD_PANEL_RESB, 1);
213
214 r = gpio_request(OMAP3EVM_LCD_PANEL_INI, "lcd_panel_ini");
215 if (r) {
216 printk(KERN_ERR "failed to get lcd_panel_ini\n");
217 goto err_1;
218 }
219 gpio_direction_output(OMAP3EVM_LCD_PANEL_INI, 1);
220
221 r = gpio_request(OMAP3EVM_LCD_PANEL_QVGA, "lcd_panel_qvga");
222 if (r) {
223 printk(KERN_ERR "failed to get lcd_panel_qvga\n");
224 goto err_2;
225 }
226 gpio_direction_output(OMAP3EVM_LCD_PANEL_QVGA, 0);
227
228 r = gpio_request(OMAP3EVM_LCD_PANEL_LR, "lcd_panel_lr");
229 if (r) {
230 printk(KERN_ERR "failed to get lcd_panel_lr\n");
231 goto err_3;
232 }
233 gpio_direction_output(OMAP3EVM_LCD_PANEL_LR, 1);
234
235 r = gpio_request(OMAP3EVM_LCD_PANEL_UD, "lcd_panel_ud");
236 if (r) {
237 printk(KERN_ERR "failed to get lcd_panel_ud\n");
238 goto err_4;
239 }
240 gpio_direction_output(OMAP3EVM_LCD_PANEL_UD, 1);
241
242 r = gpio_request(OMAP3EVM_LCD_PANEL_ENVDD, "lcd_panel_envdd");
243 if (r) {
244 printk(KERN_ERR "failed to get lcd_panel_envdd\n");
245 goto err_5;
246 }
247 gpio_direction_output(OMAP3EVM_LCD_PANEL_ENVDD, 0);
248
249 return;
250
251err_5:
252 gpio_free(OMAP3EVM_LCD_PANEL_UD);
253err_4:
254 gpio_free(OMAP3EVM_LCD_PANEL_LR);
255err_3:
256 gpio_free(OMAP3EVM_LCD_PANEL_QVGA);
257err_2:
258 gpio_free(OMAP3EVM_LCD_PANEL_INI);
259err_1:
260 gpio_free(OMAP3EVM_LCD_PANEL_RESB);
261
262} 172}
263 173
264static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev) 174static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev)
@@ -448,7 +358,7 @@ static struct platform_device leds_gpio = {
448static int omap3evm_twl_gpio_setup(struct device *dev, 358static int omap3evm_twl_gpio_setup(struct device *dev,
449 unsigned gpio, unsigned ngpio) 359 unsigned gpio, unsigned ngpio)
450{ 360{
451 int r; 361 int r, lcd_bl_en;
452 362
453 /* gpio + 0 is "mmc0_cd" (input/IRQ) */ 363 /* gpio + 0 is "mmc0_cd" (input/IRQ) */
454 omap_mux_init_gpio(63, OMAP_PIN_INPUT); 364 omap_mux_init_gpio(63, OMAP_PIN_INPUT);
@@ -465,16 +375,14 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
465 */ 375 */
466 376
467 /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ 377 /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
468 r = gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); 378 lcd_bl_en = get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2 ?
469 if (!r) 379 GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
470 r = gpio_direction_output(gpio + TWL4030_GPIO_MAX, 380 r = gpio_request_one(gpio + TWL4030_GPIO_MAX, lcd_bl_en, "EN_LCD_BKL");
471 (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) ? 1 : 0);
472 if (r) 381 if (r)
473 printk(KERN_ERR "failed to get/set lcd_bkl gpio\n"); 382 printk(KERN_ERR "failed to get/set lcd_bkl gpio\n");
474 383
475 /* gpio + 7 == DVI Enable */ 384 /* gpio + 7 == DVI Enable */
476 gpio_request(gpio + 7, "EN_DVI"); 385 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI");
477 gpio_direction_output(gpio + 7, 0);
478 386
479 /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ 387 /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
480 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; 388 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -652,78 +560,18 @@ static struct twl4030_platform_data omap3evm_twldata = {
652 .vdac = &omap3_evm_vdac, 560 .vdac = &omap3_evm_vdac,
653 .vpll2 = &omap3_evm_vpll2, 561 .vpll2 = &omap3_evm_vpll2,
654 .vio = &omap3evm_vio, 562 .vio = &omap3evm_vio,
655}; 563 .vmmc1 = &omap3evm_vmmc1,
656 564 .vsim = &omap3evm_vsim,
657static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = {
658 {
659 I2C_BOARD_INFO("twl4030", 0x48),
660 .flags = I2C_CLIENT_WAKE,
661 .irq = INT_34XX_SYS_NIRQ,
662 .platform_data = &omap3evm_twldata,
663 },
664}; 565};
665 566
666static int __init omap3_evm_i2c_init(void) 567static int __init omap3_evm_i2c_init(void)
667{ 568{
668 /* 569 omap3_pmic_init("twl4030", &omap3evm_twldata);
669 * REVISIT: These entries can be set in omap3evm_twl_data
670 * after a merge with MFD tree
671 */
672 omap3evm_twldata.vmmc1 = &omap3evm_vmmc1;
673 omap3evm_twldata.vsim = &omap3evm_vsim;
674
675 omap_register_i2c_bus(1, 2600, omap3evm_i2c_boardinfo,
676 ARRAY_SIZE(omap3evm_i2c_boardinfo));
677 omap_register_i2c_bus(2, 400, NULL, 0); 570 omap_register_i2c_bus(2, 400, NULL, 0);
678 omap_register_i2c_bus(3, 400, NULL, 0); 571 omap_register_i2c_bus(3, 400, NULL, 0);
679 return 0; 572 return 0;
680} 573}
681 574
682static void ads7846_dev_init(void)
683{
684 if (gpio_request(OMAP3_EVM_TS_GPIO, "ADS7846 pendown") < 0)
685 printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
686
687 gpio_direction_input(OMAP3_EVM_TS_GPIO);
688 gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310);
689}
690
691static int ads7846_get_pendown_state(void)
692{
693 return !gpio_get_value(OMAP3_EVM_TS_GPIO);
694}
695
696static struct ads7846_platform_data ads7846_config = {
697 .x_max = 0x0fff,
698 .y_max = 0x0fff,
699 .x_plate_ohms = 180,
700 .pressure_max = 255,
701 .debounce_max = 10,
702 .debounce_tol = 3,
703 .debounce_rep = 1,
704 .get_pendown_state = ads7846_get_pendown_state,
705 .keep_vref_on = 1,
706 .settle_delay_usecs = 150,
707 .wakeup = true,
708};
709
710static struct omap2_mcspi_device_config ads7846_mcspi_config = {
711 .turbo_mode = 0,
712 .single_channel = 1, /* 0: slave, 1: master */
713};
714
715static struct spi_board_info omap3evm_spi_board_info[] = {
716 [0] = {
717 .modalias = "ads7846",
718 .bus_num = 1,
719 .chip_select = 0,
720 .max_speed_hz = 1500000,
721 .controller_data = &ads7846_mcspi_config,
722 .irq = OMAP_GPIO_IRQ(OMAP3_EVM_TS_GPIO),
723 .platform_data = &ads7846_config,
724 },
725};
726
727static struct omap_board_config_kernel omap3_evm_config[] __initdata = { 575static struct omap_board_config_kernel omap3_evm_config[] __initdata = {
728}; 576};
729 577
@@ -825,6 +673,11 @@ static struct omap_musb_board_data musb_board_data = {
825 .power = 100, 673 .power = 100,
826}; 674};
827 675
676static struct gpio omap3_evm_ehci_gpios[] __initdata = {
677 { OMAP3_EVM_EHCI_VBUS, GPIOF_OUT_INIT_HIGH, "enable EHCI VBUS" },
678 { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
679};
680
828static void __init omap3_evm_init(void) 681static void __init omap3_evm_init(void)
829{ 682{
830 omap3_evm_get_revision(); 683 omap3_evm_get_revision();
@@ -841,9 +694,6 @@ static void __init omap3_evm_init(void)
841 694
842 omap_display_init(&omap3_evm_dss_data); 695 omap_display_init(&omap3_evm_dss_data);
843 696
844 spi_register_board_info(omap3evm_spi_board_info,
845 ARRAY_SIZE(omap3evm_spi_board_info));
846
847 omap_serial_init(); 697 omap_serial_init();
848 698
849 /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */ 699 /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */
@@ -851,16 +701,12 @@ static void __init omap3_evm_init(void)
851 701
852 if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) { 702 if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
853 /* enable EHCI VBUS using GPIO22 */ 703 /* enable EHCI VBUS using GPIO22 */
854 omap_mux_init_gpio(22, OMAP_PIN_INPUT_PULLUP); 704 omap_mux_init_gpio(OMAP3_EVM_EHCI_VBUS, OMAP_PIN_INPUT_PULLUP);
855 gpio_request(OMAP3_EVM_EHCI_VBUS, "enable EHCI VBUS");
856 gpio_direction_output(OMAP3_EVM_EHCI_VBUS, 0);
857 gpio_set_value(OMAP3_EVM_EHCI_VBUS, 1);
858
859 /* Select EHCI port on main board */ 705 /* Select EHCI port on main board */
860 omap_mux_init_gpio(61, OMAP_PIN_INPUT_PULLUP); 706 omap_mux_init_gpio(OMAP3_EVM_EHCI_SELECT,
861 gpio_request(OMAP3_EVM_EHCI_SELECT, "select EHCI port"); 707 OMAP_PIN_INPUT_PULLUP);
862 gpio_direction_output(OMAP3_EVM_EHCI_SELECT, 0); 708 gpio_request_array(omap3_evm_ehci_gpios,
863 gpio_set_value(OMAP3_EVM_EHCI_SELECT, 0); 709 ARRAY_SIZE(omap3_evm_ehci_gpios));
864 710
865 /* setup EHCI phy reset config */ 711 /* setup EHCI phy reset config */
866 omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP); 712 omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP);
@@ -876,7 +722,7 @@ static void __init omap3_evm_init(void)
876 } 722 }
877 usb_musb_init(&musb_board_data); 723 usb_musb_init(&musb_board_data);
878 usbhs_init(&usbhs_bdata); 724 usbhs_init(&usbhs_bdata);
879 ads7846_dev_init(); 725 omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
880 omap3evm_init_smsc911x(); 726 omap3evm_init_smsc911x();
881 omap3_evm_display_init(); 727 omap3_evm_display_init();
882 728
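
The EVM hunk above replaces the open-coded ADS7846 pendown GPIO, SPI board info and platform data with one omap_ads7846_init() call. A sketch of how a board supplies its own platform data through the helper; the GPIO number is illustrative, 310 is the pendown debounce time in microseconds, and passing NULL as the last argument leaves the helper to fall back on its own defaults:

#include <linux/spi/ads7846.h>
#include "common-board-devices.h"

#define EXAMPLE_TS_GPIO	175	/* pendown GPIO, illustrative */

static struct ads7846_platform_data example_ts_pdata = {
	.x_max		= 0x0fff,
	.y_max		= 0x0fff,
	.x_plate_ohms	= 180,
	.pressure_max	= 255,
	.keep_vref_on	= 1,
	.wakeup		= true,
};

static void __init example_ts_init(void)
{
	/* SPI bus 1, pendown GPIO, 310 us debounce, board-specific pdata.
	 * The helper requests the GPIO and registers the SPI device. */
	omap_ads7846_init(1, EXAMPLE_TS_GPIO, 310, &example_ts_pdata);
}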
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index b726943d7c93..60d9be49dbab 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -37,6 +37,7 @@
37#include "hsmmc.h" 37#include "hsmmc.h"
38#include "timer-gp.h" 38#include "timer-gp.h"
39#include "control.h" 39#include "control.h"
40#include "common-board-devices.h"
40 41
41#include <plat/mux.h> 42#include <plat/mux.h>
42#include <plat/board.h> 43#include <plat/board.h>
@@ -93,19 +94,9 @@ static struct twl4030_platform_data omap3logic_twldata = {
93 .vmmc1 = &omap3logic_vmmc1, 94 .vmmc1 = &omap3logic_vmmc1,
94}; 95};
95 96
96static struct i2c_board_info __initdata omap3logic_i2c_boardinfo[] = {
97 {
98 I2C_BOARD_INFO("twl4030", 0x48),
99 .flags = I2C_CLIENT_WAKE,
100 .irq = INT_34XX_SYS_NIRQ,
101 .platform_data = &omap3logic_twldata,
102 },
103};
104
105static int __init omap3logic_i2c_init(void) 97static int __init omap3logic_i2c_init(void)
106{ 98{
107 omap_register_i2c_bus(1, 2600, omap3logic_i2c_boardinfo, 99 omap3_pmic_init("twl4030", &omap3logic_twldata);
108 ARRAY_SIZE(omap3logic_i2c_boardinfo));
109 return 0; 100 return 0;
110} 101}
111 102
@@ -147,7 +138,6 @@ static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
147 .cs = OMAP3LOGIC_SMSC911X_CS, 138 .cs = OMAP3LOGIC_SMSC911X_CS,
148 .gpio_irq = -EINVAL, 139 .gpio_irq = -EINVAL,
149 .gpio_reset = -EINVAL, 140 .gpio_reset = -EINVAL,
150 .flags = IORESOURCE_IRQ_LOWLEVEL,
151}; 141};
152 142
153/* TODO/FIXME (comment by Peter Barada, LogicPD): 143/* TODO/FIXME (comment by Peter Barada, LogicPD):
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 07dba888f450..1d10736c6d3c 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -22,7 +22,6 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23 23
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25#include <linux/spi/ads7846.h>
26#include <linux/regulator/machine.h> 25#include <linux/regulator/machine.h>
27#include <linux/i2c/twl.h> 26#include <linux/i2c/twl.h>
28#include <linux/wl12xx.h> 27#include <linux/wl12xx.h>
@@ -46,12 +45,13 @@
46#include <mach/hardware.h> 45#include <mach/hardware.h>
47#include <plat/mcspi.h> 46#include <plat/mcspi.h>
48#include <plat/usb.h> 47#include <plat/usb.h>
49#include <plat/display.h> 48#include <video/omapdss.h>
50#include <plat/nand.h> 49#include <plat/nand.h>
51 50
52#include "mux.h" 51#include "mux.h"
53#include "sdram-micron-mt46h32m32lf-6.h" 52#include "sdram-micron-mt46h32m32lf-6.h"
54#include "hsmmc.h" 53#include "hsmmc.h"
54#include "common-board-devices.h"
55 55
56#define PANDORA_WIFI_IRQ_GPIO 21 56#define PANDORA_WIFI_IRQ_GPIO 21
57#define PANDORA_WIFI_NRESET_GPIO 23 57#define PANDORA_WIFI_NRESET_GPIO 23
@@ -305,24 +305,13 @@ static int omap3pandora_twl_gpio_setup(struct device *dev,
305 305
306 /* gpio + 13 drives 32kHz buffer for wifi module */ 306 /* gpio + 13 drives 32kHz buffer for wifi module */
307 gpio_32khz = gpio + 13; 307 gpio_32khz = gpio + 13;
308 ret = gpio_request(gpio_32khz, "wifi 32kHz"); 308 ret = gpio_request_one(gpio_32khz, GPIOF_OUT_INIT_HIGH, "wifi 32kHz");
309 if (ret < 0) { 309 if (ret < 0) {
310 pr_err("Cannot get GPIO line %d, ret=%d\n", gpio_32khz, ret); 310 pr_err("Cannot get GPIO line %d, ret=%d\n", gpio_32khz, ret);
311 goto fail; 311 return -ENODEV;
312 }
313
314 ret = gpio_direction_output(gpio_32khz, 1);
315 if (ret < 0) {
316 pr_err("Cannot set GPIO line %d, ret=%d\n", gpio_32khz, ret);
317 goto fail_direction;
318 } 312 }
319 313
320 return 0; 314 return 0;
321
322fail_direction:
323 gpio_free(gpio_32khz);
324fail:
325 return -ENODEV;
326} 315}
327 316
328static struct twl4030_gpio_platform_data omap3pandora_gpio_data = { 317static struct twl4030_gpio_platform_data omap3pandora_gpio_data = {
@@ -544,15 +533,6 @@ static struct twl4030_platform_data omap3pandora_twldata = {
544 .bci = &pandora_bci_data, 533 .bci = &pandora_bci_data,
545}; 534};
546 535
547static struct i2c_board_info __initdata omap3pandora_i2c_boardinfo[] = {
548 {
549 I2C_BOARD_INFO("tps65950", 0x48),
550 .flags = I2C_CLIENT_WAKE,
551 .irq = INT_34XX_SYS_NIRQ,
552 .platform_data = &omap3pandora_twldata,
553 },
554};
555
556static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = { 536static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
557 { 537 {
558 I2C_BOARD_INFO("bq27500", 0x55), 538 I2C_BOARD_INFO("bq27500", 0x55),
@@ -562,61 +542,15 @@ static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
562 542
563static int __init omap3pandora_i2c_init(void) 543static int __init omap3pandora_i2c_init(void)
564{ 544{
565 omap_register_i2c_bus(1, 2600, omap3pandora_i2c_boardinfo, 545 omap3_pmic_init("tps65950", &omap3pandora_twldata);
566 ARRAY_SIZE(omap3pandora_i2c_boardinfo));
567 /* i2c2 pins are not connected */ 546 /* i2c2 pins are not connected */
568 omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo, 547 omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo,
569 ARRAY_SIZE(omap3pandora_i2c3_boardinfo)); 548 ARRAY_SIZE(omap3pandora_i2c3_boardinfo));
570 return 0; 549 return 0;
571} 550}
572 551
573static void __init omap3pandora_ads7846_init(void)
574{
575 int gpio = OMAP3_PANDORA_TS_GPIO;
576 int ret;
577
578 ret = gpio_request(gpio, "ads7846_pen_down");
579 if (ret < 0) {
580 printk(KERN_ERR "Failed to request GPIO %d for "
581 "ads7846 pen down IRQ\n", gpio);
582 return;
583 }
584
585 gpio_direction_input(gpio);
586}
587
588static int ads7846_get_pendown_state(void)
589{
590 return !gpio_get_value(OMAP3_PANDORA_TS_GPIO);
591}
592
593static struct ads7846_platform_data ads7846_config = {
594 .x_max = 0x0fff,
595 .y_max = 0x0fff,
596 .x_plate_ohms = 180,
597 .pressure_max = 255,
598 .debounce_max = 10,
599 .debounce_tol = 3,
600 .debounce_rep = 1,
601 .get_pendown_state = ads7846_get_pendown_state,
602 .keep_vref_on = 1,
603};
604
605static struct omap2_mcspi_device_config ads7846_mcspi_config = {
606 .turbo_mode = 0,
607 .single_channel = 1, /* 0: slave, 1: master */
608};
609
610static struct spi_board_info omap3pandora_spi_board_info[] __initdata = { 552static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
611 { 553 {
612 .modalias = "ads7846",
613 .bus_num = 1,
614 .chip_select = 0,
615 .max_speed_hz = 1500000,
616 .controller_data = &ads7846_mcspi_config,
617 .irq = OMAP_GPIO_IRQ(OMAP3_PANDORA_TS_GPIO),
618 .platform_data = &ads7846_config,
619 }, {
620 .modalias = "tpo_td043mtea1_panel_spi", 554 .modalias = "tpo_td043mtea1_panel_spi",
621 .bus_num = 1, 555 .bus_num = 1,
622 .chip_select = 1, 556 .chip_select = 1,
@@ -639,14 +573,10 @@ static void __init pandora_wl1251_init(void)
639 573
640 memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata)); 574 memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
641 575
642 ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq"); 576 ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
643 if (ret < 0) 577 if (ret < 0)
644 goto fail; 578 goto fail;
645 579
646 ret = gpio_direction_input(PANDORA_WIFI_IRQ_GPIO);
647 if (ret < 0)
648 goto fail_irq;
649
650 pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO); 580 pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO);
651 if (pandora_wl1251_pdata.irq < 0) 581 if (pandora_wl1251_pdata.irq < 0)
652 goto fail_irq; 582 goto fail_irq;
@@ -688,12 +618,6 @@ static struct omap_board_mux board_mux[] __initdata = {
688}; 618};
689#endif 619#endif
690 620
691static struct omap_musb_board_data musb_board_data = {
692 .interface_type = MUSB_INTERFACE_ULPI,
693 .mode = MUSB_OTG,
694 .power = 100,
695};
696
697static void __init omap3pandora_init(void) 621static void __init omap3pandora_init(void)
698{ 622{
699 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 623 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -705,9 +629,9 @@ static void __init omap3pandora_init(void)
705 omap_serial_init(); 629 omap_serial_init();
706 spi_register_board_info(omap3pandora_spi_board_info, 630 spi_register_board_info(omap3pandora_spi_board_info,
707 ARRAY_SIZE(omap3pandora_spi_board_info)); 631 ARRAY_SIZE(omap3pandora_spi_board_info));
708 omap3pandora_ads7846_init(); 632 omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL);
709 usbhs_init(&usbhs_bdata); 633 usbhs_init(&usbhs_bdata);
710 usb_musb_init(&musb_board_data); 634 usb_musb_init(NULL);
711 gpmc_nand_init(&pandora_nand_data); 635 gpmc_nand_init(&pandora_nand_data);
712 636
713 /* Ensure SDRC pins are mux'd for self-refresh */ 637 /* Ensure SDRC pins are mux'd for self-refresh */
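
The pandora WLAN hunk above keeps the GPIO-to-IRQ mapping for the wl1251 but requests the pin as an input in a single call. A minimal sketch of that sequence with a hypothetical pin number:

#include <linux/gpio.h>

#define EXAMPLE_WLAN_IRQ_GPIO	21	/* illustrative pin number */

static int example_wlan_irq;	/* would be handed to the WLAN driver's pdata */

static int __init example_wlan_irq_setup(void)
{
	int ret;

	/* Claim the pin and configure it as an input in one call. */
	ret = gpio_request_one(EXAMPLE_WLAN_IRQ_GPIO, GPIOF_IN, "wlan irq");
	if (ret < 0)
		return ret;

	/* Translate the GPIO number into the IRQ number the driver needs. */
	example_wlan_irq = gpio_to_irq(EXAMPLE_WLAN_IRQ_GPIO);
	if (example_wlan_irq < 0) {
		gpio_free(EXAMPLE_WLAN_IRQ_GPIO);
		return example_wlan_irq;
	}

	return 0;
}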
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index a6e0b9161c99..0c108a212ea2 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -39,13 +39,12 @@
39#include <plat/gpmc.h> 39#include <plat/gpmc.h>
40#include <plat/nand.h> 40#include <plat/nand.h>
41#include <plat/usb.h> 41#include <plat/usb.h>
42#include <plat/display.h> 42#include <video/omapdss.h>
43#include <plat/panel-generic-dpi.h> 43#include <video/omap-panel-generic-dpi.h>
44 44
45#include <plat/mcspi.h> 45#include <plat/mcspi.h>
46#include <linux/input/matrix_keypad.h> 46#include <linux/input/matrix_keypad.h>
47#include <linux/spi/spi.h> 47#include <linux/spi/spi.h>
48#include <linux/spi/ads7846.h>
49#include <linux/interrupt.h> 48#include <linux/interrupt.h>
50#include <linux/smsc911x.h> 49#include <linux/smsc911x.h>
51#include <linux/i2c/at24.h> 50#include <linux/i2c/at24.h>
@@ -54,52 +53,28 @@
54#include "mux.h" 53#include "mux.h"
55#include "hsmmc.h" 54#include "hsmmc.h"
56#include "timer-gp.h" 55#include "timer-gp.h"
56#include "common-board-devices.h"
57 57
58#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 58#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
59#include <plat/gpmc-smsc911x.h>
60
59#define OMAP3STALKER_ETHR_START 0x2c000000 61#define OMAP3STALKER_ETHR_START 0x2c000000
60#define OMAP3STALKER_ETHR_SIZE 1024 62#define OMAP3STALKER_ETHR_SIZE 1024
61#define OMAP3STALKER_ETHR_GPIO_IRQ 19 63#define OMAP3STALKER_ETHR_GPIO_IRQ 19
62#define OMAP3STALKER_SMC911X_CS 5 64#define OMAP3STALKER_SMC911X_CS 5
63 65
64static struct resource omap3stalker_smsc911x_resources[] = { 66static struct omap_smsc911x_platform_data smsc911x_cfg = {
65 [0] = { 67 .cs = OMAP3STALKER_SMC911X_CS,
66 .start = OMAP3STALKER_ETHR_START, 68 .gpio_irq = OMAP3STALKER_ETHR_GPIO_IRQ,
67 .end = 69 .gpio_reset = -EINVAL,
68 (OMAP3STALKER_ETHR_START + OMAP3STALKER_ETHR_SIZE - 1),
69 .flags = IORESOURCE_MEM,
70 },
71 [1] = {
72 .start = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
73 .end = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
74 .flags = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW),
75 },
76};
77
78static struct smsc911x_platform_config smsc911x_config = {
79 .phy_interface = PHY_INTERFACE_MODE_MII,
80 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
81 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
82 .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS), 70 .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS),
83}; 71};
84 72
85static struct platform_device omap3stalker_smsc911x_device = {
86 .name = "smsc911x",
87 .id = -1,
88 .num_resources = ARRAY_SIZE(omap3stalker_smsc911x_resources),
89 .resource = &omap3stalker_smsc911x_resources[0],
90 .dev = {
91 .platform_data = &smsc911x_config,
92 },
93};
94
95static inline void __init omap3stalker_init_eth(void) 73static inline void __init omap3stalker_init_eth(void)
96{ 74{
97 int eth_cs;
98 struct clk *l3ck; 75 struct clk *l3ck;
99 unsigned int rate; 76 unsigned int rate;
100 77
101 eth_cs = OMAP3STALKER_SMC911X_CS;
102
103 l3ck = clk_get(NULL, "l3_ck"); 78 l3ck = clk_get(NULL, "l3_ck");
104 if (IS_ERR(l3ck)) 79 if (IS_ERR(l3ck))
105 rate = 100000000; 80 rate = 100000000;
@@ -107,16 +82,7 @@ static inline void __init omap3stalker_init_eth(void)
107 rate = clk_get_rate(l3ck); 82 rate = clk_get_rate(l3ck);
108 83
109 omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP); 84 omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP);
110 if (gpio_request(OMAP3STALKER_ETHR_GPIO_IRQ, "SMC911x irq") < 0) { 85 gpmc_smsc911x_init(&smsc911x_cfg);
111 printk(KERN_ERR
112 "Failed to request GPIO%d for smc911x IRQ\n",
113 OMAP3STALKER_ETHR_GPIO_IRQ);
114 return;
115 }
116
117 gpio_direction_input(OMAP3STALKER_ETHR_GPIO_IRQ);
118
119 platform_device_register(&omap3stalker_smsc911x_device);
120} 86}
121 87
122#else 88#else
@@ -365,12 +331,11 @@ omap3stalker_twl_gpio_setup(struct device *dev,
365 */ 331 */
366 332
367 /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ 333 /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
368 gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); 334 gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW,
369 gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); 335 "EN_LCD_BKL");
370 336
371 /* gpio + 7 == DVI Enable */ 337 /* gpio + 7 == DVI Enable */
372 gpio_request(gpio + 7, "EN_DVI"); 338 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI");
373 gpio_direction_output(gpio + 7, 0);
374 339
375 /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */ 340 /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */
376 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; 341 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -489,15 +454,8 @@ static struct twl4030_platform_data omap3stalker_twldata = {
489 .codec = &omap3stalker_codec_data, 454 .codec = &omap3stalker_codec_data,
490 .vdac = &omap3_stalker_vdac, 455 .vdac = &omap3_stalker_vdac,
491 .vpll2 = &omap3_stalker_vpll2, 456 .vpll2 = &omap3_stalker_vpll2,
492}; 457 .vmmc1 = &omap3stalker_vmmc1,
493 458 .vsim = &omap3stalker_vsim,
494static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo[] = {
495 {
496 I2C_BOARD_INFO("twl4030", 0x48),
497 .flags = I2C_CLIENT_WAKE,
498 .irq = INT_34XX_SYS_NIRQ,
499 .platform_data = &omap3stalker_twldata,
500 },
501}; 459};
502 460
503static struct at24_platform_data fram_info = { 461static struct at24_platform_data fram_info = {
@@ -516,15 +474,7 @@ static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = {
516 474
517static int __init omap3_stalker_i2c_init(void) 475static int __init omap3_stalker_i2c_init(void)
518{ 476{
519 /* 477 omap3_pmic_init("twl4030", &omap3stalker_twldata);
520 * REVISIT: These entries can be set in omap3evm_twl_data
521 * after a merge with MFD tree
522 */
523 omap3stalker_twldata.vmmc1 = &omap3stalker_vmmc1;
524 omap3stalker_twldata.vsim = &omap3stalker_vsim;
525
526 omap_register_i2c_bus(1, 2600, omap3stalker_i2c_boardinfo,
527 ARRAY_SIZE(omap3stalker_i2c_boardinfo));
528 omap_register_i2c_bus(2, 400, NULL, 0); 478 omap_register_i2c_bus(2, 400, NULL, 0);
529 omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3, 479 omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3,
530 ARRAY_SIZE(omap3stalker_i2c_boardinfo3)); 480 ARRAY_SIZE(omap3stalker_i2c_boardinfo3));
@@ -532,49 +482,6 @@ static int __init omap3_stalker_i2c_init(void)
532} 482}
533 483
534#define OMAP3_STALKER_TS_GPIO 175 484#define OMAP3_STALKER_TS_GPIO 175
535static void ads7846_dev_init(void)
536{
537 if (gpio_request(OMAP3_STALKER_TS_GPIO, "ADS7846 pendown") < 0)
538 printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
539
540 gpio_direction_input(OMAP3_STALKER_TS_GPIO);
541 gpio_set_debounce(OMAP3_STALKER_TS_GPIO, 310);
542}
543
544static int ads7846_get_pendown_state(void)
545{
546 return !gpio_get_value(OMAP3_STALKER_TS_GPIO);
547}
548
549static struct ads7846_platform_data ads7846_config = {
550 .x_max = 0x0fff,
551 .y_max = 0x0fff,
552 .x_plate_ohms = 180,
553 .pressure_max = 255,
554 .debounce_max = 10,
555 .debounce_tol = 3,
556 .debounce_rep = 1,
557 .get_pendown_state = ads7846_get_pendown_state,
558 .keep_vref_on = 1,
559 .settle_delay_usecs = 150,
560};
561
562static struct omap2_mcspi_device_config ads7846_mcspi_config = {
563 .turbo_mode = 0,
564 .single_channel = 1, /* 0: slave, 1: master */
565};
566
567static struct spi_board_info omap3stalker_spi_board_info[] = {
568 [0] = {
569 .modalias = "ads7846",
570 .bus_num = 1,
571 .chip_select = 0,
572 .max_speed_hz = 1500000,
573 .controller_data = &ads7846_mcspi_config,
574 .irq = OMAP_GPIO_IRQ(OMAP3_STALKER_TS_GPIO),
575 .platform_data = &ads7846_config,
576 },
577};
578 485
579static struct omap_board_config_kernel omap3_stalker_config[] __initdata = { 486static struct omap_board_config_kernel omap3_stalker_config[] __initdata = {
580}; 487};
@@ -618,12 +525,6 @@ static struct omap_board_mux board_mux[] __initdata = {
618}; 525};
619#endif 526#endif
620 527
621static struct omap_musb_board_data musb_board_data = {
622 .interface_type = MUSB_INTERFACE_ULPI,
623 .mode = MUSB_OTG,
624 .power = 100,
625};
626
627static void __init omap3_stalker_init(void) 528static void __init omap3_stalker_init(void)
628{ 529{
629 omap3_mux_init(board_mux, OMAP_PACKAGE_CUS); 530 omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
@@ -636,13 +537,11 @@ static void __init omap3_stalker_init(void)
636 ARRAY_SIZE(omap3_stalker_devices)); 537 ARRAY_SIZE(omap3_stalker_devices));
637 538
638 omap_display_init(&omap3_stalker_dss_data); 539 omap_display_init(&omap3_stalker_dss_data);
639 spi_register_board_info(omap3stalker_spi_board_info,
640 ARRAY_SIZE(omap3stalker_spi_board_info));
641 540
642 omap_serial_init(); 541 omap_serial_init();
643 usb_musb_init(&musb_board_data); 542 usb_musb_init(NULL);
644 usbhs_init(&usbhs_bdata); 543 usbhs_init(&usbhs_bdata);
645 ads7846_dev_init(); 544 omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL);
646 545
647 omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); 546 omap_mux_init_gpio(21, OMAP_PIN_OUTPUT);
648 omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP); 547 omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP);
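
The SMSC911x conversion above (and the Overo and Zoom debug-board hunks further down) follows one pattern: the per-board resource array, smsc911x_platform_config and platform_device are dropped, and the board only fills in an omap_smsc911x_platform_data and calls gpmc_smsc911x_init(). A minimal sketch of the new-style hookup; the chip-select and IRQ GPIO numbers are placeholders, not taken from any particular board:

#include <linux/init.h>
#include <linux/smsc911x.h>
#include <plat/gpmc-smsc911x.h>

/* Placeholder chip-select and IRQ GPIO; adjust to the board wiring. */
static struct omap_smsc911x_platform_data board_smsc911x_cfg = {
	.cs		= 5,
	.gpio_irq	= 19,
	.gpio_reset	= -EINVAL,		/* no reset GPIO wired up */
	.flags		= SMSC911X_USE_32BIT,
};

static void __init board_init_smsc911x(void)
{
	/*
	 * The helper performs the GPMC chip-select request, IRQ GPIO
	 * setup and platform_device registration that the removed
	 * per-board code used to open-code.
	 */
	gpmc_smsc911x_init(&board_smsc911x_cfg);
}
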
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 127cb1752bdd..82872d7d313b 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -52,6 +52,7 @@
52#include "mux.h" 52#include "mux.h"
53#include "hsmmc.h" 53#include "hsmmc.h"
54#include "timer-gp.h" 54#include "timer-gp.h"
55#include "common-board-devices.h"
55 56
56#include <asm/setup.h> 57#include <asm/setup.h>
57 58
@@ -95,15 +96,6 @@ static struct mtd_partition omap3touchbook_nand_partitions[] = {
95 }, 96 },
96}; 97};
97 98
98static struct omap_nand_platform_data omap3touchbook_nand_data = {
99 .options = NAND_BUSWIDTH_16,
100 .parts = omap3touchbook_nand_partitions,
101 .nr_parts = ARRAY_SIZE(omap3touchbook_nand_partitions),
102 .dma_channel = -1, /* disable DMA in OMAP NAND driver */
103 .nand_setup = NULL,
104 .dev_ready = NULL,
105};
106
107#include "sdram-micron-mt46h32m32lf-6.h" 99#include "sdram-micron-mt46h32m32lf-6.h"
108 100
109static struct omap2_hsmmc_info mmc[] = { 101static struct omap2_hsmmc_info mmc[] = {
@@ -154,13 +146,11 @@ static int touchbook_twl_gpio_setup(struct device *dev,
154 /* REVISIT: need ehci-omap hooks for external VBUS 146 /* REVISIT: need ehci-omap hooks for external VBUS
155 * power switch and overcurrent detect 147 * power switch and overcurrent detect
156 */ 148 */
157 149 gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC");
158 gpio_request(gpio + 1, "EHCI_nOC");
159 gpio_direction_input(gpio + 1);
160 150
161 /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */ 151 /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
162 gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR"); 152 gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW,
163 gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); 153 "nEN_USB_PWR");
164 154
165 /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ 155 /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
166 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; 156 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -273,15 +263,6 @@ static struct twl4030_platform_data touchbook_twldata = {
273 .vpll2 = &touchbook_vpll2, 263 .vpll2 = &touchbook_vpll2,
274}; 264};
275 265
276static struct i2c_board_info __initdata touchbook_i2c_boardinfo[] = {
277 {
278 I2C_BOARD_INFO("twl4030", 0x48),
279 .flags = I2C_CLIENT_WAKE,
280 .irq = INT_34XX_SYS_NIRQ,
281 .platform_data = &touchbook_twldata,
282 },
283};
284
285static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = { 266static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
286 { 267 {
287 I2C_BOARD_INFO("bq27200", 0x55), 268 I2C_BOARD_INFO("bq27200", 0x55),
@@ -291,8 +272,7 @@ static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
291static int __init omap3_touchbook_i2c_init(void) 272static int __init omap3_touchbook_i2c_init(void)
292{ 273{
293 /* Standard TouchBook bus */ 274 /* Standard TouchBook bus */
294 omap_register_i2c_bus(1, 2600, touchbook_i2c_boardinfo, 275 omap3_pmic_init("twl4030", &touchbook_twldata);
295 ARRAY_SIZE(touchbook_i2c_boardinfo));
296 276
297 /* Additional TouchBook bus */ 277 /* Additional TouchBook bus */
298 omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo, 278 omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo,
@@ -301,19 +281,7 @@ static int __init omap3_touchbook_i2c_init(void)
301 return 0; 281 return 0;
302} 282}
303 283
304static void __init omap3_ads7846_init(void) 284static struct ads7846_platform_data ads7846_pdata = {
305{
306 if (gpio_request(OMAP3_TS_GPIO, "ads7846_pen_down")) {
307 printk(KERN_ERR "Failed to request GPIO %d for "
308 "ads7846 pen down IRQ\n", OMAP3_TS_GPIO);
309 return;
310 }
311
312 gpio_direction_input(OMAP3_TS_GPIO);
313 gpio_set_debounce(OMAP3_TS_GPIO, 310);
314}
315
316static struct ads7846_platform_data ads7846_config = {
317 .x_min = 100, 285 .x_min = 100,
318 .y_min = 265, 286 .y_min = 265,
319 .x_max = 3950, 287 .x_max = 3950,
@@ -327,23 +295,6 @@ static struct ads7846_platform_data ads7846_config = {
327 .keep_vref_on = 1, 295 .keep_vref_on = 1,
328}; 296};
329 297
330static struct omap2_mcspi_device_config ads7846_mcspi_config = {
331 .turbo_mode = 0,
332 .single_channel = 1, /* 0: slave, 1: master */
333};
334
335static struct spi_board_info omap3_ads7846_spi_board_info[] __initdata = {
336 {
337 .modalias = "ads7846",
338 .bus_num = 4,
339 .chip_select = 0,
340 .max_speed_hz = 1500000,
341 .controller_data = &ads7846_mcspi_config,
342 .irq = OMAP_GPIO_IRQ(OMAP3_TS_GPIO),
343 .platform_data = &ads7846_config,
344 }
345};
346
347static struct gpio_led gpio_leds[] = { 298static struct gpio_led gpio_leds[] = {
348 { 299 {
349 .name = "touchbook::usr0", 300 .name = "touchbook::usr0",
@@ -434,39 +385,6 @@ static struct platform_device *omap3_touchbook_devices[] __initdata = {
434 &keys_gpio, 385 &keys_gpio,
435}; 386};
436 387
437static void __init omap3touchbook_flash_init(void)
438{
439 u8 cs = 0;
440 u8 nandcs = GPMC_CS_NUM + 1;
441
442 /* find out the chip-select on which NAND exists */
443 while (cs < GPMC_CS_NUM) {
444 u32 ret = 0;
445 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
446
447 if ((ret & 0xC00) == 0x800) {
448 printk(KERN_INFO "Found NAND on CS%d\n", cs);
449 if (nandcs > GPMC_CS_NUM)
450 nandcs = cs;
451 }
452 cs++;
453 }
454
455 if (nandcs > GPMC_CS_NUM) {
456 printk(KERN_INFO "NAND: Unable to find configuration "
457 "in GPMC\n ");
458 return;
459 }
460
461 if (nandcs < GPMC_CS_NUM) {
462 omap3touchbook_nand_data.cs = nandcs;
463
464 printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
465 if (gpmc_nand_init(&omap3touchbook_nand_data) < 0)
466 printk(KERN_ERR "Unable to register NAND device\n");
467 }
468}
469
470static const struct usbhs_omap_board_data usbhs_bdata __initconst = { 388static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
471 389
472 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, 390 .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
@@ -481,15 +399,10 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
481 399
482static void omap3_touchbook_poweroff(void) 400static void omap3_touchbook_poweroff(void)
483{ 401{
484 int r; 402 int pwr_off = TB_KILL_POWER_GPIO;
485 403
486 r = gpio_request(TB_KILL_POWER_GPIO, "DVI reset"); 404 if (gpio_request_one(pwr_off, GPIOF_OUT_INIT_LOW, "DVI reset") < 0)
487 if (r < 0) {
488 printk(KERN_ERR "Unable to get kill power GPIO\n"); 405 printk(KERN_ERR "Unable to get kill power GPIO\n");
489 return;
490 }
491
492 gpio_direction_output(TB_KILL_POWER_GPIO, 0);
493} 406}
494 407
495static int __init early_touchbook_revision(char *p) 408static int __init early_touchbook_revision(char *p)
@@ -501,12 +414,6 @@ static int __init early_touchbook_revision(char *p)
501} 414}
502early_param("tbr", early_touchbook_revision); 415early_param("tbr", early_touchbook_revision);
503 416
504static struct omap_musb_board_data musb_board_data = {
505 .interface_type = MUSB_INTERFACE_ULPI,
506 .mode = MUSB_OTG,
507 .power = 100,
508};
509
510static void __init omap3_touchbook_init(void) 417static void __init omap3_touchbook_init(void)
511{ 418{
512 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 419 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -521,17 +428,15 @@ static void __init omap3_touchbook_init(void)
521 omap_serial_init(); 428 omap_serial_init();
522 429
523 omap_mux_init_gpio(170, OMAP_PIN_INPUT); 430 omap_mux_init_gpio(170, OMAP_PIN_INPUT);
524 gpio_request(176, "DVI_nPD");
525 /* REVISIT leave DVI powered down until it's needed ... */ 431 /* REVISIT leave DVI powered down until it's needed ... */
526 gpio_direction_output(176, true); 432 gpio_request_one(176, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
527 433
528 /* Touchscreen and accelerometer */ 434 /* Touchscreen and accelerometer */
529 spi_register_board_info(omap3_ads7846_spi_board_info, 435 omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata);
530 ARRAY_SIZE(omap3_ads7846_spi_board_info)); 436 usb_musb_init(NULL);
531 omap3_ads7846_init();
532 usb_musb_init(&musb_board_data);
533 usbhs_init(&usbhs_bdata); 437 usbhs_init(&usbhs_bdata);
534 omap3touchbook_flash_init(); 438 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3touchbook_nand_partitions,
439 ARRAY_SIZE(omap3touchbook_nand_partitions));
535 440
536 /* Ensure SDRC pins are mux'd for self-refresh */ 441 /* Ensure SDRC pins are mux'd for self-refresh */
537 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); 442 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
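
The open-coded GPMC scan in omap3touchbook_flash_init() (and overo_flash_init() below) is replaced by omap_nand_flash_init() from common-board-devices. Its body falls outside the part of the new file shown at the end of this diff, so the call below is inferred from the two call sites (bus-width options, partition table, partition count); treat it as a sketch rather than the exact prototype:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>		/* NAND_BUSWIDTH_16 */

#include "common-board-devices.h"

/* Illustrative partition layout, not copied from any board above. */
static struct mtd_partition board_nand_partitions[] = {
	{
		.name	= "X-Loader",
		.offset	= 0,
		.size	= 4 * (128 * 1024),
	},
	{
		.name	= "File System",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static void __init board_flash_init(void)
{
	/* 16-bit NAND here; the Overo hunk passes 0 for an 8-bit part. */
	omap_nand_flash_init(NAND_BUSWIDTH_16, board_nand_partitions,
			     ARRAY_SIZE(board_nand_partitions));
}
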
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index f3a7b1011914..90485fced973 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -34,18 +34,19 @@
34#include <asm/mach-types.h> 34#include <asm/mach-types.h>
35#include <asm/mach/arch.h> 35#include <asm/mach/arch.h>
36#include <asm/mach/map.h> 36#include <asm/mach/map.h>
37#include <plat/display.h> 37#include <video/omapdss.h>
38 38
39#include <plat/board.h> 39#include <plat/board.h>
40#include <plat/common.h> 40#include <plat/common.h>
41#include <plat/usb.h> 41#include <plat/usb.h>
42#include <plat/mmc.h> 42#include <plat/mmc.h>
43#include <plat/panel-generic-dpi.h> 43#include <video/omap-panel-generic-dpi.h>
44#include "timer-gp.h" 44#include "timer-gp.h"
45 45
46#include "hsmmc.h" 46#include "hsmmc.h"
47#include "control.h" 47#include "control.h"
48#include "mux.h" 48#include "mux.h"
49#include "common-board-devices.h"
49 50
50#define GPIO_HUB_POWER 1 51#define GPIO_HUB_POWER 1
51#define GPIO_HUB_NRESET 62 52#define GPIO_HUB_NRESET 62
@@ -111,6 +112,11 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
111 .reset_gpio_port[2] = -EINVAL 112 .reset_gpio_port[2] = -EINVAL
112}; 113};
113 114
115static struct gpio panda_ehci_gpios[] __initdata = {
116 { GPIO_HUB_POWER, GPIOF_OUT_INIT_LOW, "hub_power" },
117 { GPIO_HUB_NRESET, GPIOF_OUT_INIT_LOW, "hub_nreset" },
118};
119
114static void __init omap4_ehci_init(void) 120static void __init omap4_ehci_init(void)
115{ 121{
116 int ret; 122 int ret;
@@ -120,44 +126,27 @@ static void __init omap4_ehci_init(void)
120 phy_ref_clk = clk_get(NULL, "auxclk3_ck"); 126 phy_ref_clk = clk_get(NULL, "auxclk3_ck");
121 if (IS_ERR(phy_ref_clk)) { 127 if (IS_ERR(phy_ref_clk)) {
122 pr_err("Cannot request auxclk3\n"); 128 pr_err("Cannot request auxclk3\n");
123 goto error1; 129 return;
124 } 130 }
125 clk_set_rate(phy_ref_clk, 19200000); 131 clk_set_rate(phy_ref_clk, 19200000);
126 clk_enable(phy_ref_clk); 132 clk_enable(phy_ref_clk);
127 133
128 /* disable the power to the usb hub prior to init */ 134 /* disable the power to the usb hub prior to init and reset phy+hub */
129 ret = gpio_request(GPIO_HUB_POWER, "hub_power"); 135 ret = gpio_request_array(panda_ehci_gpios,
136 ARRAY_SIZE(panda_ehci_gpios));
130 if (ret) { 137 if (ret) {
131 pr_err("Cannot request GPIO %d\n", GPIO_HUB_POWER); 138 pr_err("Unable to initialize EHCI power/reset\n");
132 goto error1; 139 return;
133 } 140 }
134 gpio_export(GPIO_HUB_POWER, 0);
135 gpio_direction_output(GPIO_HUB_POWER, 0);
136 gpio_set_value(GPIO_HUB_POWER, 0);
137 141
138 /* reset phy+hub */ 142 gpio_export(GPIO_HUB_POWER, 0);
139 ret = gpio_request(GPIO_HUB_NRESET, "hub_nreset");
140 if (ret) {
141 pr_err("Cannot request GPIO %d\n", GPIO_HUB_NRESET);
142 goto error2;
143 }
144 gpio_export(GPIO_HUB_NRESET, 0); 143 gpio_export(GPIO_HUB_NRESET, 0);
145 gpio_direction_output(GPIO_HUB_NRESET, 0);
146 gpio_set_value(GPIO_HUB_NRESET, 0);
147 gpio_set_value(GPIO_HUB_NRESET, 1); 144 gpio_set_value(GPIO_HUB_NRESET, 1);
148 145
149 usbhs_init(&usbhs_bdata); 146 usbhs_init(&usbhs_bdata);
150 147
151 /* enable power to hub */ 148 /* enable power to hub */
152 gpio_set_value(GPIO_HUB_POWER, 1); 149 gpio_set_value(GPIO_HUB_POWER, 1);
153 return;
154
155error2:
156 gpio_free(GPIO_HUB_POWER);
157error1:
158 pr_err("Unable to initialize EHCI power/reset\n");
159 return;
160
161} 150}
162 151
163static struct omap_musb_board_data musb_board_data = { 152static struct omap_musb_board_data musb_board_data = {
@@ -408,15 +397,6 @@ static struct twl4030_platform_data omap4_panda_twldata = {
408 .usb = &omap4_usbphy_data, 397 .usb = &omap4_usbphy_data,
409}; 398};
410 399
411static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = {
412 {
413 I2C_BOARD_INFO("twl6030", 0x48),
414 .flags = I2C_CLIENT_WAKE,
415 .irq = OMAP44XX_IRQ_SYS_1N,
416 .platform_data = &omap4_panda_twldata,
417 },
418};
419
420/* 400/*
421 * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM 401 * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM
422 * is connected as I2C slave device, and can be accessed at address 0x50 402 * is connected as I2C slave device, and can be accessed at address 0x50
@@ -429,12 +409,7 @@ static struct i2c_board_info __initdata panda_i2c_eeprom[] = {
429 409
430static int __init omap4_panda_i2c_init(void) 410static int __init omap4_panda_i2c_init(void)
431{ 411{
432 /* 412 omap4_pmic_init("twl6030", &omap4_panda_twldata);
433 * Phoenix Audio IC needs I2C1 to
434 * start with 400 KHz or less
435 */
436 omap_register_i2c_bus(1, 400, omap4_panda_i2c_boardinfo,
437 ARRAY_SIZE(omap4_panda_i2c_boardinfo));
438 omap_register_i2c_bus(2, 400, NULL, 0); 413 omap_register_i2c_bus(2, 400, NULL, 0);
439 /* 414 /*
440 * Bus 3 is attached to the DVI port where devices like the pico DLP 415 * Bus 3 is attached to the DVI port where devices like the pico DLP
@@ -651,27 +626,19 @@ static void omap4_panda_hdmi_mux_init(void)
651 OMAP_PIN_INPUT_PULLUP); 626 OMAP_PIN_INPUT_PULLUP);
652} 627}
653 628
629static struct gpio panda_hdmi_gpios[] = {
630 { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" },
631 { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
632};
633
654static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) 634static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
655{ 635{
656 int status; 636 int status;
657 637
658 status = gpio_request_one(HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, 638 status = gpio_request_array(panda_hdmi_gpios,
659 "hdmi_gpio_hpd"); 639 ARRAY_SIZE(panda_hdmi_gpios));
660 if (status) { 640 if (status)
661 pr_err("Cannot request GPIO %d\n", HDMI_GPIO_HPD); 641 pr_err("Cannot request HDMI GPIOs\n");
662 return status;
663 }
664 status = gpio_request_one(HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH,
665 "hdmi_gpio_ls_oe");
666 if (status) {
667 pr_err("Cannot request GPIO %d\n", HDMI_GPIO_LS_OE);
668 goto error1;
669 }
670
671 return 0;
672
673error1:
674 gpio_free(HDMI_GPIO_HPD);
675 642
676 return status; 643 return status;
677} 644}
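
Several hunks in this series (the Panda EHCI and HDMI paths above, Overo and RX-51 below) replace chains of gpio_request() plus gpio_direction_input()/gpio_direction_output() with a single gpio_request_array() over a struct gpio table. The helper requests each entry with its GPIOF_* flags and, on failure, releases the entries it had already claimed, which is why the hand-written error1:/error2: unwind labels disappear. A small sketch; the first two entries loosely follow the Panda hub GPIOs above, the third is purely illustrative:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>

static struct gpio board_gpios[] __initdata = {
	{ 1,  GPIOF_OUT_INIT_LOW, "hub_power"   },	/* output, driven low */
	{ 62, GPIOF_OUT_INIT_LOW, "hub_nreset"  },	/* output, held in reset */
	{ 63, GPIOF_IN,           "card_detect" },	/* plain input */
};

static int __init board_claim_gpios(void)
{
	int ret;

	ret = gpio_request_array(board_gpios, ARRAY_SIZE(board_gpios));
	if (ret) {
		pr_err("could not request board GPIOs: %d\n", ret);
		return ret;
	}

	/* The lines stay owned by board code and can be toggled later. */
	gpio_set_value(62, 1);
	return 0;
}
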
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 59ca33326b8c..1555918e3ffa 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -43,8 +43,8 @@
43 43
44#include <plat/board.h> 44#include <plat/board.h>
45#include <plat/common.h> 45#include <plat/common.h>
46#include <plat/display.h> 46#include <video/omapdss.h>
47#include <plat/panel-generic-dpi.h> 47#include <video/omap-panel-generic-dpi.h>
48#include <mach/gpio.h> 48#include <mach/gpio.h>
49#include <plat/gpmc.h> 49#include <plat/gpmc.h>
50#include <mach/hardware.h> 50#include <mach/hardware.h>
@@ -56,6 +56,7 @@
56#include "mux.h" 56#include "mux.h"
57#include "sdram-micron-mt46h32m32lf-6.h" 57#include "sdram-micron-mt46h32m32lf-6.h"
58#include "hsmmc.h" 58#include "hsmmc.h"
59#include "common-board-devices.h"
59 60
60#define OVERO_GPIO_BT_XGATE 15 61#define OVERO_GPIO_BT_XGATE 15
61#define OVERO_GPIO_W2W_NRESET 16 62#define OVERO_GPIO_W2W_NRESET 16
@@ -74,30 +75,6 @@
74#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ 75#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
75 defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) 76 defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
76 77
77#include <linux/spi/ads7846.h>
78
79static struct omap2_mcspi_device_config ads7846_mcspi_config = {
80 .turbo_mode = 0,
81 .single_channel = 1, /* 0: slave, 1: master */
82};
83
84static int ads7846_get_pendown_state(void)
85{
86 return !gpio_get_value(OVERO_GPIO_PENDOWN);
87}
88
89static struct ads7846_platform_data ads7846_config = {
90 .x_max = 0x0fff,
91 .y_max = 0x0fff,
92 .x_plate_ohms = 180,
93 .pressure_max = 255,
94 .debounce_max = 10,
95 .debounce_tol = 3,
96 .debounce_rep = 1,
97 .get_pendown_state = ads7846_get_pendown_state,
98 .keep_vref_on = 1,
99};
100
101/* fixed regulator for ads7846 */ 78/* fixed regulator for ads7846 */
102static struct regulator_consumer_supply ads7846_supply = 79static struct regulator_consumer_supply ads7846_supply =
103 REGULATOR_SUPPLY("vcc", "spi1.0"); 80 REGULATOR_SUPPLY("vcc", "spi1.0");
@@ -128,14 +105,7 @@ static struct platform_device vads7846_device = {
128 105
129static void __init overo_ads7846_init(void) 106static void __init overo_ads7846_init(void)
130{ 107{
131 if ((gpio_request(OVERO_GPIO_PENDOWN, "ADS7846_PENDOWN") == 0) && 108 omap_ads7846_init(1, OVERO_GPIO_PENDOWN, 0, NULL);
132 (gpio_direction_input(OVERO_GPIO_PENDOWN) == 0)) {
133 gpio_export(OVERO_GPIO_PENDOWN, 0);
134 } else {
135 printk(KERN_ERR "could not obtain gpio for ADS7846_PENDOWN\n");
136 return;
137 }
138
139 platform_device_register(&vads7846_device); 109 platform_device_register(&vads7846_device);
140} 110}
141 111
@@ -146,106 +116,28 @@ static inline void __init overo_ads7846_init(void) { return; }
146#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 116#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
147 117
148#include <linux/smsc911x.h> 118#include <linux/smsc911x.h>
119#include <plat/gpmc-smsc911x.h>
149 120
150static struct resource overo_smsc911x_resources[] = { 121static struct omap_smsc911x_platform_data smsc911x_cfg = {
151 {
152 .name = "smsc911x-memory",
153 .flags = IORESOURCE_MEM,
154 },
155 {
156 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
157 },
158};
159
160static struct resource overo_smsc911x2_resources[] = {
161 {
162 .name = "smsc911x2-memory",
163 .flags = IORESOURCE_MEM,
164 },
165 {
166 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
167 },
168};
169
170static struct smsc911x_platform_config overo_smsc911x_config = {
171 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
172 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
173 .flags = SMSC911X_USE_32BIT ,
174 .phy_interface = PHY_INTERFACE_MODE_MII,
175};
176
177static struct platform_device overo_smsc911x_device = {
178 .name = "smsc911x",
179 .id = 0, 122 .id = 0,
180 .num_resources = ARRAY_SIZE(overo_smsc911x_resources), 123 .cs = OVERO_SMSC911X_CS,
181 .resource = overo_smsc911x_resources, 124 .gpio_irq = OVERO_SMSC911X_GPIO,
182 .dev = { 125 .gpio_reset = -EINVAL,
183 .platform_data = &overo_smsc911x_config, 126 .flags = SMSC911X_USE_32BIT,
184 },
185}; 127};
186 128
187static struct platform_device overo_smsc911x2_device = { 129static struct omap_smsc911x_platform_data smsc911x2_cfg = {
188 .name = "smsc911x",
189 .id = 1, 130 .id = 1,
190 .num_resources = ARRAY_SIZE(overo_smsc911x2_resources), 131 .cs = OVERO_SMSC911X2_CS,
191 .resource = overo_smsc911x2_resources, 132 .gpio_irq = OVERO_SMSC911X2_GPIO,
192 .dev = { 133 .gpio_reset = -EINVAL,
193 .platform_data = &overo_smsc911x_config, 134 .flags = SMSC911X_USE_32BIT,
194 },
195}; 135};
196 136
197static struct platform_device *smsc911x_devices[] = { 137static void __init overo_init_smsc911x(void)
198 &overo_smsc911x_device,
199 &overo_smsc911x2_device,
200};
201
202static inline void __init overo_init_smsc911x(void)
203{ 138{
204 unsigned long cs_mem_base, cs_mem_base2; 139 gpmc_smsc911x_init(&smsc911x_cfg);
205 140 gpmc_smsc911x_init(&smsc911x2_cfg);
206 /* set up first smsc911x chip */
207
208 if (gpmc_cs_request(OVERO_SMSC911X_CS, SZ_16M, &cs_mem_base) < 0) {
209 printk(KERN_ERR "Failed request for GPMC mem for smsc911x\n");
210 return;
211 }
212
213 overo_smsc911x_resources[0].start = cs_mem_base + 0x0;
214 overo_smsc911x_resources[0].end = cs_mem_base + 0xff;
215
216 if ((gpio_request(OVERO_SMSC911X_GPIO, "SMSC911X IRQ") == 0) &&
217 (gpio_direction_input(OVERO_SMSC911X_GPIO) == 0)) {
218 gpio_export(OVERO_SMSC911X_GPIO, 0);
219 } else {
220 printk(KERN_ERR "could not obtain gpio for SMSC911X IRQ\n");
221 return;
222 }
223
224 overo_smsc911x_resources[1].start = OMAP_GPIO_IRQ(OVERO_SMSC911X_GPIO);
225 overo_smsc911x_resources[1].end = 0;
226
227 /* set up second smsc911x chip */
228
229 if (gpmc_cs_request(OVERO_SMSC911X2_CS, SZ_16M, &cs_mem_base2) < 0) {
230 printk(KERN_ERR "Failed request for GPMC mem for smsc911x2\n");
231 return;
232 }
233
234 overo_smsc911x2_resources[0].start = cs_mem_base2 + 0x0;
235 overo_smsc911x2_resources[0].end = cs_mem_base2 + 0xff;
236
237 if ((gpio_request(OVERO_SMSC911X2_GPIO, "SMSC911X2 IRQ") == 0) &&
238 (gpio_direction_input(OVERO_SMSC911X2_GPIO) == 0)) {
239 gpio_export(OVERO_SMSC911X2_GPIO, 0);
240 } else {
241 printk(KERN_ERR "could not obtain gpio for SMSC911X2 IRQ\n");
242 return;
243 }
244
245 overo_smsc911x2_resources[1].start = OMAP_GPIO_IRQ(OVERO_SMSC911X2_GPIO);
246 overo_smsc911x2_resources[1].end = 0;
247
248 platform_add_devices(smsc911x_devices, ARRAY_SIZE(smsc911x_devices));
249} 141}
250 142
251#else 143#else
@@ -259,21 +151,20 @@ static int dvi_enabled;
259#define OVERO_GPIO_LCD_EN 144 151#define OVERO_GPIO_LCD_EN 144
260#define OVERO_GPIO_LCD_BL 145 152#define OVERO_GPIO_LCD_BL 145
261 153
154static struct gpio overo_dss_gpios[] __initdata = {
155 { OVERO_GPIO_LCD_EN, GPIOF_OUT_INIT_HIGH, "OVERO_GPIO_LCD_EN" },
156 { OVERO_GPIO_LCD_BL, GPIOF_OUT_INIT_HIGH, "OVERO_GPIO_LCD_BL" },
157};
158
262static void __init overo_display_init(void) 159static void __init overo_display_init(void)
263{ 160{
264 if ((gpio_request(OVERO_GPIO_LCD_EN, "OVERO_GPIO_LCD_EN") == 0) && 161 if (gpio_request_array(overo_dss_gpios, ARRAY_SIZE(overo_dss_gpios))) {
265 (gpio_direction_output(OVERO_GPIO_LCD_EN, 1) == 0)) 162 printk(KERN_ERR "could not obtain DSS control GPIOs\n");
266 gpio_export(OVERO_GPIO_LCD_EN, 0); 163 return;
267 else 164 }
268 printk(KERN_ERR "could not obtain gpio for "
269 "OVERO_GPIO_LCD_EN\n");
270 165
271 if ((gpio_request(OVERO_GPIO_LCD_BL, "OVERO_GPIO_LCD_BL") == 0) && 166 gpio_export(OVERO_GPIO_LCD_EN, 0);
272 (gpio_direction_output(OVERO_GPIO_LCD_BL, 1) == 0)) 167 gpio_export(OVERO_GPIO_LCD_BL, 0);
273 gpio_export(OVERO_GPIO_LCD_BL, 0);
274 else
275 printk(KERN_ERR "could not obtain gpio for "
276 "OVERO_GPIO_LCD_BL\n");
277} 168}
278 169
279static int overo_panel_enable_dvi(struct omap_dss_device *dssdev) 170static int overo_panel_enable_dvi(struct omap_dss_device *dssdev)
@@ -412,45 +303,6 @@ static struct mtd_partition overo_nand_partitions[] = {
412 }, 303 },
413}; 304};
414 305
415static struct omap_nand_platform_data overo_nand_data = {
416 .parts = overo_nand_partitions,
417 .nr_parts = ARRAY_SIZE(overo_nand_partitions),
418 .dma_channel = -1, /* disable DMA in OMAP NAND driver */
419};
420
421static void __init overo_flash_init(void)
422{
423 u8 cs = 0;
424 u8 nandcs = GPMC_CS_NUM + 1;
425
426 /* find out the chip-select on which NAND exists */
427 while (cs < GPMC_CS_NUM) {
428 u32 ret = 0;
429 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
430
431 if ((ret & 0xC00) == 0x800) {
432 printk(KERN_INFO "Found NAND on CS%d\n", cs);
433 if (nandcs > GPMC_CS_NUM)
434 nandcs = cs;
435 }
436 cs++;
437 }
438
439 if (nandcs > GPMC_CS_NUM) {
440 printk(KERN_INFO "NAND: Unable to find configuration "
441 "in GPMC\n ");
442 return;
443 }
444
445 if (nandcs < GPMC_CS_NUM) {
446 overo_nand_data.cs = nandcs;
447
448 printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
449 if (gpmc_nand_init(&overo_nand_data) < 0)
450 printk(KERN_ERR "Unable to register NAND device\n");
451 }
452}
453
454static struct omap2_hsmmc_info mmc[] = { 306static struct omap2_hsmmc_info mmc[] = {
455 { 307 {
456 .mmc = 1, 308 .mmc = 1,
@@ -648,37 +500,15 @@ static struct twl4030_platform_data overo_twldata = {
648 .vpll2 = &overo_vpll2, 500 .vpll2 = &overo_vpll2,
649}; 501};
650 502
651static struct i2c_board_info __initdata overo_i2c_boardinfo[] = {
652 {
653 I2C_BOARD_INFO("tps65950", 0x48),
654 .flags = I2C_CLIENT_WAKE,
655 .irq = INT_34XX_SYS_NIRQ,
656 .platform_data = &overo_twldata,
657 },
658};
659
660static int __init overo_i2c_init(void) 503static int __init overo_i2c_init(void)
661{ 504{
662 omap_register_i2c_bus(1, 2600, overo_i2c_boardinfo, 505 omap3_pmic_init("tps65950", &overo_twldata);
663 ARRAY_SIZE(overo_i2c_boardinfo));
664 /* i2c2 pins are used for gpio */ 506 /* i2c2 pins are used for gpio */
665 omap_register_i2c_bus(3, 400, NULL, 0); 507 omap_register_i2c_bus(3, 400, NULL, 0);
666 return 0; 508 return 0;
667} 509}
668 510
669static struct spi_board_info overo_spi_board_info[] __initdata = { 511static struct spi_board_info overo_spi_board_info[] __initdata = {
670#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
671 defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
672 {
673 .modalias = "ads7846",
674 .bus_num = 1,
675 .chip_select = 0,
676 .max_speed_hz = 1500000,
677 .controller_data = &ads7846_mcspi_config,
678 .irq = OMAP_GPIO_IRQ(OVERO_GPIO_PENDOWN),
679 .platform_data = &ads7846_config,
680 },
681#endif
682#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \ 512#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
683 defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE) 513 defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
684 { 514 {
@@ -722,20 +552,22 @@ static struct omap_board_mux board_mux[] __initdata = {
722}; 552};
723#endif 553#endif
724 554
725static struct omap_musb_board_data musb_board_data = { 555static struct gpio overo_bt_gpios[] __initdata = {
726 .interface_type = MUSB_INTERFACE_ULPI, 556 { OVERO_GPIO_BT_XGATE, GPIOF_OUT_INIT_LOW, "lcd enable" },
727 .mode = MUSB_OTG, 557 { OVERO_GPIO_BT_NRESET, GPIOF_OUT_INIT_HIGH, "lcd bl enable" },
728 .power = 100,
729}; 558};
730 559
731static void __init overo_init(void) 560static void __init overo_init(void)
732{ 561{
562 int ret;
563
733 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 564 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
734 overo_i2c_init(); 565 overo_i2c_init();
735 omap_display_init(&overo_dss_data); 566 omap_display_init(&overo_dss_data);
736 omap_serial_init(); 567 omap_serial_init();
737 overo_flash_init(); 568 omap_nand_flash_init(0, overo_nand_partitions,
738 usb_musb_init(&musb_board_data); 569 ARRAY_SIZE(overo_nand_partitions));
570 usb_musb_init(NULL);
739 usbhs_init(&usbhs_bdata); 571 usbhs_init(&usbhs_bdata);
740 overo_spi_init(); 572 overo_spi_init();
741 overo_ads7846_init(); 573 overo_ads7846_init();
@@ -748,9 +580,9 @@ static void __init overo_init(void)
748 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); 580 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
749 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); 581 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
750 582
751 if ((gpio_request(OVERO_GPIO_W2W_NRESET, 583 ret = gpio_request_one(OVERO_GPIO_W2W_NRESET, GPIOF_OUT_INIT_HIGH,
752 "OVERO_GPIO_W2W_NRESET") == 0) && 584 "OVERO_GPIO_W2W_NRESET");
753 (gpio_direction_output(OVERO_GPIO_W2W_NRESET, 1) == 0)) { 585 if (ret == 0) {
754 gpio_export(OVERO_GPIO_W2W_NRESET, 0); 586 gpio_export(OVERO_GPIO_W2W_NRESET, 0);
755 gpio_set_value(OVERO_GPIO_W2W_NRESET, 0); 587 gpio_set_value(OVERO_GPIO_W2W_NRESET, 0);
756 udelay(10); 588 udelay(10);
@@ -760,25 +592,20 @@ static void __init overo_init(void)
760 "OVERO_GPIO_W2W_NRESET\n"); 592 "OVERO_GPIO_W2W_NRESET\n");
761 } 593 }
762 594
763 if ((gpio_request(OVERO_GPIO_BT_XGATE, "OVERO_GPIO_BT_XGATE") == 0) && 595 ret = gpio_request_array(overo_bt_gpios, ARRAY_SIZE(overo_bt_gpios));
764 (gpio_direction_output(OVERO_GPIO_BT_XGATE, 0) == 0)) 596 if (ret) {
597 pr_err("%s: could not obtain BT gpios\n", __func__);
598 } else {
765 gpio_export(OVERO_GPIO_BT_XGATE, 0); 599 gpio_export(OVERO_GPIO_BT_XGATE, 0);
766 else
767 printk(KERN_ERR "could not obtain gpio for OVERO_GPIO_BT_XGATE\n");
768
769 if ((gpio_request(OVERO_GPIO_BT_NRESET, "OVERO_GPIO_BT_NRESET") == 0) &&
770 (gpio_direction_output(OVERO_GPIO_BT_NRESET, 1) == 0)) {
771 gpio_export(OVERO_GPIO_BT_NRESET, 0); 600 gpio_export(OVERO_GPIO_BT_NRESET, 0);
772 gpio_set_value(OVERO_GPIO_BT_NRESET, 0); 601 gpio_set_value(OVERO_GPIO_BT_NRESET, 0);
773 mdelay(6); 602 mdelay(6);
774 gpio_set_value(OVERO_GPIO_BT_NRESET, 1); 603 gpio_set_value(OVERO_GPIO_BT_NRESET, 1);
775 } else {
776 printk(KERN_ERR "could not obtain gpio for "
777 "OVERO_GPIO_BT_NRESET\n");
778 } 604 }
779 605
780 if ((gpio_request(OVERO_GPIO_USBH_CPEN, "OVERO_GPIO_USBH_CPEN") == 0) && 606 ret = gpio_request_one(OVERO_GPIO_USBH_CPEN, GPIOF_OUT_INIT_HIGH,
781 (gpio_direction_output(OVERO_GPIO_USBH_CPEN, 1) == 0)) 607 "OVERO_GPIO_USBH_CPEN");
608 if (ret == 0)
782 gpio_export(OVERO_GPIO_USBH_CPEN, 0); 609 gpio_export(OVERO_GPIO_USBH_CPEN, 0);
783 else 610 else
784 printk(KERN_ERR "could not obtain gpio for " 611 printk(KERN_ERR "could not obtain gpio for "
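
Where only a single line is involved, the same cleanup uses gpio_request_one(), which folds the request, the direction setting and, for outputs, the initial level into one call, as in the OVERO_GPIO_W2W_NRESET and OVERO_GPIO_USBH_CPEN changes just above and the DVI_nPD change in the TouchBook hunk. A sketch, with the GPIO number borrowed from that TouchBook hunk purely as an example:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>

#define BOARD_DVI_NPD_GPIO	176

static void __init board_claim_dvi_gpio(void)
{
	/* Request the GPIO, make it an output and drive it high. */
	if (gpio_request_one(BOARD_DVI_NPD_GPIO, GPIOF_OUT_INIT_HIGH,
			     "DVI_nPD") < 0)
		pr_err("could not request DVI_nPD GPIO\n");
}
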
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index 2af8b05e786d..42d10b12da3c 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -31,6 +31,7 @@
31#include "mux.h" 31#include "mux.h"
32#include "hsmmc.h" 32#include "hsmmc.h"
33#include "sdram-nokia.h" 33#include "sdram-nokia.h"
34#include "common-board-devices.h"
34 35
35static struct regulator_consumer_supply rm680_vemmc_consumers[] = { 36static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
36 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"), 37 REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
@@ -90,19 +91,9 @@ static struct twl4030_platform_data rm680_twl_data = {
90 /* add rest of the children here */ 91 /* add rest of the children here */
91}; 92};
92 93
93static struct i2c_board_info __initdata rm680_twl_i2c_board_info[] = {
94 {
95 I2C_BOARD_INFO("twl5031", 0x48),
96 .flags = I2C_CLIENT_WAKE,
97 .irq = INT_34XX_SYS_NIRQ,
98 .platform_data = &rm680_twl_data,
99 },
100};
101
102static void __init rm680_i2c_init(void) 94static void __init rm680_i2c_init(void)
103{ 95{
104 omap_register_i2c_bus(1, 2900, rm680_twl_i2c_board_info, 96 omap_pmic_init(1, 2900, "twl5031", INT_34XX_SYS_NIRQ, &rm680_twl_data);
105 ARRAY_SIZE(rm680_twl_i2c_board_info));
106 omap_register_i2c_bus(2, 400, NULL, 0); 97 omap_register_i2c_bus(2, 400, NULL, 0);
107 omap_register_i2c_bus(3, 400, NULL, 0); 98 omap_register_i2c_bus(3, 400, NULL, 0);
108} 99}
@@ -153,17 +144,11 @@ static struct omap_board_mux board_mux[] __initdata = {
153}; 144};
154#endif 145#endif
155 146
156static struct omap_musb_board_data rm680_musb_data = {
157 .interface_type = MUSB_INTERFACE_ULPI,
158 .mode = MUSB_PERIPHERAL,
159 .power = 100,
160};
161
162static void __init rm680_init(void) 147static void __init rm680_init(void)
163{ 148{
164 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 149 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
165 omap_serial_init(); 150 omap_serial_init();
166 usb_musb_init(&rm680_musb_data); 151 usb_musb_init(NULL);
167 rm680_peripherals_init(); 152 rm680_peripherals_init();
168} 153}
169 154
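
rm680 above, and the RX-51 and Zoom boards below, drop their single-entry twl I2C board_info arrays in favour of omap_pmic_init() from common-board-devices.c (shown at the end of this diff): it fills a shared i2c_board_info at address 0x48 with I2C_CLIENT_WAKE, the given chip name, IRQ and platform data, then registers it on the requested bus. The omap3_pmic_init()/omap4_pmic_init() calls used by other boards are presumably thin wrappers supplying the usual bus, clock rate and PMIC IRQ for those SoCs; their definitions are not visible in this diff. Usage mirroring the rm680 hunk (struct name is illustrative, header paths follow the plat-omap layout of this tree):

#include <linux/init.h>
#include <linux/i2c/twl.h>

#include <plat/i2c.h>
#include <plat/irqs.h>		/* INT_34XX_SYS_NIRQ */

#include "common-board-devices.h"

static struct twl4030_platform_data board_twl_data = {
	/* gpio, keypad and regulator children as before */
};

static void __init board_i2c_init(void)
{
	/* Bus 1 at 2900 kHz, twl5031 on the SYS_NIRQ line. */
	omap_pmic_init(1, 2900, "twl5031", INT_34XX_SYS_NIRQ, &board_twl_data);
	omap_register_i2c_bus(2, 400, NULL, 0);
	omap_register_i2c_bus(3, 400, NULL, 0);
}
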
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index bbcb6775a6a3..f6247e71a194 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -23,6 +23,7 @@
23#include <linux/gpio.h> 23#include <linux/gpio.h>
24#include <linux/gpio_keys.h> 24#include <linux/gpio_keys.h>
25#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
26#include <linux/power/isp1704_charger.h>
26 27
27#include <plat/mcspi.h> 28#include <plat/mcspi.h>
28#include <plat/board.h> 29#include <plat/board.h>
@@ -43,6 +44,7 @@
43 44
44#include "mux.h" 45#include "mux.h"
45#include "hsmmc.h" 46#include "hsmmc.h"
47#include "common-board-devices.h"
46 48
47#define SYSTEM_REV_B_USES_VAUX3 0x1699 49#define SYSTEM_REV_B_USES_VAUX3 0x1699
48#define SYSTEM_REV_S_USES_VAUX3 0x8 50#define SYSTEM_REV_S_USES_VAUX3 0x8
@@ -52,6 +54,8 @@
52#define RX51_FMTX_RESET_GPIO 163 54#define RX51_FMTX_RESET_GPIO 163
53#define RX51_FMTX_IRQ 53 55#define RX51_FMTX_IRQ 53
54 56
57#define RX51_USB_TRANSCEIVER_RST_GPIO 67
58
55/* list all spi devices here */ 59/* list all spi devices here */
56enum { 60enum {
57 RX51_SPI_WL1251, 61 RX51_SPI_WL1251,
@@ -110,10 +114,30 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
110 }, 114 },
111}; 115};
112 116
117static void rx51_charger_set_power(bool on)
118{
119 gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on);
120}
121
122static struct isp1704_charger_data rx51_charger_data = {
123 .set_power = rx51_charger_set_power,
124};
125
113static struct platform_device rx51_charger_device = { 126static struct platform_device rx51_charger_device = {
114 .name = "isp1704_charger", 127 .name = "isp1704_charger",
128 .dev = {
129 .platform_data = &rx51_charger_data,
130 },
115}; 131};
116 132
133static void __init rx51_charger_init(void)
134{
135 WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
136 GPIOF_OUT_INIT_LOW, "isp1704_reset"));
137
138 platform_device_register(&rx51_charger_device);
139}
140
117#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) 141#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
118 142
119#define RX51_GPIO_CAMERA_LENS_COVER 110 143#define RX51_GPIO_CAMERA_LENS_COVER 110
@@ -557,10 +581,8 @@ static __init void rx51_init_si4713(void)
557static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n) 581static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
558{ 582{
559 /* FIXME this gpio setup is just a placeholder for now */ 583 /* FIXME this gpio setup is just a placeholder for now */
560 gpio_request(gpio + 6, "backlight_pwm"); 584 gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
561 gpio_direction_output(gpio + 6, 0); 585 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en");
562 gpio_request(gpio + 7, "speaker_en");
563 gpio_direction_output(gpio + 7, 1);
564 586
565 return 0; 587 return 0;
566} 588}
@@ -730,7 +752,7 @@ static struct twl4030_resconfig twl4030_rconfig[] __initdata = {
730 { .resource = RES_RESET, .devgroup = -1, 752 { .resource = RES_RESET, .devgroup = -1,
731 .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1 753 .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
732 }, 754 },
733 { .resource = RES_Main_Ref, .devgroup = -1, 755 { .resource = RES_MAIN_REF, .devgroup = -1,
734 .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1 756 .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
735 }, 757 },
736 { 0, 0}, 758 { 0, 0},
@@ -777,15 +799,6 @@ static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata_or_module =
777 .power_gpio = 98, 799 .power_gpio = 98,
778}; 800};
779 801
780static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
781 {
782 I2C_BOARD_INFO("twl5030", 0x48),
783 .flags = I2C_CLIENT_WAKE,
784 .irq = INT_34XX_SYS_NIRQ,
785 .platform_data = &rx51_twldata,
786 },
787};
788
789/* Audio setup data */ 802/* Audio setup data */
790static struct aic3x_setup_data rx51_aic34_setup = { 803static struct aic3x_setup_data rx51_aic34_setup = {
791 .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED, 804 .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED,
@@ -833,8 +846,7 @@ static int __init rx51_i2c_init(void)
833 rx51_twldata.vaux3 = &rx51_vaux3_cam; 846 rx51_twldata.vaux3 = &rx51_vaux3_cam;
834 } 847 }
835 rx51_twldata.vmmc2 = &rx51_vmmc2; 848 rx51_twldata.vmmc2 = &rx51_vmmc2;
836 omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1, 849 omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
837 ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
838 omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, 850 omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
839 ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); 851 ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
840 omap_register_i2c_bus(3, 400, NULL, 0); 852 omap_register_i2c_bus(3, 400, NULL, 0);
@@ -921,26 +933,20 @@ static void rx51_wl1251_set_power(bool enable)
921 gpio_set_value(RX51_WL1251_POWER_GPIO, enable); 933 gpio_set_value(RX51_WL1251_POWER_GPIO, enable);
922} 934}
923 935
936static struct gpio rx51_wl1251_gpios[] __initdata = {
937 { RX51_WL1251_POWER_GPIO, GPIOF_OUT_INIT_LOW, "wl1251 power" },
938 { RX51_WL1251_IRQ_GPIO, GPIOF_IN, "wl1251 irq" },
939};
940
924static void __init rx51_init_wl1251(void) 941static void __init rx51_init_wl1251(void)
925{ 942{
926 int irq, ret; 943 int irq, ret;
927 944
928 ret = gpio_request(RX51_WL1251_POWER_GPIO, "wl1251 power"); 945 ret = gpio_request_array(rx51_wl1251_gpios,
946 ARRAY_SIZE(rx51_wl1251_gpios));
929 if (ret < 0) 947 if (ret < 0)
930 goto error; 948 goto error;
931 949
932 ret = gpio_direction_output(RX51_WL1251_POWER_GPIO, 0);
933 if (ret < 0)
934 goto err_power;
935
936 ret = gpio_request(RX51_WL1251_IRQ_GPIO, "wl1251 irq");
937 if (ret < 0)
938 goto err_power;
939
940 ret = gpio_direction_input(RX51_WL1251_IRQ_GPIO);
941 if (ret < 0)
942 goto err_irq;
943
944 irq = gpio_to_irq(RX51_WL1251_IRQ_GPIO); 950 irq = gpio_to_irq(RX51_WL1251_IRQ_GPIO);
945 if (irq < 0) 951 if (irq < 0)
946 goto err_irq; 952 goto err_irq;
@@ -952,10 +958,7 @@ static void __init rx51_init_wl1251(void)
952 958
953err_irq: 959err_irq:
954 gpio_free(RX51_WL1251_IRQ_GPIO); 960 gpio_free(RX51_WL1251_IRQ_GPIO);
955
956err_power:
957 gpio_free(RX51_WL1251_POWER_GPIO); 961 gpio_free(RX51_WL1251_POWER_GPIO);
958
959error: 962error:
960 printk(KERN_ERR "wl1251 board initialisation failed\n"); 963 printk(KERN_ERR "wl1251 board initialisation failed\n");
961 wl1251_pdata.set_power = NULL; 964 wl1251_pdata.set_power = NULL;
@@ -981,6 +984,6 @@ void __init rx51_peripherals_init(void)
981 if (partition) 984 if (partition)
982 omap2_hsmmc_init(mmc); 985 omap2_hsmmc_init(mmc);
983 986
984 platform_device_register(&rx51_charger_device); 987 rx51_charger_init();
985} 988}
986 989
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index 89a66db8b77d..2c1289bd5e6a 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -15,7 +15,7 @@
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <plat/display.h> 18#include <video/omapdss.h>
19#include <plat/vram.h> 19#include <plat/vram.h>
20#include <plat/mcspi.h> 20#include <plat/mcspi.h>
21 21
@@ -76,13 +76,12 @@ static int __init rx51_video_init(void)
76 return 0; 76 return 0;
77 } 77 }
78 78
79 if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) { 79 if (gpio_request_one(RX51_LCD_RESET_GPIO, GPIOF_OUT_INIT_HIGH,
80 "LCD ACX565AKM reset")) {
80 pr_err("%s failed to get LCD Reset GPIO\n", __func__); 81 pr_err("%s failed to get LCD Reset GPIO\n", __func__);
81 return 0; 82 return 0;
82 } 83 }
83 84
84 gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
85
86 omap_display_init(&rx51_dss_board_info); 85 omap_display_init(&rx51_dss_board_info);
87 return 0; 86 return 0;
88} 87}
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index f8ba20a14e62..fec4cac8fa0a 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -58,21 +58,25 @@ static struct platform_device leds_gpio = {
58 }, 58 },
59}; 59};
60 60
61/*
62 * cpuidle C-states definition override from the default values.
63 * The 'exit_latency' field is the sum of sleep and wake-up latencies.
64 */
61static struct cpuidle_params rx51_cpuidle_params[] = { 65static struct cpuidle_params rx51_cpuidle_params[] = {
62 /* C1 */ 66 /* C1 */
63 {1, 110, 162, 5}, 67 {110 + 162, 5 , 1},
64 /* C2 */ 68 /* C2 */
65 {1, 106, 180, 309}, 69 {106 + 180, 309, 1},
66 /* C3 */ 70 /* C3 */
67 {0, 107, 410, 46057}, 71 {107 + 410, 46057, 0},
68 /* C4 */ 72 /* C4 */
69 {0, 121, 3374, 46057}, 73 {121 + 3374, 46057, 0},
70 /* C5 */ 74 /* C5 */
71 {1, 855, 1146, 46057}, 75 {855 + 1146, 46057, 1},
72 /* C6 */ 76 /* C6 */
73 {0, 7580, 4134, 484329}, 77 {7580 + 4134, 484329, 0},
74 /* C7 */ 78 /* C7 */
75 {1, 7505, 15274, 484329}, 79 {7505 + 15274, 484329, 1},
76}; 80};
77 81
78static struct omap_lcd_config rx51_lcd_config = { 82static struct omap_lcd_config rx51_lcd_config = {
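
The RX-51 C-state table above is converted from the old four-column initialisers (valid flag, sleep latency, wake-up latency, threshold) to a three-column form in which, as the added comment says, exit_latency is the sum of the old sleep and wake-up latencies and the old threshold becomes the target residency. Read against those initialisers, the structure being filled in looks roughly like this; the exact declaration lives in the OMAP2 PM headers and is not part of this diff:

#include <linux/types.h>

/*
 * Field order implied by the converted entries, e.g. the old C1 line
 * {1, 110, 162, 5} becoming {110 + 162, 5, 1}:
 */
struct cpuidle_params {
	u32 exit_latency;	/* sleep latency + wake-up latency, in us */
	u32 target_residency;	/* old "threshold" column, in us */
	u8  valid;		/* whether this C-state is usable */
};
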
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index 007ebdc6c993..6402e781c458 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -15,6 +15,7 @@
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16 16
17#include <plat/gpmc.h> 17#include <plat/gpmc.h>
18#include <plat/gpmc-smsc911x.h>
18 19
19#include <mach/board-zoom.h> 20#include <mach/board-zoom.h>
20 21
@@ -26,60 +27,16 @@
26#define DEBUG_BASE 0x08000000 27#define DEBUG_BASE 0x08000000
27#define ZOOM_ETHR_START DEBUG_BASE 28#define ZOOM_ETHR_START DEBUG_BASE
28 29
29static struct resource zoom_smsc911x_resources[] = { 30static struct omap_smsc911x_platform_data zoom_smsc911x_cfg = {
30 [0] = { 31 .cs = ZOOM_SMSC911X_CS,
31 .start = ZOOM_ETHR_START, 32 .gpio_irq = ZOOM_SMSC911X_GPIO,
32 .end = ZOOM_ETHR_START + SZ_4K, 33 .gpio_reset = -EINVAL,
33 .flags = IORESOURCE_MEM,
34 },
35 [1] = {
36 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
37 },
38};
39
40static struct smsc911x_platform_config zoom_smsc911x_config = {
41 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
42 .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
43 .flags = SMSC911X_USE_32BIT, 34 .flags = SMSC911X_USE_32BIT,
44 .phy_interface = PHY_INTERFACE_MODE_MII,
45};
46
47static struct platform_device zoom_smsc911x_device = {
48 .name = "smsc911x",
49 .id = -1,
50 .num_resources = ARRAY_SIZE(zoom_smsc911x_resources),
51 .resource = zoom_smsc911x_resources,
52 .dev = {
53 .platform_data = &zoom_smsc911x_config,
54 },
55}; 35};
56 36
57static inline void __init zoom_init_smsc911x(void) 37static inline void __init zoom_init_smsc911x(void)
58{ 38{
59 int eth_cs; 39 gpmc_smsc911x_init(&zoom_smsc911x_cfg);
60 unsigned long cs_mem_base;
61 int eth_gpio = 0;
62
63 eth_cs = ZOOM_SMSC911X_CS;
64
65 if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) {
66 printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n");
67 return;
68 }
69
70 zoom_smsc911x_resources[0].start = cs_mem_base + 0x0;
71 zoom_smsc911x_resources[0].end = cs_mem_base + 0xff;
72
73 eth_gpio = ZOOM_SMSC911X_GPIO;
74
75 zoom_smsc911x_resources[1].start = OMAP_GPIO_IRQ(eth_gpio);
76
77 if (gpio_request(eth_gpio, "smsc911x irq") < 0) {
78 printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
79 eth_gpio);
80 return;
81 }
82 gpio_direction_input(eth_gpio);
83} 40}
84 41
85static struct plat_serial8250_port serial_platform_data[] = { 42static struct plat_serial8250_port serial_platform_data[] = {
@@ -120,12 +77,9 @@ static inline void __init zoom_init_quaduart(void)
120 77
121 quart_gpio = ZOOM_QUADUART_GPIO; 78 quart_gpio = ZOOM_QUADUART_GPIO;
122 79
123 if (gpio_request(quart_gpio, "TL16CP754C GPIO") < 0) { 80 if (gpio_request_one(quart_gpio, GPIOF_IN, "TL16CP754C GPIO") < 0)
124 printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n", 81 printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n",
125 quart_gpio); 82 quart_gpio);
126 return;
127 }
128 gpio_direction_input(quart_gpio);
129} 83}
130 84
131static inline int omap_zoom_debugboard_detect(void) 85static inline int omap_zoom_debugboard_detect(void)
@@ -135,12 +89,12 @@ static inline int omap_zoom_debugboard_detect(void)
135 89
136 debug_board_detect = ZOOM_SMSC911X_GPIO; 90 debug_board_detect = ZOOM_SMSC911X_GPIO;
137 91
138 if (gpio_request(debug_board_detect, "Zoom debug board detect") < 0) { 92 if (gpio_request_one(debug_board_detect, GPIOF_IN,
93 "Zoom debug board detect") < 0) {
139 printk(KERN_ERR "Failed to request GPIO%d for Zoom debug" 94 printk(KERN_ERR "Failed to request GPIO%d for Zoom debug"
140 "board detect\n", debug_board_detect); 95 "board detect\n", debug_board_detect);
141 return 0; 96 return 0;
142 } 97 }
143 gpio_direction_input(debug_board_detect);
144 98
145 if (!gpio_get_value(debug_board_detect)) { 99 if (!gpio_get_value(debug_board_detect)) {
146 ret = 0; 100 ret = 0;
@@ -150,7 +104,6 @@ static inline int omap_zoom_debugboard_detect(void)
150} 104}
151 105
152static struct platform_device *zoom_devices[] __initdata = { 106static struct platform_device *zoom_devices[] __initdata = {
153 &zoom_smsc911x_device,
154 &zoom_debugboard_serial_device, 107 &zoom_debugboard_serial_device,
155}; 108};
156 109
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index 37b84c2b850f..c7c6beb1ec24 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -15,40 +15,25 @@
15#include <linux/i2c/twl.h> 15#include <linux/i2c/twl.h>
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <plat/mcspi.h> 17#include <plat/mcspi.h>
18#include <plat/display.h> 18#include <video/omapdss.h>
19 19
20#define LCD_PANEL_RESET_GPIO_PROD 96 20#define LCD_PANEL_RESET_GPIO_PROD 96
21#define LCD_PANEL_RESET_GPIO_PILOT 55 21#define LCD_PANEL_RESET_GPIO_PILOT 55
22#define LCD_PANEL_QVGA_GPIO 56 22#define LCD_PANEL_QVGA_GPIO 56
23 23
24static struct gpio zoom_lcd_gpios[] __initdata = {
25 { -EINVAL, GPIOF_OUT_INIT_HIGH, "lcd reset" },
26 { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" },
27};
28
24static void zoom_lcd_panel_init(void) 29static void zoom_lcd_panel_init(void)
25{ 30{
26 int ret; 31 zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
27 unsigned char lcd_panel_reset_gpio;
28
29 lcd_panel_reset_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
30 LCD_PANEL_RESET_GPIO_PROD : 32 LCD_PANEL_RESET_GPIO_PROD :
31 LCD_PANEL_RESET_GPIO_PILOT; 33 LCD_PANEL_RESET_GPIO_PILOT;
32 34
33 ret = gpio_request(lcd_panel_reset_gpio, "lcd reset"); 35 if (gpio_request_array(zoom_lcd_gpios, ARRAY_SIZE(zoom_lcd_gpios)))
34 if (ret) { 36 pr_err("%s: Failed to get LCD GPIOs.\n", __func__);
35 pr_err("Failed to get LCD reset GPIO (gpio%d).\n",
36 lcd_panel_reset_gpio);
37 return;
38 }
39 gpio_direction_output(lcd_panel_reset_gpio, 1);
40
41 ret = gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga");
42 if (ret) {
43 pr_err("Failed to get LCD_PANEL_QVGA_GPIO (gpio%d).\n",
44 LCD_PANEL_QVGA_GPIO);
45 goto err0;
46 }
47 gpio_direction_output(LCD_PANEL_QVGA_GPIO, 1);
48
49 return;
50err0:
51 gpio_free(lcd_panel_reset_gpio);
52} 37}
53 38
54static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev) 39static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev)
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 8dee7549fbdf..118c6f53c5eb 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -31,6 +31,7 @@
31 31
32#include "mux.h" 32#include "mux.h"
33#include "hsmmc.h" 33#include "hsmmc.h"
34#include "common-board-devices.h"
34 35
35#define OMAP_ZOOM_WLAN_PMENA_GPIO (101) 36#define OMAP_ZOOM_WLAN_PMENA_GPIO (101)
36#define OMAP_ZOOM_WLAN_IRQ_GPIO (162) 37#define OMAP_ZOOM_WLAN_IRQ_GPIO (162)
@@ -276,13 +277,11 @@ static int zoom_twl_gpio_setup(struct device *dev,
276 zoom_vsim_supply.dev = mmc[0].dev; 277 zoom_vsim_supply.dev = mmc[0].dev;
277 zoom_vmmc2_supply.dev = mmc[1].dev; 278 zoom_vmmc2_supply.dev = mmc[1].dev;
278 279
279 ret = gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd enable"); 280 ret = gpio_request_one(LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW,
280 if (ret) { 281 "lcd enable");
282 if (ret)
281 pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n", 283 pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n",
282 LCD_PANEL_ENABLE_GPIO); 284 LCD_PANEL_ENABLE_GPIO);
283 return ret;
284 }
285 gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
286 285
287 return ret; 286 return ret;
288} 287}
@@ -349,15 +348,6 @@ static struct twl4030_platform_data zoom_twldata = {
349 .vdac = &zoom_vdac, 348 .vdac = &zoom_vdac,
350}; 349};
351 350
352static struct i2c_board_info __initdata zoom_i2c_boardinfo[] = {
353 {
354 I2C_BOARD_INFO("twl5030", 0x48),
355 .flags = I2C_CLIENT_WAKE,
356 .irq = INT_34XX_SYS_NIRQ,
357 .platform_data = &zoom_twldata,
358 },
359};
360
361static int __init omap_i2c_init(void) 351static int __init omap_i2c_init(void)
362{ 352{
363 if (machine_is_omap_zoom2()) { 353 if (machine_is_omap_zoom2()) {
@@ -365,19 +355,12 @@ static int __init omap_i2c_init(void)
365 zoom_audio_data.hs_extmute = 1; 355 zoom_audio_data.hs_extmute = 1;
366 zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute; 356 zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute;
367 } 357 }
368 omap_register_i2c_bus(1, 2400, zoom_i2c_boardinfo, 358 omap_pmic_init(1, 2400, "twl5030", INT_34XX_SYS_NIRQ, &zoom_twldata);
369 ARRAY_SIZE(zoom_i2c_boardinfo));
370 omap_register_i2c_bus(2, 400, NULL, 0); 359 omap_register_i2c_bus(2, 400, NULL, 0);
371 omap_register_i2c_bus(3, 400, NULL, 0); 360 omap_register_i2c_bus(3, 400, NULL, 0);
372 return 0; 361 return 0;
373} 362}
374 363
375static struct omap_musb_board_data musb_board_data = {
376 .interface_type = MUSB_INTERFACE_ULPI,
377 .mode = MUSB_OTG,
378 .power = 100,
379};
380
381static void enable_board_wakeup_source(void) 364static void enable_board_wakeup_source(void)
382{ 365{
383 /* T2 interrupt line (keypad) */ 366 /* T2 interrupt line (keypad) */
@@ -392,7 +375,7 @@ void __init zoom_peripherals_init(void)
392 375
393 omap_i2c_init(); 376 omap_i2c_init();
394 platform_device_register(&omap_vwlan_device); 377 platform_device_register(&omap_vwlan_device);
395 usb_musb_init(&musb_board_data); 378 usb_musb_init(NULL);
396 enable_board_wakeup_source(); 379 enable_board_wakeup_source();
397 omap_serial_init(); 380 omap_serial_init();
398} 381}
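
Several hunks in this series collapse the old two-step gpio_request() plus gpio_direction_output()/gpio_direction_input() sequence into a single gpio_request_one() call that takes a GPIOF_* flag. A minimal sketch of the before/after pattern follows; the GPIO number and label are invented for illustration and are not taken from this patch.

#include <linux/gpio.h>
#include <linux/init.h>

/* Hypothetical GPIO number, for illustration only. */
#define EXAMPLE_LCD_EN_GPIO	99

static int __init example_lcd_gpio_setup(void)
{
	int ret;

	/* Old style: request, then configure, with a separate error path. */
	ret = gpio_request(EXAMPLE_LCD_EN_GPIO, "lcd enable");
	if (ret)
		return ret;
	gpio_direction_output(EXAMPLE_LCD_EN_GPIO, 0);
	gpio_free(EXAMPLE_LCD_EN_GPIO);

	/* New style: request and configure as a low output in one call. */
	return gpio_request_one(EXAMPLE_LCD_EN_GPIO, GPIOF_OUT_INIT_LOW,
				"lcd enable");
}
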
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
new file mode 100644
index 000000000000..e94903b2c65b
--- /dev/null
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -0,0 +1,163 @@
1/*
2 * common-board-devices.c
3 *
4 * Copyright (C) 2011 CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include <linux/i2c.h>
24#include <linux/i2c/twl.h>
25
26#include <linux/gpio.h>
27#include <linux/spi/spi.h>
28#include <linux/spi/ads7846.h>
29
30#include <plat/i2c.h>
31#include <plat/mcspi.h>
32#include <plat/nand.h>
33
34#include "common-board-devices.h"
35
36static struct i2c_board_info __initdata pmic_i2c_board_info = {
37 .addr = 0x48,
38 .flags = I2C_CLIENT_WAKE,
39};
40
41void __init omap_pmic_init(int bus, u32 clkrate,
42 const char *pmic_type, int pmic_irq,
43 struct twl4030_platform_data *pmic_data)
44{
45 strncpy(pmic_i2c_board_info.type, pmic_type,
46 sizeof(pmic_i2c_board_info.type));
47 pmic_i2c_board_info.irq = pmic_irq;
48 pmic_i2c_board_info.platform_data = pmic_data;
49
50 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
51}
52
53#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
54 defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
55static struct omap2_mcspi_device_config ads7846_mcspi_config = {
56 .turbo_mode = 0,
57 .single_channel = 1, /* 0: slave, 1: master */
58};
59
60static struct ads7846_platform_data ads7846_config = {
61 .x_max = 0x0fff,
62 .y_max = 0x0fff,
63 .x_plate_ohms = 180,
64 .pressure_max = 255,
65 .debounce_max = 10,
66 .debounce_tol = 3,
67 .debounce_rep = 1,
68 .gpio_pendown = -EINVAL,
69 .keep_vref_on = 1,
70};
71
72static struct spi_board_info ads7846_spi_board_info __initdata = {
73 .modalias = "ads7846",
74 .bus_num = -EINVAL,
75 .chip_select = 0,
76 .max_speed_hz = 1500000,
77 .controller_data = &ads7846_mcspi_config,
78 .irq = -EINVAL,
79 .platform_data = &ads7846_config,
80};
81
82void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
83 struct ads7846_platform_data *board_pdata)
84{
85 struct spi_board_info *spi_bi = &ads7846_spi_board_info;
86 int err;
87
88 err = gpio_request(gpio_pendown, "TS PenDown");
89 if (err) {
90 pr_err("Could not obtain gpio for TS PenDown: %d\n", err);
91 return;
92 }
93
94 gpio_direction_input(gpio_pendown);
95 gpio_export(gpio_pendown, 0);
96
97 if (gpio_debounce)
98 gpio_set_debounce(gpio_pendown, gpio_debounce);
99
100 ads7846_config.gpio_pendown = gpio_pendown;
101
102 spi_bi->bus_num = bus_num;
103 spi_bi->irq = OMAP_GPIO_IRQ(gpio_pendown);
104
105 if (board_pdata)
106 spi_bi->platform_data = board_pdata;
107
108 spi_register_board_info(&ads7846_spi_board_info, 1);
109}
110#else
111void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
112 struct ads7846_platform_data *board_pdata)
113{
114}
115#endif
116
117#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
118static struct omap_nand_platform_data nand_data = {
119 .dma_channel = -1, /* disable DMA in OMAP NAND driver */
120};
121
122void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
123 int nr_parts)
124{
125 u8 cs = 0;
126 u8 nandcs = GPMC_CS_NUM + 1;
127
128 /* find out the chip-select on which NAND exists */
129 while (cs < GPMC_CS_NUM) {
130 u32 ret = 0;
131 ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
132
133 if ((ret & 0xC00) == 0x800) {
134 printk(KERN_INFO "Found NAND on CS%d\n", cs);
135 if (nandcs > GPMC_CS_NUM)
136 nandcs = cs;
137 }
138 cs++;
139 }
140
141 if (nandcs > GPMC_CS_NUM) {
142 printk(KERN_INFO "NAND: Unable to find configuration "
143 "in GPMC\n ");
144 return;
145 }
146
147 if (nandcs < GPMC_CS_NUM) {
148 nand_data.cs = nandcs;
149 nand_data.parts = parts;
150 nand_data.nr_parts = nr_parts;
151 nand_data.options = options;
152
153 printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
154 if (gpmc_nand_init(&nand_data) < 0)
155 printk(KERN_ERR "Unable to register NAND device\n");
156 }
157}
158#else
159void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
160 int nr_parts)
161{
162}
163#endif
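
The omap_nand_flash_init() helper above takes the NAND bus options plus a partition table and locates the chip-select itself by probing GPMC_CS_CONFIG1. The following board-side call is a hedged sketch: the partition names, sizes and the 16-bit option are placeholders, not values from this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>

#include "common-board-devices.h"

/* Illustrative two-partition layout. */
static struct mtd_partition example_nand_partitions[] = {
	{
		.name	= "bootloader",
		.offset	= 0,
		.size	= 512 * 1024,
	},
	{
		.name	= "filesystem",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static void __init example_board_flash_init(void)
{
	/* 16-bit NAND; the helper finds the chip-select in the GPMC itself. */
	omap_nand_flash_init(NAND_BUSWIDTH_16, example_nand_partitions,
			     ARRAY_SIZE(example_nand_partitions));
}
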
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
new file mode 100644
index 000000000000..eb80b3b0ef47
--- /dev/null
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -0,0 +1,35 @@
1#ifndef __OMAP_COMMON_BOARD_DEVICES__
2#define __OMAP_COMMON_BOARD_DEVICES__
3
4struct twl4030_platform_data;
5struct mtd_partition;
6
7void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
8 struct twl4030_platform_data *pmic_data);
9
10static inline void omap2_pmic_init(const char *pmic_type,
11 struct twl4030_platform_data *pmic_data)
12{
13 omap_pmic_init(2, 2600, pmic_type, INT_24XX_SYS_NIRQ, pmic_data);
14}
15
16static inline void omap3_pmic_init(const char *pmic_type,
17 struct twl4030_platform_data *pmic_data)
18{
19 omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
20}
21
22static inline void omap4_pmic_init(const char *pmic_type,
23 struct twl4030_platform_data *pmic_data)
24{
25 /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */
26 omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
27}
28
29struct ads7846_platform_data;
30
31void omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
32 struct ads7846_platform_data *board_pdata);
33void omap_nand_flash_init(int opts, struct mtd_partition *parts, int n_parts);
34
35#endif /* __OMAP_COMMON_BOARD_DEVICES__ */
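
Taken together, these helpers let a board file drop its private i2c_board_info and spi_board_info tables. A minimal usage sketch, assuming an OMAP3 board with a twl4030 PMIC on I2C1 and an ads7846 touchscreen on SPI1; the GPIO number, the mostly empty platform data and the usual TWL4030_IRQ_* defines are assumptions for illustration, not taken from this patch.

#include <linux/init.h>
#include <linux/i2c/twl.h>
#include <plat/irqs.h>

#include "common-board-devices.h"

static struct twl4030_platform_data example_twldata = {
	.irq_base	= TWL4030_IRQ_BASE,
	.irq_end	= TWL4030_IRQ_END,
	/* regulator/keypad/usb child data would normally go here */
};

static void __init example_board_init(void)
{
	/* PMIC on I2C1 at 2600 kHz, wired to the OMAP3 SYS_NIRQ line. */
	omap3_pmic_init("twl4030", &example_twldata);

	/* Touchscreen on SPI bus 1, pen-down on (hypothetical) GPIO 57,
	 * no extra debounce, default ads7846 platform data. */
	omap_ads7846_init(1, 57, 0, NULL);
}
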
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 1c240eff3918..4bf6e6e8b100 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -36,36 +36,6 @@
36 36
37#ifdef CONFIG_CPU_IDLE 37#ifdef CONFIG_CPU_IDLE
38 38
39#define OMAP3_MAX_STATES 7
40#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
41#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
42#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
43#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core iactive */
44#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
45#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
46#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */
47
48#define OMAP3_STATE_MAX OMAP3_STATE_C7
49
50#define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */
51
52struct omap3_processor_cx {
53 u8 valid;
54 u8 type;
55 u32 sleep_latency;
56 u32 wakeup_latency;
57 u32 mpu_state;
58 u32 core_state;
59 u32 threshold;
60 u32 flags;
61 const char *desc;
62};
63
64struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
65struct omap3_processor_cx current_cx_state;
66struct powerdomain *mpu_pd, *core_pd, *per_pd;
67struct powerdomain *cam_pd;
68
69/* 39/*
70 * The latencies/thresholds for various C states have 40 * The latencies/thresholds for various C states have
71 * to be configured from the respective board files. 41 * to be configured from the respective board files.
@@ -75,27 +45,31 @@ struct powerdomain *cam_pd;
75 */ 45 */
76static struct cpuidle_params cpuidle_params_table[] = { 46static struct cpuidle_params cpuidle_params_table[] = {
77 /* C1 */ 47 /* C1 */
78 {1, 2, 2, 5}, 48 {2 + 2, 5, 1},
79 /* C2 */ 49 /* C2 */
80 {1, 10, 10, 30}, 50 {10 + 10, 30, 1},
81 /* C3 */ 51 /* C3 */
82 {1, 50, 50, 300}, 52 {50 + 50, 300, 1},
83 /* C4 */ 53 /* C4 */
84 {1, 1500, 1800, 4000}, 54 {1500 + 1800, 4000, 1},
85 /* C5 */ 55 /* C5 */
86 {1, 2500, 7500, 12000}, 56 {2500 + 7500, 12000, 1},
87 /* C6 */ 57 /* C6 */
88 {1, 3000, 8500, 15000}, 58 {3000 + 8500, 15000, 1},
89 /* C7 */ 59 /* C7 */
90 {1, 10000, 30000, 300000}, 60 {10000 + 30000, 300000, 1},
91}; 61};
62#define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
92 63
93static int omap3_idle_bm_check(void) 64/* Mach specific information to be recorded in the C-state driver_data */
94{ 65struct omap3_idle_statedata {
95 if (!omap3_can_sleep()) 66 u32 mpu_state;
96 return 1; 67 u32 core_state;
97 return 0; 68 u8 valid;
98} 69};
70struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];
71
72struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
99 73
100static int _cpuidle_allow_idle(struct powerdomain *pwrdm, 74static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
101 struct clockdomain *clkdm) 75 struct clockdomain *clkdm)
@@ -122,12 +96,10 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
122static int omap3_enter_idle(struct cpuidle_device *dev, 96static int omap3_enter_idle(struct cpuidle_device *dev,
123 struct cpuidle_state *state) 97 struct cpuidle_state *state)
124{ 98{
125 struct omap3_processor_cx *cx = cpuidle_get_statedata(state); 99 struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
126 struct timespec ts_preidle, ts_postidle, ts_idle; 100 struct timespec ts_preidle, ts_postidle, ts_idle;
127 u32 mpu_state = cx->mpu_state, core_state = cx->core_state; 101 u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
128 102
129 current_cx_state = *cx;
130
131 /* Used to keep track of the total time in idle */ 103 /* Used to keep track of the total time in idle */
132 getnstimeofday(&ts_preidle); 104 getnstimeofday(&ts_preidle);
133 105
@@ -140,7 +112,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
140 if (omap_irq_pending() || need_resched()) 112 if (omap_irq_pending() || need_resched())
141 goto return_sleep_time; 113 goto return_sleep_time;
142 114
143 if (cx->type == OMAP3_STATE_C1) { 115 /* Deny idle for C1 */
116 if (state == &dev->states[0]) {
144 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); 117 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
145 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); 118 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
146 } 119 }
@@ -148,7 +121,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
148 /* Execute ARM wfi */ 121 /* Execute ARM wfi */
149 omap_sram_idle(); 122 omap_sram_idle();
150 123
151 if (cx->type == OMAP3_STATE_C1) { 124 /* Re-allow idle for C1 */
125 if (state == &dev->states[0]) {
152 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); 126 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
153 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); 127 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
154 } 128 }
@@ -164,41 +138,53 @@ return_sleep_time:
164} 138}
165 139
166/** 140/**
167 * next_valid_state - Find next valid c-state 141 * next_valid_state - Find next valid C-state
168 * @dev: cpuidle device 142 * @dev: cpuidle device
169 * @state: Currently selected c-state 143 * @state: Currently selected C-state
170 * 144 *
171 * If the current state is valid, it is returned back to the caller. 145 * If the current state is valid, it is returned back to the caller.
172 * Else, this function searches for a lower c-state which is still 146 * Else, this function searches for a lower c-state which is still
173 * valid (as defined in omap3_power_states[]). 147 * valid.
148 *
149 * A state is valid if the 'valid' field is enabled and
150 * if it satisfies the enable_off_mode condition.
174 */ 151 */
175static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, 152static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
176 struct cpuidle_state *curr) 153 struct cpuidle_state *curr)
177{ 154{
178 struct cpuidle_state *next = NULL; 155 struct cpuidle_state *next = NULL;
179 struct omap3_processor_cx *cx; 156 struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
157 u32 mpu_deepest_state = PWRDM_POWER_RET;
158 u32 core_deepest_state = PWRDM_POWER_RET;
180 159
181 cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr); 160 if (enable_off_mode) {
161 mpu_deepest_state = PWRDM_POWER_OFF;
162 /*
 163 * Erratum i583: valid for ES rev < ES1.2 on 3630.
 164 * CORE OFF mode is not supported in a stable form, so restrict
 165 * the CORE state to RET instead.
166 */
167 if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
168 core_deepest_state = PWRDM_POWER_OFF;
169 }
182 170
183 /* Check if current state is valid */ 171 /* Check if current state is valid */
184 if (cx->valid) { 172 if ((cx->valid) &&
173 (cx->mpu_state >= mpu_deepest_state) &&
174 (cx->core_state >= core_deepest_state)) {
185 return curr; 175 return curr;
186 } else { 176 } else {
187 u8 idx = OMAP3_STATE_MAX; 177 int idx = OMAP3_NUM_STATES - 1;
188 178
189 /* 179 /* Reach the current state starting at highest C-state */
190 * Reach the current state starting at highest C-state 180 for (; idx >= 0; idx--) {
191 */
192 for (; idx >= OMAP3_STATE_C1; idx--) {
193 if (&dev->states[idx] == curr) { 181 if (&dev->states[idx] == curr) {
194 next = &dev->states[idx]; 182 next = &dev->states[idx];
195 break; 183 break;
196 } 184 }
197 } 185 }
198 186
199 /* 187 /* Should never hit this condition */
200 * Should never hit this condition.
201 */
202 WARN_ON(next == NULL); 188 WARN_ON(next == NULL);
203 189
204 /* 190 /*
@@ -206,17 +192,17 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
206 * Start search from the next (lower) state. 192 * Start search from the next (lower) state.
207 */ 193 */
208 idx--; 194 idx--;
209 for (; idx >= OMAP3_STATE_C1; idx--) { 195 for (; idx >= 0; idx--) {
210 struct omap3_processor_cx *cx;
211
212 cx = cpuidle_get_statedata(&dev->states[idx]); 196 cx = cpuidle_get_statedata(&dev->states[idx]);
213 if (cx->valid) { 197 if ((cx->valid) &&
198 (cx->mpu_state >= mpu_deepest_state) &&
199 (cx->core_state >= core_deepest_state)) {
214 next = &dev->states[idx]; 200 next = &dev->states[idx];
215 break; 201 break;
216 } 202 }
217 } 203 }
218 /* 204 /*
219 * C1 and C2 are always valid. 205 * C1 is always valid.
220 * So, no need to check for 'next==NULL' outside this loop. 206 * So, no need to check for 'next==NULL' outside this loop.
221 */ 207 */
222 } 208 }
@@ -229,36 +215,22 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
229 * @dev: cpuidle device 215 * @dev: cpuidle device
230 * @state: The target state to be programmed 216 * @state: The target state to be programmed
231 * 217 *
232 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This 218 * This function checks for any pending activity and then programs
233 * function checks for any pending activity and then programs the 219 * the device to the specified or a safer state.
234 * device to the specified or a safer state.
235 */ 220 */
236static int omap3_enter_idle_bm(struct cpuidle_device *dev, 221static int omap3_enter_idle_bm(struct cpuidle_device *dev,
237 struct cpuidle_state *state) 222 struct cpuidle_state *state)
238{ 223{
239 struct cpuidle_state *new_state = next_valid_state(dev, state); 224 struct cpuidle_state *new_state;
240 u32 core_next_state, per_next_state = 0, per_saved_state = 0; 225 u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
241 u32 cam_state; 226 struct omap3_idle_statedata *cx;
242 struct omap3_processor_cx *cx;
243 int ret; 227 int ret;
244 228
245 if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) { 229 if (!omap3_can_sleep()) {
246 BUG_ON(!dev->safe_state);
247 new_state = dev->safe_state; 230 new_state = dev->safe_state;
248 goto select_state; 231 goto select_state;
249 } 232 }
250 233
251 cx = cpuidle_get_statedata(state);
252 core_next_state = cx->core_state;
253
254 /*
255 * FIXME: we currently manage device-specific idle states
256 * for PER and CORE in combination with CPU-specific
257 * idle states. This is wrong, and device-specific
258 * idle management needs to be separated out into
259 * its own code.
260 */
261
262 /* 234 /*
263 * Prevent idle completely if CAM is active. 235 * Prevent idle completely if CAM is active.
264 * CAM does not have wakeup capability in OMAP3. 236 * CAM does not have wakeup capability in OMAP3.
@@ -270,9 +242,19 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
270 } 242 }
271 243
272 /* 244 /*
245 * FIXME: we currently manage device-specific idle states
246 * for PER and CORE in combination with CPU-specific
247 * idle states. This is wrong, and device-specific
248 * idle management needs to be separated out into
249 * its own code.
250 */
251
252 /*
273 * Prevent PER off if CORE is not in retention or off as this 253 * Prevent PER off if CORE is not in retention or off as this
274 * would disable PER wakeups completely. 254 * would disable PER wakeups completely.
275 */ 255 */
256 cx = cpuidle_get_statedata(state);
257 core_next_state = cx->core_state;
276 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); 258 per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
277 if ((per_next_state == PWRDM_POWER_OFF) && 259 if ((per_next_state == PWRDM_POWER_OFF) &&
278 (core_next_state > PWRDM_POWER_RET)) 260 (core_next_state > PWRDM_POWER_RET))
@@ -282,6 +264,8 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
282 if (per_next_state != per_saved_state) 264 if (per_next_state != per_saved_state)
283 pwrdm_set_next_pwrst(per_pd, per_next_state); 265 pwrdm_set_next_pwrst(per_pd, per_next_state);
284 266
267 new_state = next_valid_state(dev, state);
268
285select_state: 269select_state:
286 dev->last_state = new_state; 270 dev->last_state = new_state;
287 ret = omap3_enter_idle(dev, new_state); 271 ret = omap3_enter_idle(dev, new_state);
@@ -295,31 +279,6 @@ select_state:
295 279
296DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); 280DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
297 281
298/**
299 * omap3_cpuidle_update_states() - Update the cpuidle states
300 * @mpu_deepest_state: Enable states up to and including this for mpu domain
301 * @core_deepest_state: Enable states up to and including this for core domain
302 *
303 * This goes through the list of states available and enables and disables the
304 * validity of C states based on deepest state that can be achieved for the
305 * variable domain
306 */
307void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
308{
309 int i;
310
311 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
312 struct omap3_processor_cx *cx = &omap3_power_states[i];
313
314 if ((cx->mpu_state >= mpu_deepest_state) &&
315 (cx->core_state >= core_deepest_state)) {
316 cx->valid = 1;
317 } else {
318 cx->valid = 0;
319 }
320 }
321}
322
323void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params) 282void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
324{ 283{
325 int i; 284 int i;
@@ -327,212 +286,109 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
327 if (!cpuidle_board_params) 286 if (!cpuidle_board_params)
328 return; 287 return;
329 288
330 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { 289 for (i = 0; i < OMAP3_NUM_STATES; i++) {
331 cpuidle_params_table[i].valid = 290 cpuidle_params_table[i].valid = cpuidle_board_params[i].valid;
332 cpuidle_board_params[i].valid; 291 cpuidle_params_table[i].exit_latency =
333 cpuidle_params_table[i].sleep_latency = 292 cpuidle_board_params[i].exit_latency;
334 cpuidle_board_params[i].sleep_latency; 293 cpuidle_params_table[i].target_residency =
335 cpuidle_params_table[i].wake_latency = 294 cpuidle_board_params[i].target_residency;
336 cpuidle_board_params[i].wake_latency;
337 cpuidle_params_table[i].threshold =
338 cpuidle_board_params[i].threshold;
339 } 295 }
340 return; 296 return;
341} 297}
342 298
343/* omap3_init_power_states - Initialises the OMAP3 specific C states.
344 *
345 * Below is the desciption of each C state.
346 * C1 . MPU WFI + Core active
347 * C2 . MPU WFI + Core inactive
348 * C3 . MPU CSWR + Core inactive
349 * C4 . MPU OFF + Core inactive
350 * C5 . MPU CSWR + Core CSWR
351 * C6 . MPU OFF + Core CSWR
352 * C7 . MPU OFF + Core OFF
353 */
354void omap_init_power_states(void)
355{
356 /* C1 . MPU WFI + Core active */
357 omap3_power_states[OMAP3_STATE_C1].valid =
358 cpuidle_params_table[OMAP3_STATE_C1].valid;
359 omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
360 omap3_power_states[OMAP3_STATE_C1].sleep_latency =
361 cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
362 omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
363 cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
364 omap3_power_states[OMAP3_STATE_C1].threshold =
365 cpuidle_params_table[OMAP3_STATE_C1].threshold;
366 omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
367 omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
368 omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
369 omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";
370
371 /* C2 . MPU WFI + Core inactive */
372 omap3_power_states[OMAP3_STATE_C2].valid =
373 cpuidle_params_table[OMAP3_STATE_C2].valid;
374 omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
375 omap3_power_states[OMAP3_STATE_C2].sleep_latency =
376 cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
377 omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
378 cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
379 omap3_power_states[OMAP3_STATE_C2].threshold =
380 cpuidle_params_table[OMAP3_STATE_C2].threshold;
381 omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
382 omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
383 omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
384 CPUIDLE_FLAG_CHECK_BM;
385 omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";
386
387 /* C3 . MPU CSWR + Core inactive */
388 omap3_power_states[OMAP3_STATE_C3].valid =
389 cpuidle_params_table[OMAP3_STATE_C3].valid;
390 omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
391 omap3_power_states[OMAP3_STATE_C3].sleep_latency =
392 cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
393 omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
394 cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
395 omap3_power_states[OMAP3_STATE_C3].threshold =
396 cpuidle_params_table[OMAP3_STATE_C3].threshold;
397 omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
398 omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
399 omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
400 CPUIDLE_FLAG_CHECK_BM;
401 omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";
402
403 /* C4 . MPU OFF + Core inactive */
404 omap3_power_states[OMAP3_STATE_C4].valid =
405 cpuidle_params_table[OMAP3_STATE_C4].valid;
406 omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
407 omap3_power_states[OMAP3_STATE_C4].sleep_latency =
408 cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
409 omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
410 cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
411 omap3_power_states[OMAP3_STATE_C4].threshold =
412 cpuidle_params_table[OMAP3_STATE_C4].threshold;
413 omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
414 omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
415 omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
416 CPUIDLE_FLAG_CHECK_BM;
417 omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";
418
419 /* C5 . MPU CSWR + Core CSWR*/
420 omap3_power_states[OMAP3_STATE_C5].valid =
421 cpuidle_params_table[OMAP3_STATE_C5].valid;
422 omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
423 omap3_power_states[OMAP3_STATE_C5].sleep_latency =
424 cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
425 omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
426 cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
427 omap3_power_states[OMAP3_STATE_C5].threshold =
428 cpuidle_params_table[OMAP3_STATE_C5].threshold;
429 omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
430 omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
431 omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
432 CPUIDLE_FLAG_CHECK_BM;
433 omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";
434
435 /* C6 . MPU OFF + Core CSWR */
436 omap3_power_states[OMAP3_STATE_C6].valid =
437 cpuidle_params_table[OMAP3_STATE_C6].valid;
438 omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
439 omap3_power_states[OMAP3_STATE_C6].sleep_latency =
440 cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
441 omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
442 cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
443 omap3_power_states[OMAP3_STATE_C6].threshold =
444 cpuidle_params_table[OMAP3_STATE_C6].threshold;
445 omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
446 omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
447 omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
448 CPUIDLE_FLAG_CHECK_BM;
449 omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";
450
451 /* C7 . MPU OFF + Core OFF */
452 omap3_power_states[OMAP3_STATE_C7].valid =
453 cpuidle_params_table[OMAP3_STATE_C7].valid;
454 omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
455 omap3_power_states[OMAP3_STATE_C7].sleep_latency =
456 cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
457 omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
458 cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
459 omap3_power_states[OMAP3_STATE_C7].threshold =
460 cpuidle_params_table[OMAP3_STATE_C7].threshold;
461 omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
462 omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
463 omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
464 CPUIDLE_FLAG_CHECK_BM;
465 omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";
466
467 /*
468 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
469 * enable OFF mode in a stable form for previous revisions.
470 * we disable C7 state as a result.
471 */
472 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
473 omap3_power_states[OMAP3_STATE_C7].valid = 0;
474 cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
475 pr_warn("%s: core off state C7 disabled due to i583\n",
476 __func__);
477 }
478}
479
480struct cpuidle_driver omap3_idle_driver = { 299struct cpuidle_driver omap3_idle_driver = {
481 .name = "omap3_idle", 300 .name = "omap3_idle",
482 .owner = THIS_MODULE, 301 .owner = THIS_MODULE,
483}; 302};
484 303
304/* Helper to fill the C-state common data and register the driver_data */
305static inline struct omap3_idle_statedata *_fill_cstate(
306 struct cpuidle_device *dev,
307 int idx, const char *descr)
308{
309 struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
310 struct cpuidle_state *state = &dev->states[idx];
311
312 state->exit_latency = cpuidle_params_table[idx].exit_latency;
313 state->target_residency = cpuidle_params_table[idx].target_residency;
314 state->flags = CPUIDLE_FLAG_TIME_VALID;
315 state->enter = omap3_enter_idle_bm;
316 cx->valid = cpuidle_params_table[idx].valid;
317 sprintf(state->name, "C%d", idx + 1);
318 strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
319 cpuidle_set_statedata(state, cx);
320
321 return cx;
322}
323
485/** 324/**
486 * omap3_idle_init - Init routine for OMAP3 idle 325 * omap3_idle_init - Init routine for OMAP3 idle
487 * 326 *
488 * Registers the OMAP3 specific cpuidle driver with the cpuidle 327 * Registers the OMAP3 specific cpuidle driver to the cpuidle
489 * framework with the valid set of states. 328 * framework with the valid set of states.
490 */ 329 */
491int __init omap3_idle_init(void) 330int __init omap3_idle_init(void)
492{ 331{
493 int i, count = 0;
494 struct omap3_processor_cx *cx;
495 struct cpuidle_state *state;
496 struct cpuidle_device *dev; 332 struct cpuidle_device *dev;
333 struct omap3_idle_statedata *cx;
497 334
498 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 335 mpu_pd = pwrdm_lookup("mpu_pwrdm");
499 core_pd = pwrdm_lookup("core_pwrdm"); 336 core_pd = pwrdm_lookup("core_pwrdm");
500 per_pd = pwrdm_lookup("per_pwrdm"); 337 per_pd = pwrdm_lookup("per_pwrdm");
501 cam_pd = pwrdm_lookup("cam_pwrdm"); 338 cam_pd = pwrdm_lookup("cam_pwrdm");
502 339
503 omap_init_power_states();
504 cpuidle_register_driver(&omap3_idle_driver); 340 cpuidle_register_driver(&omap3_idle_driver);
505
506 dev = &per_cpu(omap3_idle_dev, smp_processor_id()); 341 dev = &per_cpu(omap3_idle_dev, smp_processor_id());
507 342
508 for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { 343 /* C1 . MPU WFI + Core active */
509 cx = &omap3_power_states[i]; 344 cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
510 state = &dev->states[count]; 345 (&dev->states[0])->enter = omap3_enter_idle;
511 346 dev->safe_state = &dev->states[0];
512 if (!cx->valid) 347 cx->valid = 1; /* C1 is always valid */
513 continue; 348 cx->mpu_state = PWRDM_POWER_ON;
514 cpuidle_set_statedata(state, cx); 349 cx->core_state = PWRDM_POWER_ON;
515 state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
516 state->target_residency = cx->threshold;
517 state->flags = cx->flags;
518 state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
519 omap3_enter_idle_bm : omap3_enter_idle;
520 if (cx->type == OMAP3_STATE_C1)
521 dev->safe_state = state;
522 sprintf(state->name, "C%d", count+1);
523 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
524 count++;
525 }
526 350
527 if (!count) 351 /* C2 . MPU WFI + Core inactive */
528 return -EINVAL; 352 cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
529 dev->state_count = count; 353 cx->mpu_state = PWRDM_POWER_ON;
354 cx->core_state = PWRDM_POWER_ON;
355
356 /* C3 . MPU CSWR + Core inactive */
357 cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
358 cx->mpu_state = PWRDM_POWER_RET;
359 cx->core_state = PWRDM_POWER_ON;
530 360
531 if (enable_off_mode) 361 /* C4 . MPU OFF + Core inactive */
532 omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF); 362 cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
533 else 363 cx->mpu_state = PWRDM_POWER_OFF;
534 omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET); 364 cx->core_state = PWRDM_POWER_ON;
365
366 /* C5 . MPU RET + Core RET */
367 cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
368 cx->mpu_state = PWRDM_POWER_RET;
369 cx->core_state = PWRDM_POWER_RET;
370
371 /* C6 . MPU OFF + Core RET */
372 cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
373 cx->mpu_state = PWRDM_POWER_OFF;
374 cx->core_state = PWRDM_POWER_RET;
375
376 /* C7 . MPU OFF + Core OFF */
377 cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
378 /*
 379 * Erratum i583: applies to ES rev < ES1.2 on 3630. We cannot
 380 * enable OFF mode in a stable form on those revisions,
 381 * so the C7 state is disabled as a result.
382 */
383 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
384 cx->valid = 0;
385 pr_warn("%s: core off state C7 disabled due to i583\n",
386 __func__);
387 }
388 cx->mpu_state = PWRDM_POWER_OFF;
389 cx->core_state = PWRDM_POWER_OFF;
535 390
391 dev->state_count = OMAP3_NUM_STATES;
536 if (cpuidle_register_device(dev)) { 392 if (cpuidle_register_device(dev)) {
537 printk(KERN_ERR "%s: CPUidle register device failed\n", 393 printk(KERN_ERR "%s: CPUidle register device failed\n",
538 __func__); 394 __func__);
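
The rework above keeps only {mpu_state, core_state, valid} per C-state and hangs it off each cpuidle_state through the generic driver_data hook, which _fill_cstate() sets and the enter path reads back. A stripped-down sketch of that round trip with placeholder names (these are not the actual OMAP structures):

#include <linux/cpuidle.h>

struct example_statedata {
	u32 mpu_state;
	u32 core_state;
};

static struct example_statedata example_data[2];

/* Enter hook: recover the per-state cookie attached at init time. */
static int example_enter(struct cpuidle_device *dev,
			 struct cpuidle_state *state)
{
	struct example_statedata *sd = cpuidle_get_statedata(state);

	/* ...program power domains from sd->mpu_state / sd->core_state... */
	(void)sd;
	return 0;	/* would normally return the time spent idle, in us */
}

static void example_fill_state(struct cpuidle_device *dev, int idx)
{
	struct cpuidle_state *state = &dev->states[idx];

	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = example_enter;
	cpuidle_set_statedata(state, &example_data[idx]);
}
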
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 256d23fb79ab..543fcb8b518c 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -22,7 +22,7 @@
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/err.h> 23#include <linux/err.h>
24 24
25#include <plat/display.h> 25#include <video/omapdss.h>
26#include <plat/omap_hwmod.h> 26#include <plat/omap_hwmod.h>
27#include <plat/omap_device.h> 27#include <plat/omap_device.h>
28 28
@@ -56,37 +56,58 @@ static bool opt_clock_available(const char *clk_role)
56 return false; 56 return false;
57} 57}
58 58
59struct omap_dss_hwmod_data {
60 const char *oh_name;
61 const char *dev_name;
62 const int id;
63};
64
65static const struct omap_dss_hwmod_data omap2_dss_hwmod_data[] __initdata = {
66 { "dss_core", "omapdss_dss", -1 },
67 { "dss_dispc", "omapdss_dispc", -1 },
68 { "dss_rfbi", "omapdss_rfbi", -1 },
69 { "dss_venc", "omapdss_venc", -1 },
70};
71
72static const struct omap_dss_hwmod_data omap3_dss_hwmod_data[] __initdata = {
73 { "dss_core", "omapdss_dss", -1 },
74 { "dss_dispc", "omapdss_dispc", -1 },
75 { "dss_rfbi", "omapdss_rfbi", -1 },
76 { "dss_venc", "omapdss_venc", -1 },
77 { "dss_dsi1", "omapdss_dsi1", -1 },
78};
79
80static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initdata = {
81 { "dss_core", "omapdss_dss", -1 },
82 { "dss_dispc", "omapdss_dispc", -1 },
83 { "dss_rfbi", "omapdss_rfbi", -1 },
84 { "dss_venc", "omapdss_venc", -1 },
85 { "dss_dsi1", "omapdss_dsi1", -1 },
86 { "dss_dsi2", "omapdss_dsi2", -1 },
87 { "dss_hdmi", "omapdss_hdmi", -1 },
88};
89
59int __init omap_display_init(struct omap_dss_board_info *board_data) 90int __init omap_display_init(struct omap_dss_board_info *board_data)
60{ 91{
61 int r = 0; 92 int r = 0;
62 struct omap_hwmod *oh; 93 struct omap_hwmod *oh;
63 struct omap_device *od; 94 struct omap_device *od;
64 int i; 95 int i, oh_count;
65 struct omap_display_platform_data pdata; 96 struct omap_display_platform_data pdata;
66 97 const struct omap_dss_hwmod_data *curr_dss_hwmod;
67 /*
68 * omap: valid DSS hwmod names
69 * omap2,3,4: dss_core, dss_dispc, dss_rfbi, dss_venc
70 * omap3,4: dss_dsi1
71 * omap4: dss_dsi2, dss_hdmi
72 */
73 char *oh_name[] = { "dss_core", "dss_dispc", "dss_rfbi", "dss_venc",
74 "dss_dsi1", "dss_dsi2", "dss_hdmi" };
75 char *dev_name[] = { "omapdss_dss", "omapdss_dispc", "omapdss_rfbi",
76 "omapdss_venc", "omapdss_dsi1", "omapdss_dsi2",
77 "omapdss_hdmi" };
78 int oh_count;
79 98
80 memset(&pdata, 0, sizeof(pdata)); 99 memset(&pdata, 0, sizeof(pdata));
81 100
82 if (cpu_is_omap24xx()) 101 if (cpu_is_omap24xx()) {
83 oh_count = ARRAY_SIZE(oh_name) - 3; 102 curr_dss_hwmod = omap2_dss_hwmod_data;
84 /* last 3 hwmod dev in oh_name are not available for omap2 */ 103 oh_count = ARRAY_SIZE(omap2_dss_hwmod_data);
85 else if (cpu_is_omap44xx()) 104 } else if (cpu_is_omap34xx()) {
86 oh_count = ARRAY_SIZE(oh_name); 105 curr_dss_hwmod = omap3_dss_hwmod_data;
87 else 106 oh_count = ARRAY_SIZE(omap3_dss_hwmod_data);
88 oh_count = ARRAY_SIZE(oh_name) - 2; 107 } else {
89 /* last 2 hwmod dev in oh_name are not available for omap3 */ 108 curr_dss_hwmod = omap4_dss_hwmod_data;
109 oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
110 }
90 111
91 /* opt_clks are always associated with dss hwmod */ 112 /* opt_clks are always associated with dss hwmod */
92 oh_core = omap_hwmod_lookup("dss_core"); 113 oh_core = omap_hwmod_lookup("dss_core");
@@ -100,19 +121,21 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
100 pdata.opt_clock_available = opt_clock_available; 121 pdata.opt_clock_available = opt_clock_available;
101 122
102 for (i = 0; i < oh_count; i++) { 123 for (i = 0; i < oh_count; i++) {
103 oh = omap_hwmod_lookup(oh_name[i]); 124 oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);
104 if (!oh) { 125 if (!oh) {
105 pr_err("Could not look up %s\n", oh_name[i]); 126 pr_err("Could not look up %s\n",
127 curr_dss_hwmod[i].oh_name);
106 return -ENODEV; 128 return -ENODEV;
107 } 129 }
108 130
109 od = omap_device_build(dev_name[i], -1, oh, &pdata, 131 od = omap_device_build(curr_dss_hwmod[i].dev_name,
132 curr_dss_hwmod[i].id, oh, &pdata,
110 sizeof(struct omap_display_platform_data), 133 sizeof(struct omap_display_platform_data),
111 omap_dss_latency, 134 omap_dss_latency,
112 ARRAY_SIZE(omap_dss_latency), 0); 135 ARRAY_SIZE(omap_dss_latency), 0);
113 136
114 if (WARN((IS_ERR(od)), "Could not build omap_device for %s\n", 137 if (WARN((IS_ERR(od)), "Could not build omap_device for %s\n",
115 oh_name[i])) 138 curr_dss_hwmod[i].oh_name))
116 return -ENODEV; 139 return -ENODEV;
117 } 140 }
118 omap_display_device.dev.platform_data = board_data; 141 omap_display_device.dev.platform_data = board_data;
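
The per-SoC omap_dss_hwmod_data tables are internal to omap_display_init(); board code keeps passing a struct omap_dss_board_info exactly as before, now built against <video/omapdss.h>. An illustrative board-side call follows, with a made-up DPI panel definition that is not part of this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <video/omapdss.h>

static struct omap_dss_device example_lcd_device = {
	.name			= "lcd",
	.driver_name		= "generic_dpi_panel",
	.type			= OMAP_DISPLAY_TYPE_DPI,
	.phy.dpi.data_lines	= 24,
};

static struct omap_dss_device *example_dss_devices[] = {
	&example_lcd_device,
};

static struct omap_dss_board_info example_dss_data = {
	.num_devices	= ARRAY_SIZE(example_dss_devices),
	.devices	= example_dss_devices,
	.default_device	= &example_lcd_device,
};

static void __init example_display_init(void)
{
	omap_display_init(&example_dss_data);
}
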
diff --git a/arch/arm/mach-omap2/gpmc-smc91x.c b/arch/arm/mach-omap2/gpmc-smc91x.c
index 877c6f5807b7..ba10c24f3d8d 100644
--- a/arch/arm/mach-omap2/gpmc-smc91x.c
+++ b/arch/arm/mach-omap2/gpmc-smc91x.c
@@ -147,25 +147,24 @@ void __init gpmc_smc91x_init(struct omap_smc91x_platform_data *board_data)
147 goto free1; 147 goto free1;
148 } 148 }
149 149
150 if (gpio_request(gpmc_cfg->gpio_irq, "SMC91X irq") < 0) 150 if (gpio_request_one(gpmc_cfg->gpio_irq, GPIOF_IN, "SMC91X irq") < 0)
151 goto free1; 151 goto free1;
152 152
153 gpio_direction_input(gpmc_cfg->gpio_irq);
154 gpmc_smc91x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq); 153 gpmc_smc91x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq);
155 154
156 if (gpmc_cfg->gpio_pwrdwn) { 155 if (gpmc_cfg->gpio_pwrdwn) {
157 ret = gpio_request(gpmc_cfg->gpio_pwrdwn, "SMC91X powerdown"); 156 ret = gpio_request_one(gpmc_cfg->gpio_pwrdwn,
157 GPIOF_OUT_INIT_LOW, "SMC91X powerdown");
158 if (ret) 158 if (ret)
159 goto free2; 159 goto free2;
160 gpio_direction_output(gpmc_cfg->gpio_pwrdwn, 0);
161 } 160 }
162 161
163 if (gpmc_cfg->gpio_reset) { 162 if (gpmc_cfg->gpio_reset) {
164 ret = gpio_request(gpmc_cfg->gpio_reset, "SMC91X reset"); 163 ret = gpio_request_one(gpmc_cfg->gpio_reset,
164 GPIOF_OUT_INIT_LOW, "SMC91X reset");
165 if (ret) 165 if (ret)
166 goto free3; 166 goto free3;
167 167
168 gpio_direction_output(gpmc_cfg->gpio_reset, 0);
169 gpio_set_value(gpmc_cfg->gpio_reset, 1); 168 gpio_set_value(gpmc_cfg->gpio_reset, 1);
170 msleep(100); 169 msleep(100);
171 gpio_set_value(gpmc_cfg->gpio_reset, 0); 170 gpio_set_value(gpmc_cfg->gpio_reset, 0);
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c
index 703f150dd01d..997033129d26 100644
--- a/arch/arm/mach-omap2/gpmc-smsc911x.c
+++ b/arch/arm/mach-omap2/gpmc-smsc911x.c
@@ -10,6 +10,7 @@
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13#define pr_fmt(fmt) "%s: " fmt, __func__
13 14
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
@@ -30,7 +31,7 @@ static struct resource gpmc_smsc911x_resources[] = {
30 .flags = IORESOURCE_MEM, 31 .flags = IORESOURCE_MEM,
31 }, 32 },
32 [1] = { 33 [1] = {
33 .flags = IORESOURCE_IRQ, 34 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
34 }, 35 },
35}; 36};
36 37
@@ -41,16 +42,6 @@ static struct smsc911x_platform_config gpmc_smsc911x_config = {
41 .flags = SMSC911X_USE_16BIT, 42 .flags = SMSC911X_USE_16BIT,
42}; 43};
43 44
44static struct platform_device gpmc_smsc911x_device = {
45 .name = "smsc911x",
46 .id = -1,
47 .num_resources = ARRAY_SIZE(gpmc_smsc911x_resources),
48 .resource = gpmc_smsc911x_resources,
49 .dev = {
50 .platform_data = &gpmc_smsc911x_config,
51 },
52};
53
54/* 45/*
55 * Initialize smsc911x device connected to the GPMC. Note that we 46 * Initialize smsc911x device connected to the GPMC. Note that we
56 * assume that pin multiplexing is done in the board-*.c file, 47 * assume that pin multiplexing is done in the board-*.c file,
@@ -58,46 +49,49 @@ static struct platform_device gpmc_smsc911x_device = {
58 */ 49 */
59void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data) 50void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data)
60{ 51{
52 struct platform_device *pdev;
61 unsigned long cs_mem_base; 53 unsigned long cs_mem_base;
62 int ret; 54 int ret;
63 55
64 gpmc_cfg = board_data; 56 gpmc_cfg = board_data;
65 57
66 if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) { 58 if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) {
67 printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n"); 59 pr_err("Failed to request GPMC mem region\n");
68 return; 60 return;
69 } 61 }
70 62
71 gpmc_smsc911x_resources[0].start = cs_mem_base + 0x0; 63 gpmc_smsc911x_resources[0].start = cs_mem_base + 0x0;
72 gpmc_smsc911x_resources[0].end = cs_mem_base + 0xff; 64 gpmc_smsc911x_resources[0].end = cs_mem_base + 0xff;
73 65
74 if (gpio_request(gpmc_cfg->gpio_irq, "smsc911x irq") < 0) { 66 if (gpio_request_one(gpmc_cfg->gpio_irq, GPIOF_IN, "smsc911x irq")) {
75 printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", 67 pr_err("Failed to request IRQ GPIO%d\n", gpmc_cfg->gpio_irq);
76 gpmc_cfg->gpio_irq);
77 goto free1; 68 goto free1;
78 } 69 }
79 70
80 gpio_direction_input(gpmc_cfg->gpio_irq);
81 gpmc_smsc911x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq); 71 gpmc_smsc911x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq);
82 gpmc_smsc911x_resources[1].flags |=
83 (gpmc_cfg->flags & IRQF_TRIGGER_MASK);
84 72
85 if (gpio_is_valid(gpmc_cfg->gpio_reset)) { 73 if (gpio_is_valid(gpmc_cfg->gpio_reset)) {
86 ret = gpio_request(gpmc_cfg->gpio_reset, "smsc911x reset"); 74 ret = gpio_request_one(gpmc_cfg->gpio_reset,
75 GPIOF_OUT_INIT_HIGH, "smsc911x reset");
87 if (ret) { 76 if (ret) {
88 printk(KERN_ERR "Failed to request GPIO%d for smsc911x reset\n", 77 pr_err("Failed to request reset GPIO%d\n",
89 gpmc_cfg->gpio_reset); 78 gpmc_cfg->gpio_reset);
90 goto free2; 79 goto free2;
91 } 80 }
92 81
93 gpio_direction_output(gpmc_cfg->gpio_reset, 1);
94 gpio_set_value(gpmc_cfg->gpio_reset, 0); 82 gpio_set_value(gpmc_cfg->gpio_reset, 0);
95 msleep(100); 83 msleep(100);
96 gpio_set_value(gpmc_cfg->gpio_reset, 1); 84 gpio_set_value(gpmc_cfg->gpio_reset, 1);
97 } 85 }
98 86
99 if (platform_device_register(&gpmc_smsc911x_device) < 0) { 87 if (gpmc_cfg->flags)
100 printk(KERN_ERR "Unable to register smsc911x device\n"); 88 gpmc_smsc911x_config.flags = gpmc_cfg->flags;
89
90 pdev = platform_device_register_resndata(NULL, "smsc911x", gpmc_cfg->id,
91 gpmc_smsc911x_resources, ARRAY_SIZE(gpmc_smsc911x_resources),
92 &gpmc_smsc911x_config, sizeof(gpmc_smsc911x_config));
93 if (!pdev) {
94 pr_err("Unable to register platform device\n");
101 gpio_free(gpmc_cfg->gpio_reset); 95 gpio_free(gpmc_cfg->gpio_reset);
102 goto free2; 96 goto free2;
103 } 97 }
@@ -109,5 +103,5 @@ free2:
109free1: 103free1:
110 gpmc_cs_free(gpmc_cfg->cs); 104 gpmc_cs_free(gpmc_cfg->cs);
111 105
112 printk(KERN_ERR "Could not initialize smsc911x\n"); 106 pr_err("Could not initialize smsc911x device\n");
113} 107}
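
gpmc_smsc911x_init() now builds the platform device itself via platform_device_register_resndata(), so a board only supplies the omap_smsc911x_platform_data, including the id used for the device instance. The sketch below is illustrative only; the chip-select, GPIO numbers and the 32-bit flag are placeholders and the header path is the usual plat/ location assumed here.

#include <linux/init.h>
#include <linux/smsc911x.h>
#include <plat/gpmc-smsc911x.h>

static struct omap_smsc911x_platform_data example_smsc911x_cfg = {
	.id		= 0,
	.cs		= 5,	/* GPMC chip-select, board specific */
	.gpio_irq	= 65,	/* IRQ line GPIO, board specific */
	.gpio_reset	= 42,	/* optional reset GPIO, board specific */
	.flags		= SMSC911X_USE_32BIT,
};

static void __init example_ethernet_init(void)
{
	gpmc_smsc911x_init(&example_smsc911x_cfg);
}
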
diff --git a/arch/arm/mach-omap2/include/mach/board-zoom.h b/arch/arm/mach-omap2/include/mach/board-zoom.h
index d20bd9c1a106..775fdc3b000b 100644
--- a/arch/arm/mach-omap2/include/mach/board-zoom.h
+++ b/arch/arm/mach-omap2/include/mach/board-zoom.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Defines for zoom boards 2 * Defines for zoom boards
3 */ 3 */
4#include <plat/display.h> 4#include <video/omapdss.h>
5 5
6#define ZOOM_NAND_CS 0 6#define ZOOM_NAND_CS 0
7 7
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c
index 82632c24076f..7b9f1909ddb2 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.c
+++ b/arch/arm/mach-omap2/omap_l3_noc.c
@@ -63,10 +63,7 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
63 char *source_name; 63 char *source_name;
64 64
65 /* Get the Type of interrupt */ 65 /* Get the Type of interrupt */
66 if (irq == l3->app_irq) 66 inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
67 inttype = L3_APPLICATION_ERROR;
68 else
69 inttype = L3_DEBUG_ERROR;
70 67
71 for (i = 0; i < L3_MODULES; i++) { 68 for (i = 0; i < L3_MODULES; i++) {
72 /* 69 /*
@@ -84,10 +81,10 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
84 81
85 err_src = j; 82 err_src = j;
86 /* Read the stderrlog_main_source from clk domain */ 83 /* Read the stderrlog_main_source from clk domain */
87 std_err_main_addr = base + (*(l3_targ[i] + err_src)); 84 std_err_main_addr = base + *(l3_targ[i] + err_src);
88 std_err_main = readl(std_err_main_addr); 85 std_err_main = readl(std_err_main_addr);
89 86
90 switch ((std_err_main & CUSTOM_ERROR)) { 87 switch (std_err_main & CUSTOM_ERROR) {
91 case STANDARD_ERROR: 88 case STANDARD_ERROR:
92 source_name = 89 source_name =
93 l3_targ_stderrlog_main_name[i][err_src]; 90 l3_targ_stderrlog_main_name[i][err_src];
@@ -132,49 +129,49 @@ static int __init omap4_l3_probe(struct platform_device *pdev)
132 129
133 l3 = kzalloc(sizeof(*l3), GFP_KERNEL); 130 l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
134 if (!l3) 131 if (!l3)
135 ret = -ENOMEM; 132 return -ENOMEM;
136 133
137 platform_set_drvdata(pdev, l3); 134 platform_set_drvdata(pdev, l3);
138 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 135 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
139 if (!res) { 136 if (!res) {
140 dev_err(&pdev->dev, "couldn't find resource 0\n"); 137 dev_err(&pdev->dev, "couldn't find resource 0\n");
141 ret = -ENODEV; 138 ret = -ENODEV;
142 goto err1; 139 goto err0;
143 } 140 }
144 141
145 l3->l3_base[0] = ioremap(res->start, resource_size(res)); 142 l3->l3_base[0] = ioremap(res->start, resource_size(res));
146 if (!(l3->l3_base[0])) { 143 if (!l3->l3_base[0]) {
147 dev_err(&pdev->dev, "ioremap failed\n"); 144 dev_err(&pdev->dev, "ioremap failed\n");
148 ret = -ENOMEM; 145 ret = -ENOMEM;
149 goto err2; 146 goto err0;
150 } 147 }
151 148
152 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 149 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
153 if (!res) { 150 if (!res) {
154 dev_err(&pdev->dev, "couldn't find resource 1\n"); 151 dev_err(&pdev->dev, "couldn't find resource 1\n");
155 ret = -ENODEV; 152 ret = -ENODEV;
156 goto err3; 153 goto err1;
157 } 154 }
158 155
159 l3->l3_base[1] = ioremap(res->start, resource_size(res)); 156 l3->l3_base[1] = ioremap(res->start, resource_size(res));
160 if (!(l3->l3_base[1])) { 157 if (!l3->l3_base[1]) {
161 dev_err(&pdev->dev, "ioremap failed\n"); 158 dev_err(&pdev->dev, "ioremap failed\n");
162 ret = -ENOMEM; 159 ret = -ENOMEM;
163 goto err4; 160 goto err1;
164 } 161 }
165 162
166 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 163 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
167 if (!res) { 164 if (!res) {
168 dev_err(&pdev->dev, "couldn't find resource 2\n"); 165 dev_err(&pdev->dev, "couldn't find resource 2\n");
169 ret = -ENODEV; 166 ret = -ENODEV;
170 goto err5; 167 goto err2;
171 } 168 }
172 169
173 l3->l3_base[2] = ioremap(res->start, resource_size(res)); 170 l3->l3_base[2] = ioremap(res->start, resource_size(res));
174 if (!(l3->l3_base[2])) { 171 if (!l3->l3_base[2]) {
175 dev_err(&pdev->dev, "ioremap failed\n"); 172 dev_err(&pdev->dev, "ioremap failed\n");
176 ret = -ENOMEM; 173 ret = -ENOMEM;
177 goto err6; 174 goto err2;
178 } 175 }
179 176
180 /* 177 /*
@@ -187,7 +184,7 @@ static int __init omap4_l3_probe(struct platform_device *pdev)
187 if (ret) { 184 if (ret) {
188 pr_crit("L3: request_irq failed to register for 0x%x\n", 185 pr_crit("L3: request_irq failed to register for 0x%x\n",
189 OMAP44XX_IRQ_L3_DBG); 186 OMAP44XX_IRQ_L3_DBG);
190 goto err7; 187 goto err3;
191 } 188 }
192 l3->debug_irq = irq; 189 l3->debug_irq = irq;
193 190
@@ -198,24 +195,22 @@ static int __init omap4_l3_probe(struct platform_device *pdev)
198 if (ret) { 195 if (ret) {
199 pr_crit("L3: request_irq failed to register for 0x%x\n", 196 pr_crit("L3: request_irq failed to register for 0x%x\n",
200 OMAP44XX_IRQ_L3_APP); 197 OMAP44XX_IRQ_L3_APP);
201 goto err8; 198 goto err4;
202 } 199 }
203 l3->app_irq = irq; 200 l3->app_irq = irq;
204 201
205 goto err0; 202 return 0;
206err8: 203
207err7:
208 iounmap(l3->l3_base[2]);
209err6:
210err5:
211 iounmap(l3->l3_base[1]);
212err4: 204err4:
205 free_irq(l3->debug_irq, l3);
213err3: 206err3:
214 iounmap(l3->l3_base[0]); 207 iounmap(l3->l3_base[2]);
215err2: 208err2:
209 iounmap(l3->l3_base[1]);
216err1: 210err1:
217 kfree(l3); 211 iounmap(l3->l3_base[0]);
218err0: 212err0:
213 kfree(l3);
219 return ret; 214 return ret;
220} 215}
221 216
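
The renumbered labels in omap4_l3_probe() follow the usual probe unwind idiom: each label releases only what was successfully acquired before the failing step, in reverse order of acquisition, with the initial allocation freed last. A generic sketch of the idiom, with placeholder addresses and names:

#include <linux/io.h>
#include <linux/slab.h>

struct example_ctx {
	void __iomem *base0;
	void __iomem *base1;
};

static int example_probe(void)
{
	struct example_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->base0 = ioremap(0x48000000, 0x1000);	/* placeholder address */
	if (!ctx->base0) {
		ret = -ENOMEM;
		goto err_free;
	}

	ctx->base1 = ioremap(0x4a000000, 0x1000);	/* placeholder address */
	if (!ctx->base1) {
		ret = -ENOMEM;
		goto err_unmap0;
	}

	/* ctx would normally be stashed in driver data here */
	return 0;

err_unmap0:
	iounmap(ctx->base0);
err_free:
	kfree(ctx);
	return ret;
}
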
diff --git a/arch/arm/mach-omap2/omap_l3_smx.c b/arch/arm/mach-omap2/omap_l3_smx.c
index 4321e7938929..873c0e33b512 100644
--- a/arch/arm/mach-omap2/omap_l3_smx.c
+++ b/arch/arm/mach-omap2/omap_l3_smx.c
@@ -155,7 +155,7 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
155 u8 multi = error & L3_ERROR_LOG_MULTI; 155 u8 multi = error & L3_ERROR_LOG_MULTI;
156 u32 address = omap3_l3_decode_addr(error_addr); 156 u32 address = omap3_l3_decode_addr(error_addr);
157 157
158 WARN(true, "%s Error seen by %s %s at address %x\n", 158 WARN(true, "%s seen by %s %s at address %x\n",
159 omap3_l3_code_string(code), 159 omap3_l3_code_string(code),
160 omap3_l3_initiator_string(initid), 160 omap3_l3_initiator_string(initid),
161 multi ? "Multiple Errors" : "", 161 multi ? "Multiple Errors" : "",
@@ -167,21 +167,15 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
167static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) 167static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
168{ 168{
169 struct omap3_l3 *l3 = _l3; 169 struct omap3_l3 *l3 = _l3;
170
171 u64 status, clear; 170 u64 status, clear;
172 u64 error; 171 u64 error;
173 u64 error_addr; 172 u64 error_addr;
174 u64 err_source = 0; 173 u64 err_source = 0;
175 void __iomem *base; 174 void __iomem *base;
176 int int_type; 175 int int_type;
177
178 irqreturn_t ret = IRQ_NONE; 176 irqreturn_t ret = IRQ_NONE;
179 177
180 if (irq == l3->app_irq) 178 int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
181 int_type = L3_APPLICATION_ERROR;
182 else
183 int_type = L3_DEBUG_ERROR;
184
185 if (!int_type) { 179 if (!int_type) {
186 status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0); 180 status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
187 /* 181 /*
@@ -202,7 +196,6 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
202 196
203 base = l3->rt + *(omap3_l3_bases[int_type] + err_source); 197 base = l3->rt + *(omap3_l3_bases[int_type] + err_source);
204 error = omap3_l3_readll(base, L3_ERROR_LOG); 198 error = omap3_l3_readll(base, L3_ERROR_LOG);
205
206 if (error) { 199 if (error) {
207 error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR); 200 error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR);
208 201
@@ -210,9 +203,8 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
210 } 203 }
211 204
212 /* Clear the status register */ 205 /* Clear the status register */
213 clear = ((L3_AGENT_STATUS_CLEAR_IA << int_type) | 206 clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) |
214 (L3_AGENT_STATUS_CLEAR_TA)); 207 L3_AGENT_STATUS_CLEAR_TA;
215
216 omap3_l3_writell(base, L3_AGENT_STATUS, clear); 208 omap3_l3_writell(base, L3_AGENT_STATUS, clear);
217 209
218 /* clear the error log register */ 210 /* clear the error log register */
@@ -228,10 +220,8 @@ static int __init omap3_l3_probe(struct platform_device *pdev)
228 int ret; 220 int ret;
229 221
230 l3 = kzalloc(sizeof(*l3), GFP_KERNEL); 222 l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
231 if (!l3) { 223 if (!l3)
232 ret = -ENOMEM; 224 return -ENOMEM;
233 goto err0;
234 }
235 225
236 platform_set_drvdata(pdev, l3); 226 platform_set_drvdata(pdev, l3);
237 227
@@ -239,13 +229,13 @@ static int __init omap3_l3_probe(struct platform_device *pdev)
239 if (!res) { 229 if (!res) {
240 dev_err(&pdev->dev, "couldn't find resource\n"); 230 dev_err(&pdev->dev, "couldn't find resource\n");
241 ret = -ENODEV; 231 ret = -ENODEV;
242 goto err1; 232 goto err0;
243 } 233 }
244 l3->rt = ioremap(res->start, resource_size(res)); 234 l3->rt = ioremap(res->start, resource_size(res));
245 if (!(l3->rt)) { 235 if (!l3->rt) {
246 dev_err(&pdev->dev, "ioremap failed\n"); 236 dev_err(&pdev->dev, "ioremap failed\n");
247 ret = -ENOMEM; 237 ret = -ENOMEM;
248 goto err2; 238 goto err0;
249 } 239 }
250 240
251 l3->debug_irq = platform_get_irq(pdev, 0); 241 l3->debug_irq = platform_get_irq(pdev, 0);
@@ -254,28 +244,26 @@ static int __init omap3_l3_probe(struct platform_device *pdev)
254 "l3-debug-irq", l3); 244 "l3-debug-irq", l3);
255 if (ret) { 245 if (ret) {
256 dev_err(&pdev->dev, "couldn't request debug irq\n"); 246 dev_err(&pdev->dev, "couldn't request debug irq\n");
257 goto err3; 247 goto err1;
258 } 248 }
259 249
260 l3->app_irq = platform_get_irq(pdev, 1); 250 l3->app_irq = platform_get_irq(pdev, 1);
261 ret = request_irq(l3->app_irq, omap3_l3_app_irq, 251 ret = request_irq(l3->app_irq, omap3_l3_app_irq,
262 IRQF_DISABLED | IRQF_TRIGGER_RISING, 252 IRQF_DISABLED | IRQF_TRIGGER_RISING,
263 "l3-app-irq", l3); 253 "l3-app-irq", l3);
264
265 if (ret) { 254 if (ret) {
266 dev_err(&pdev->dev, "couldn't request app irq\n"); 255 dev_err(&pdev->dev, "couldn't request app irq\n");
267 goto err4; 256 goto err2;
268 } 257 }
269 258
270 goto err0; 259 return 0;
271 260
272err4:
273err3:
274 iounmap(l3->rt);
275err2: 261err2:
262 free_irq(l3->debug_irq, l3);
276err1: 263err1:
277 kfree(l3); 264 iounmap(l3->rt);
278err0: 265err0:
266 kfree(l3);
279 return ret; 267 return ret;
280} 268}
281 269
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 05f6abc96b0d..f47813edd951 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -50,13 +50,16 @@ int omap4430_phy_init(struct device *dev)
50{ 50{
51 ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K); 51 ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K);
52 if (!ctrl_base) { 52 if (!ctrl_base) {
53 dev_err(dev, "control module ioremap failed\n"); 53 pr_err("control module ioremap failed\n");
54 return -ENOMEM; 54 return -ENOMEM;
55 } 55 }
56 /* Power down the phy */ 56 /* Power down the phy */
57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); 57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
58 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
59 58
59 if (!dev)
60 return 0;
61
62 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
60 if (IS_ERR(phyclk)) { 63 if (IS_ERR(phyclk)) {
61 dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n"); 64 dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n");
62 iounmap(ctrl_base); 65 iounmap(ctrl_base);
@@ -228,7 +231,7 @@ void am35x_musb_clear_irq(void)
228 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); 231 regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
229} 232}
230 233
231void am35x_musb_set_mode(u8 musb_mode) 234void am35x_set_mode(u8 musb_mode)
232{ 235{
233 u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); 236 u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
234 237
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 797bfd12b643..45bcfce77352 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -36,11 +36,16 @@ static inline int omap4_opp_init(void)
36} 36}
37#endif 37#endif
38 38
39/*
40 * cpuidle mach specific parameters
41 *
42 * The board code can override the default C-states definition using
43 * omap3_pm_init_cpuidle
44 */
39struct cpuidle_params { 45struct cpuidle_params {
40 u8 valid; 46 u32 exit_latency; /* exit_latency = sleep + wake-up latencies */
41 u32 sleep_latency; 47 u32 target_residency;
42 u32 wake_latency; 48 u8 valid; /* validates the C-state */
43 u32 threshold;
44}; 49};
45 50
46#if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE) 51#if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE)
@@ -73,10 +78,6 @@ extern u32 sleep_while_idle;
73#define sleep_while_idle 0 78#define sleep_while_idle 0
74#endif 79#endif
75 80
76#if defined(CONFIG_CPU_IDLE)
77extern void omap3_cpuidle_update_states(u32, u32);
78#endif
79
80#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) 81#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
81extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev); 82extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
82extern int pm_dbg_regset_save(int reg_set); 83extern int pm_dbg_regset_save(int reg_set);
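
With the new layout, a board that wants non-default C-state parameters passes a seven-entry table of {exit_latency, target_residency, valid} to omap3_pm_init_cpuidle() before the idle driver is registered. The numbers below are invented for illustration, not tuned values.

#include <linux/init.h>

#include "pm.h"

/* One entry per C-state, C1..C7: exit latency (us), target residency (us),
 * and whether the state is usable on this (hypothetical) board. */
static struct cpuidle_params example_cpuidle_params[] = {
	{ 4,     20,     1 },	/* C1 */
	{ 30,    100,    1 },	/* C2 */
	{ 150,   500,    1 },	/* C3 */
	{ 3500,  5000,   1 },	/* C4 */
	{ 11000, 14000,  1 },	/* C5 */
	{ 12500, 16000,  1 },	/* C6 */
	{ 45000, 300000, 0 },	/* C7 disabled on this board */
};

static void __init example_board_pm_init(void)
{
	omap3_pm_init_cpuidle(example_cpuidle_params);
}
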
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 0c5e3a46a3ad..c155c9d1c82c 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -779,18 +779,6 @@ void omap3_pm_off_mode_enable(int enable)
779 else 779 else
780 state = PWRDM_POWER_RET; 780 state = PWRDM_POWER_RET;
781 781
782#ifdef CONFIG_CPU_IDLE
783 /*
784 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
785 * enable OFF mode in a stable form for previous revisions, restrict
786 * instead to RET
787 */
788 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
789 omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
790 else
791 omap3_cpuidle_update_states(state, state);
792#endif
793
794 list_for_each_entry(pwrst, &pwrst_list, node) { 782 list_for_each_entry(pwrst, &pwrst_list, node) {
795 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) && 783 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
796 pwrst->pwrdm == core_pwrdm && 784 pwrst->pwrdm == core_pwrdm &&
@@ -895,8 +883,6 @@ static int __init omap3_pm_init(void)
895 883
896 pm_errata_configure(); 884 pm_errata_configure();
897 885
898 printk(KERN_ERR "Power Management for TI OMAP3.\n");
899
900 /* XXX prcm_setup_regs needs to be before enabling hw 886 /* XXX prcm_setup_regs needs to be before enabling hw
901 * supervised mode for powerdomains */ 887 * supervised mode for powerdomains */
902 prcm_setup_regs(); 888 prcm_setup_regs();
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 76cfff2db514..59a870be8390 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -105,13 +105,11 @@ static int __init omap4_pm_init(void)
105 105
106 pr_err("Power Management for TI OMAP4.\n"); 106 pr_err("Power Management for TI OMAP4.\n");
107 107
108#ifdef CONFIG_PM
109 ret = pwrdm_for_each(pwrdms_setup, NULL); 108 ret = pwrdm_for_each(pwrdms_setup, NULL);
110 if (ret) { 109 if (ret) {
111 pr_err("Failed to setup powerdomains\n"); 110 pr_err("Failed to setup powerdomains\n");
112 goto err2; 111 goto err2;
113 } 112 }
114#endif
115 113
116#ifdef CONFIG_SUSPEND 114#ifdef CONFIG_SUSPEND
117 suspend_set_ops(&omap_pm_ops); 115 suspend_set_ops(&omap_pm_ops);
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 13e24f913dd4..fb7dc52394a8 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -847,6 +847,14 @@ static int __init omap_sr_probe(struct platform_device *pdev)
847 goto err_free_devinfo; 847 goto err_free_devinfo;
848 } 848 }
849 849
850 mem = request_mem_region(mem->start, resource_size(mem),
851 dev_name(&pdev->dev));
852 if (!mem) {
853 dev_err(&pdev->dev, "%s: no mem region\n", __func__);
854 ret = -EBUSY;
855 goto err_free_devinfo;
856 }
857
850 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 858 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
851 859
852 pm_runtime_enable(&pdev->dev); 860 pm_runtime_enable(&pdev->dev);
@@ -883,7 +891,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
883 ret = sr_late_init(sr_info); 891 ret = sr_late_init(sr_info);
884 if (ret) { 892 if (ret) {
885 pr_warning("%s: Error in SR late init\n", __func__); 893 pr_warning("%s: Error in SR late init\n", __func__);
886 goto err_release_region; 894 return ret;
887 } 895 }
888 } 896 }
889 897
@@ -896,7 +904,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
896 vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); 904 vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
897 if (!vdd_dbg_dir) { 905 if (!vdd_dbg_dir) {
898 ret = -EINVAL; 906 ret = -EINVAL;
899 goto err_release_region; 907 goto err_iounmap;
900 } 908 }
901 909
902 sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); 910 sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
@@ -904,7 +912,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
904 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", 912 dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
905 __func__); 913 __func__);
906 ret = PTR_ERR(sr_info->dbg_dir); 914 ret = PTR_ERR(sr_info->dbg_dir);
907 goto err_release_region; 915 goto err_iounmap;
908 } 916 }
909 917
910 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, 918 (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
@@ -921,7 +929,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
921 dev_err(&pdev->dev, "%s: Unable to create debugfs directory" 929 dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
922 "for n-values\n", __func__); 930 "for n-values\n", __func__);
923 ret = PTR_ERR(nvalue_dir); 931 ret = PTR_ERR(nvalue_dir);
924 goto err_release_region; 932 goto err_debugfs;
925 } 933 }
926 934
927 omap_voltage_get_volttable(sr_info->voltdm, &volt_data); 935 omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -931,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
931 "entries for n-values\n", 939 "entries for n-values\n",
932 __func__, sr_info->voltdm->name); 940 __func__, sr_info->voltdm->name);
933 ret = -ENODATA; 941 ret = -ENODATA;
934 goto err_release_region; 942 goto err_debugfs;
935 } 943 }
936 944
937 for (i = 0; i < sr_info->nvalue_count; i++) { 945 for (i = 0; i < sr_info->nvalue_count; i++) {
@@ -945,6 +953,11 @@ static int __init omap_sr_probe(struct platform_device *pdev)
945 953
946 return ret; 954 return ret;
947 955
956err_debugfs:
957 debugfs_remove_recursive(sr_info->dbg_dir);
958err_iounmap:
959 list_del(&sr_info->node);
960 iounmap(sr_info->base);
948err_release_region: 961err_release_region:
949 release_mem_region(mem->start, resource_size(mem)); 962 release_mem_region(mem->start, resource_size(mem));
950err_free_devinfo: 963err_free_devinfo:
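The reworked labels follow the usual probe() convention: unwind in the reverse order of acquisition, one label per successfully claimed resource. A generic sketch of that pattern under the same ordering (names are illustrative, not taken from the smartreflex driver):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;
	int irq, ret;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	if (!request_mem_region(mem->start, resource_size(mem),
				dev_name(&pdev->dev)))
		return -EBUSY;

	base = ioremap(mem->start, resource_size(mem));
	if (!base) {
		ret = -ENOMEM;
		goto err_release_region;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_iounmap;
	}

	return 0;

err_iounmap:
	iounmap(base);
err_release_region:
	release_mem_region(mem->start, resource_size(mem));
	return ret;
}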
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 35559f77e2de..c7ed540d868d 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -108,7 +108,13 @@ static void usb_musb_mux_init(struct omap_musb_board_data *board_data)
108 } 108 }
109} 109}
110 110
111void __init usb_musb_init(struct omap_musb_board_data *board_data) 111static struct omap_musb_board_data musb_default_board_data = {
112 .interface_type = MUSB_INTERFACE_ULPI,
113 .mode = MUSB_OTG,
114 .power = 100,
115};
116
117void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
112{ 118{
113 struct omap_hwmod *oh; 119 struct omap_hwmod *oh;
114 struct omap_device *od; 120 struct omap_device *od;
@@ -116,11 +122,12 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
116 struct device *dev; 122 struct device *dev;
117 int bus_id = -1; 123 int bus_id = -1;
118 const char *oh_name, *name; 124 const char *oh_name, *name;
125 struct omap_musb_board_data *board_data;
119 126
120 if (cpu_is_omap3517() || cpu_is_omap3505()) { 127 if (musb_board_data)
121 } else if (cpu_is_omap44xx()) { 128 board_data = musb_board_data;
122 usb_musb_mux_init(board_data); 129 else
123 } 130 board_data = &musb_default_board_data;
124 131
125 /* 132 /*
126 * REVISIT: This line can be removed once all the platforms using 133 * REVISIT: This line can be removed once all the platforms using
@@ -164,10 +171,15 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
164 dev->dma_mask = &musb_dmamask; 171 dev->dma_mask = &musb_dmamask;
165 dev->coherent_dma_mask = musb_dmamask; 172 dev->coherent_dma_mask = musb_dmamask;
166 put_device(dev); 173 put_device(dev);
174
175 if (cpu_is_omap44xx())
176 omap4430_phy_init(dev);
167} 177}
168 178
169#else 179#else
170void __init usb_musb_init(struct omap_musb_board_data *board_data) 180void __init usb_musb_init(struct omap_musb_board_data *board_data)
171{ 181{
182 if (cpu_is_omap44xx())
183 omap4430_phy_init(NULL);
172} 184}
173#endif /* CONFIG_USB_MUSB_SOC */ 185#endif /* CONFIG_USB_MUSB_SOC */
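With the fallback above, a board that needs no special MUSB wiring no longer has to define its own omap_musb_board_data; passing NULL selects the ULPI/OTG/100 mA defaults. A hypothetical board init:

/* sketch of a board file relying on musb_default_board_data */
static void __init my_board_init(void)
{
	usb_musb_init(NULL);	/* ULPI interface, OTG mode, 100 mA */
}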
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 8a3c05f3c1d6..8dd26b765b7d 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -293,12 +293,11 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
293 ); 293 );
294 294
295 /* IRQ */ 295 /* IRQ */
296 status = gpio_request(irq, "TUSB6010 irq"); 296 status = gpio_request_one(irq, GPIOF_IN, "TUSB6010 irq");
297 if (status < 0) { 297 if (status < 0) {
298 printk(error, 3, status); 298 printk(error, 3, status);
299 return status; 299 return status;
300 } 300 }
301 gpio_direction_input(irq);
302 tusb_resources[2].start = irq + IH_GPIO_BASE; 301 tusb_resources[2].start = irq + IH_GPIO_BASE;
303 302
304 /* set up memory timings ... can speed them up later */ 303 /* set up memory timings ... can speed them up later */
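gpio_request_one() folds the request and the direction setup into a single call, so GPIOF_IN here replaces the separate gpio_direction_input(). A minimal sketch of the equivalent usage (the GPIO number and label are placeholders):

#include <linux/gpio.h>

/* equivalent of gpio_request() followed by gpio_direction_input() */
static int example_claim_input_gpio(unsigned gpio)
{
	return gpio_request_one(gpio, GPIOF_IN, "example irq");
}

GPIOF_OUT_INIT_LOW and GPIOF_OUT_INIT_HIGH cover the output cases the same way.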
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index 0c1552d9d995..9ef3789ded4b 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -148,7 +148,6 @@ static int vp_volt_debug_get(void *data, u64 *val)
148 } 148 }
149 149
150 vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage); 150 vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
151 pr_notice("curr_vsel = %x\n", vsel);
152 151
153 if (!vdd->pmic_info->vsel_to_uv) { 152 if (!vdd->pmic_info->vsel_to_uv) {
154 pr_warning("PMIC function to convert vsel to voltage" 153 pr_warning("PMIC function to convert vsel to voltage"
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index e2507f66f9d5..612b27000c3e 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -30,6 +30,11 @@ obj-$(CONFIG_ARCH_SH7377) += entry-intc.o
30obj-$(CONFIG_ARCH_SH7372) += entry-intc.o 30obj-$(CONFIG_ARCH_SH7372) += entry-intc.o
31obj-$(CONFIG_ARCH_SH73A0) += entry-gic.o 31obj-$(CONFIG_ARCH_SH73A0) += entry-gic.o
32 32
33# PM objects
34obj-$(CONFIG_SUSPEND) += suspend.o
35obj-$(CONFIG_CPU_IDLE) += cpuidle.o
36obj-$(CONFIG_ARCH_SH7372) += pm-sh7372.o sleep-sh7372.o
37
33# Board objects 38# Board objects
34obj-$(CONFIG_MACH_G3EVM) += board-g3evm.o 39obj-$(CONFIG_MACH_G3EVM) += board-g3evm.o
35obj-$(CONFIG_MACH_G4EVM) += board-g4evm.o 40obj-$(CONFIG_MACH_G4EVM) += board-g4evm.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 3e6f0aab460b..c95258c274c1 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -34,6 +34,8 @@
34#include <linux/input/sh_keysc.h> 34#include <linux/input/sh_keysc.h>
35#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
36#include <linux/mmc/sh_mmcif.h> 36#include <linux/mmc/sh_mmcif.h>
37#include <linux/mmc/sh_mobile_sdhi.h>
38#include <linux/mfd/tmio.h>
37#include <linux/sh_clk.h> 39#include <linux/sh_clk.h>
38#include <video/sh_mobile_lcdc.h> 40#include <video/sh_mobile_lcdc.h>
39#include <video/sh_mipi_dsi.h> 41#include <video/sh_mipi_dsi.h>
@@ -156,10 +158,19 @@ static struct resource sh_mmcif_resources[] = {
156 }, 158 },
157}; 159};
158 160
161static struct sh_mmcif_dma sh_mmcif_dma = {
162 .chan_priv_rx = {
163 .slave_id = SHDMA_SLAVE_MMCIF_RX,
164 },
165 .chan_priv_tx = {
166 .slave_id = SHDMA_SLAVE_MMCIF_TX,
167 },
168};
159static struct sh_mmcif_plat_data sh_mmcif_platdata = { 169static struct sh_mmcif_plat_data sh_mmcif_platdata = {
160 .sup_pclk = 0, 170 .sup_pclk = 0,
161 .ocr = MMC_VDD_165_195, 171 .ocr = MMC_VDD_165_195,
162 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, 172 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
173 .dma = &sh_mmcif_dma,
163}; 174};
164 175
165static struct platform_device mmc_device = { 176static struct platform_device mmc_device = {
@@ -296,11 +307,13 @@ static struct platform_device lcdc0_device = {
296/* MIPI-DSI */ 307/* MIPI-DSI */
297static struct resource mipidsi0_resources[] = { 308static struct resource mipidsi0_resources[] = {
298 [0] = { 309 [0] = {
310 .name = "DSI0",
299 .start = 0xfeab0000, 311 .start = 0xfeab0000,
300 .end = 0xfeab3fff, 312 .end = 0xfeab3fff,
301 .flags = IORESOURCE_MEM, 313 .flags = IORESOURCE_MEM,
302 }, 314 },
303 [1] = { 315 [1] = {
316 .name = "DSI0",
304 .start = 0xfeab4000, 317 .start = 0xfeab4000,
305 .end = 0xfeab7fff, 318 .end = 0xfeab7fff,
306 .flags = IORESOURCE_MEM, 319 .flags = IORESOURCE_MEM,
@@ -325,6 +338,89 @@ static struct platform_device mipidsi0_device = {
325 }, 338 },
326}; 339};
327 340
341static struct sh_mobile_sdhi_info sdhi0_info = {
342 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
343 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
344 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
345 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
346};
347
348static struct resource sdhi0_resources[] = {
349 [0] = {
350 .name = "SDHI0",
351 .start = 0xee100000,
352 .end = 0xee1000ff,
353 .flags = IORESOURCE_MEM,
354 },
355 [1] = {
356 .start = gic_spi(83),
357 .flags = IORESOURCE_IRQ,
358 },
359 [2] = {
360 .start = gic_spi(84),
361 .flags = IORESOURCE_IRQ,
362 },
363 [3] = {
364 .start = gic_spi(85),
365 .flags = IORESOURCE_IRQ,
366 },
367};
368
369static struct platform_device sdhi0_device = {
370 .name = "sh_mobile_sdhi",
371 .id = 0,
372 .num_resources = ARRAY_SIZE(sdhi0_resources),
373 .resource = sdhi0_resources,
374 .dev = {
375 .platform_data = &sdhi0_info,
376 },
377};
378
379void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
380{
381 gpio_set_value(GPIO_PORT114, state);
382}
383
384static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
385 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
386 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
387 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
388 .tmio_caps = MMC_CAP_NONREMOVABLE,
389 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
390 .set_pwr = ag5evm_sdhi1_set_pwr,
391};
392
393static struct resource sdhi1_resources[] = {
394 [0] = {
395 .name = "SDHI1",
396 .start = 0xee120000,
397 .end = 0xee1200ff,
398 .flags = IORESOURCE_MEM,
399 },
400 [1] = {
401 .start = gic_spi(87),
402 .flags = IORESOURCE_IRQ,
403 },
404 [2] = {
405 .start = gic_spi(88),
406 .flags = IORESOURCE_IRQ,
407 },
408 [3] = {
409 .start = gic_spi(89),
410 .flags = IORESOURCE_IRQ,
411 },
412};
413
414static struct platform_device sdhi1_device = {
415 .name = "sh_mobile_sdhi",
416 .id = 1,
417 .dev = {
418 .platform_data = &sh_sdhi1_platdata,
419 },
420 .num_resources = ARRAY_SIZE(sdhi1_resources),
421 .resource = sdhi1_resources,
422};
423
328static struct platform_device *ag5evm_devices[] __initdata = { 424static struct platform_device *ag5evm_devices[] __initdata = {
329 &eth_device, 425 &eth_device,
330 &keysc_device, 426 &keysc_device,
@@ -333,6 +429,8 @@ static struct platform_device *ag5evm_devices[] __initdata = {
333 &irda_device, 429 &irda_device,
334 &lcdc0_device, 430 &lcdc0_device,
335 &mipidsi0_device, 431 &mipidsi0_device,
432 &sdhi0_device,
433 &sdhi1_device,
336}; 434};
337 435
338static struct map_desc ag5evm_io_desc[] __initdata = { 436static struct map_desc ag5evm_io_desc[] __initdata = {
@@ -454,6 +552,26 @@ static void __init ag5evm_init(void)
454 /* MIPI-DSI clock setup */ 552 /* MIPI-DSI clock setup */
455 __raw_writel(0x2a809010, DSI0PHYCR); 553 __raw_writel(0x2a809010, DSI0PHYCR);
456 554
555 /* enable SDHI0 on CN15 [SD I/F] */
556 gpio_request(GPIO_FN_SDHICD0, NULL);
557 gpio_request(GPIO_FN_SDHIWP0, NULL);
558 gpio_request(GPIO_FN_SDHICMD0, NULL);
559 gpio_request(GPIO_FN_SDHICLK0, NULL);
560 gpio_request(GPIO_FN_SDHID0_3, NULL);
561 gpio_request(GPIO_FN_SDHID0_2, NULL);
562 gpio_request(GPIO_FN_SDHID0_1, NULL);
563 gpio_request(GPIO_FN_SDHID0_0, NULL);
564
565 /* enable SDHI1 on CN4 [WLAN I/F] */
566 gpio_request(GPIO_FN_SDHICLK1, NULL);
567 gpio_request(GPIO_FN_SDHICMD1_PU, NULL);
568 gpio_request(GPIO_FN_SDHID1_3_PU, NULL);
569 gpio_request(GPIO_FN_SDHID1_2_PU, NULL);
570 gpio_request(GPIO_FN_SDHID1_1_PU, NULL);
571 gpio_request(GPIO_FN_SDHID1_0_PU, NULL);
572 gpio_request(GPIO_PORT114, "sdhi1_power");
573 gpio_direction_output(GPIO_PORT114, 0);
574
457#ifdef CONFIG_CACHE_L2X0 575#ifdef CONFIG_CACHE_L2X0
458 /* Shared attribute override enable, 64K*8way */ 576 /* Shared attribute override enable, 64K*8way */
459 l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff); 577 l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 1e35fa976d64..08acb6ec8139 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -316,8 +316,16 @@ static struct resource sdhi0_resources[] = {
316 .flags = IORESOURCE_MEM, 316 .flags = IORESOURCE_MEM,
317 }, 317 },
318 [1] = { 318 [1] = {
319 .start = evt2irq(0x0e00) /* SDHI0 */, 319 .start = evt2irq(0x0e00) /* SDHI0_SDHI0I0 */,
320 .flags = IORESOURCE_IRQ, 320 .flags = IORESOURCE_IRQ,
321 },
322 [2] = {
323 .start = evt2irq(0x0e20) /* SDHI0_SDHI0I1 */,
324 .flags = IORESOURCE_IRQ,
325 },
326 [3] = {
327 .start = evt2irq(0x0e40) /* SDHI0_SDHI0I2 */,
328 .flags = IORESOURCE_IRQ,
321 }, 329 },
322}; 330};
323 331
@@ -349,8 +357,16 @@ static struct resource sdhi1_resources[] = {
349 .flags = IORESOURCE_MEM, 357 .flags = IORESOURCE_MEM,
350 }, 358 },
351 [1] = { 359 [1] = {
352 .start = evt2irq(0x0e80), 360 .start = evt2irq(0x0e80), /* SDHI1_SDHI1I0 */
353 .flags = IORESOURCE_IRQ, 361 .flags = IORESOURCE_IRQ,
362 },
363 [2] = {
364 .start = evt2irq(0x0ea0), /* SDHI1_SDHI1I1 */
365 .flags = IORESOURCE_IRQ,
366 },
367 [3] = {
368 .start = evt2irq(0x0ec0), /* SDHI1_SDHI1I2 */
369 .flags = IORESOURCE_IRQ,
354 }, 370 },
355}; 371};
356 372
@@ -980,11 +996,6 @@ static void __init hdmi_init_pm_clock(void)
980 goto out; 996 goto out;
981 } 997 }
982 998
983 ret = clk_enable(&sh7372_pllc2_clk);
984 if (ret < 0) {
985 pr_err("Cannot enable pllc2 clock\n");
986 goto out;
987 }
988 pr_debug("PLLC2 set frequency %lu\n", rate); 999 pr_debug("PLLC2 set frequency %lu\n", rate);
989 1000
990 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk); 1001 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
@@ -1343,6 +1354,7 @@ static void __init ap4evb_init(void)
1343 1354
1344 hdmi_init_pm_clock(); 1355 hdmi_init_pm_clock();
1345 fsi_init_pm_clock(); 1356 fsi_init_pm_clock();
1357 sh7372_pm_init();
1346} 1358}
1347 1359
1348static void __init ap4evb_timer_init(void) 1360static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index c87a7b7c5832..8e3c5559f27f 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -205,7 +205,7 @@ static struct resource sdhi0_resources[] = {
205 [0] = { 205 [0] = {
206 .name = "SDHI0", 206 .name = "SDHI0",
207 .start = 0xe6d50000, 207 .start = 0xe6d50000,
208 .end = 0xe6d50nff, 208 .end = 0xe6d500ff,
209 .flags = IORESOURCE_MEM, 209 .flags = IORESOURCE_MEM,
210 }, 210 },
211 [1] = { 211 [1] = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7da2ca24229d..448ddbe43335 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -43,6 +43,7 @@
43#include <linux/sh_intc.h> 43#include <linux/sh_intc.h>
44#include <linux/tca6416_keypad.h> 44#include <linux/tca6416_keypad.h>
45#include <linux/usb/r8a66597.h> 45#include <linux/usb/r8a66597.h>
46#include <linux/usb/renesas_usbhs.h>
46 47
47#include <video/sh_mobile_hdmi.h> 48#include <video/sh_mobile_hdmi.h>
48#include <video/sh_mobile_lcdc.h> 49#include <video/sh_mobile_lcdc.h>
@@ -143,7 +144,30 @@
143 * open | external VBUS | Function 144 * open | external VBUS | Function
144 * 145 *
145 * *1 146 * *1
146 * CN31 is used as Host in Linux. 147 * CN31 is used as
148 * CONFIG_USB_R8A66597_HCD Host
149 * CONFIG_USB_RENESAS_USBHS Function
150 *
151 * CAUTION
152 *
 153 * The renesas_usbhs driver can detect connection/disconnection in
 154 * Function mode either via an external interrupt (from the USB PHY)
 155 * or in autonomy mode (using its own interrupt).
 156 * In external interrupt mode USB is powered off while disconnected;
 157 * in autonomy mode it is always powered on.
 158 *
 159 * mackerel cannot use the external interrupt (IRQ7-PORT167) on "USB0",
 160 * because the touchscreen already uses IRQ7-PORT40;
 161 * IRQ7 demux is impossible on this board.
 162 *
 163 * External interrupt mode can be used for the USB Function on "USB1".
 164 * USB1 can be Host via r8a66597 or Function via renesas_usbhs,
 165 * but do not select both drivers at the same time:
 166 * they request the same IRQ number and support neither
 167 * IRQF_SHARED nor IORESOURCE_IRQ_SHAREABLE.
 168 *
 169 * These are in effect old and new versions of the same USB driver;
 170 * sharing the IRQ would corrupt the hardware register state.
147 */ 171 */
148 172
149/* 173/*
@@ -185,6 +209,7 @@
185 * FIXME !! 209 * FIXME !!
186 * 210 *
187 * gpio_no_direction 211 * gpio_no_direction
212 * gpio_pull_down
188 * are quick_hack. 213 * are quick_hack.
189 * 214 *
190 * current gpio frame work doesn't have 215 * current gpio frame work doesn't have
@@ -196,6 +221,16 @@ static void __init gpio_no_direction(u32 addr)
196 __raw_writeb(0x00, addr); 221 __raw_writeb(0x00, addr);
197} 222}
198 223
224static void __init gpio_pull_down(u32 addr)
225{
226 u8 data = __raw_readb(addr);
227
228 data &= 0x0F;
229 data |= 0xA0;
230
231 __raw_writeb(data, addr);
232}
233
199/* MTD */ 234/* MTD */
200static struct mtd_partition nor_flash_partitions[] = { 235static struct mtd_partition nor_flash_partitions[] = {
201 { 236 {
@@ -458,12 +493,6 @@ static void __init hdmi_init_pm_clock(void)
458 goto out; 493 goto out;
459 } 494 }
460 495
461 ret = clk_enable(&sh7372_pllc2_clk);
462 if (ret < 0) {
463 pr_err("Cannot enable pllc2 clock\n");
464 goto out;
465 }
466
467 pr_debug("PLLC2 set frequency %lu\n", rate); 496 pr_debug("PLLC2 set frequency %lu\n", rate);
468 497
469 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk); 498 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
@@ -515,6 +544,157 @@ static struct platform_device usb1_host_device = {
515 .resource = usb1_host_resources, 544 .resource = usb1_host_resources,
516}; 545};
517 546
547/* USB1 (Function) */
548#define USB_PHY_MODE (1 << 4)
549#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
550#define USB_PHY_ON (1 << 1)
551#define USB_PHY_OFF (1 << 0)
552#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
553
554struct usbhs_private {
555 unsigned int irq;
556 unsigned int usbphyaddr;
557 unsigned int usbcrcaddr;
558 struct renesas_usbhs_platform_info info;
559};
560
561#define usbhs_get_priv(pdev) \
562 container_of(renesas_usbhs_get_info(pdev), \
563 struct usbhs_private, info)
564
565#define usbhs_is_connected(priv) \
566 (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
567
568static int usbhs1_get_id(struct platform_device *pdev)
569{
570 return USBHS_GADGET;
571}
572
573static int usbhs1_get_vbus(struct platform_device *pdev)
574{
575 return usbhs_is_connected(usbhs_get_priv(pdev));
576}
577
578static irqreturn_t usbhs1_interrupt(int irq, void *data)
579{
580 struct platform_device *pdev = data;
581 struct usbhs_private *priv = usbhs_get_priv(pdev);
582
583 dev_dbg(&pdev->dev, "%s\n", __func__);
584
585 renesas_usbhs_call_notify_hotplug(pdev);
586
587 /* clear status */
588 __raw_writew(__raw_readw(priv->usbphyaddr) | USB_PHY_INT_CLR,
589 priv->usbphyaddr);
590
591 return IRQ_HANDLED;
592}
593
594static int usbhs1_hardware_init(struct platform_device *pdev)
595{
596 struct usbhs_private *priv = usbhs_get_priv(pdev);
597 int ret;
598
599 irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH);
600
601 /* clear interrupt status */
602 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
603
604 ret = request_irq(priv->irq, usbhs1_interrupt, 0,
605 dev_name(&pdev->dev), pdev);
606 if (ret) {
607 dev_err(&pdev->dev, "request_irq err\n");
608 return ret;
609 }
610
611 /* enable USB phy interrupt */
612 __raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->usbphyaddr);
613
614 return 0;
615}
616
617static void usbhs1_hardware_exit(struct platform_device *pdev)
618{
619 struct usbhs_private *priv = usbhs_get_priv(pdev);
620
621 /* clear interrupt status */
622 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
623
624 free_irq(priv->irq, pdev);
625}
626
627static void usbhs1_phy_reset(struct platform_device *pdev)
628{
629 struct usbhs_private *priv = usbhs_get_priv(pdev);
630
631 /* init phy */
632 __raw_writew(0x8a0a, priv->usbcrcaddr);
633}
634
635static u32 usbhs1_pipe_cfg[] = {
636 USB_ENDPOINT_XFER_CONTROL,
637 USB_ENDPOINT_XFER_ISOC,
638 USB_ENDPOINT_XFER_ISOC,
639 USB_ENDPOINT_XFER_BULK,
640 USB_ENDPOINT_XFER_BULK,
641 USB_ENDPOINT_XFER_BULK,
642 USB_ENDPOINT_XFER_INT,
643 USB_ENDPOINT_XFER_INT,
644 USB_ENDPOINT_XFER_INT,
645 USB_ENDPOINT_XFER_BULK,
646 USB_ENDPOINT_XFER_BULK,
647 USB_ENDPOINT_XFER_BULK,
648 USB_ENDPOINT_XFER_BULK,
649 USB_ENDPOINT_XFER_BULK,
650 USB_ENDPOINT_XFER_BULK,
651 USB_ENDPOINT_XFER_BULK,
652};
653
654static struct usbhs_private usbhs1_private = {
655 .irq = evt2irq(0x0300), /* IRQ8 */
656 .usbphyaddr = 0xE60581E2, /* USBPHY1INTAP */
657 .usbcrcaddr = 0xE6058130, /* USBCR4 */
658 .info = {
659 .platform_callback = {
660 .hardware_init = usbhs1_hardware_init,
661 .hardware_exit = usbhs1_hardware_exit,
662 .phy_reset = usbhs1_phy_reset,
663 .get_id = usbhs1_get_id,
664 .get_vbus = usbhs1_get_vbus,
665 },
666 .driver_param = {
667 .buswait_bwait = 4,
668 .pipe_type = usbhs1_pipe_cfg,
669 .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
670 },
671 },
672};
673
674static struct resource usbhs1_resources[] = {
675 [0] = {
676 .name = "USBHS",
677 .start = 0xE68B0000,
678 .end = 0xE68B00E6 - 1,
679 .flags = IORESOURCE_MEM,
680 },
681 [1] = {
682 .start = evt2irq(0x1ce0) /* USB1_USB1I0 */,
683 .flags = IORESOURCE_IRQ,
684 },
685};
686
687static struct platform_device usbhs1_device = {
688 .name = "renesas_usbhs",
689 .id = 1,
690 .dev = {
691 .platform_data = &usbhs1_private.info,
692 },
693 .num_resources = ARRAY_SIZE(usbhs1_resources),
694 .resource = usbhs1_resources,
695};
696
697
518/* LED */ 698/* LED */
519static struct gpio_led mackerel_leds[] = { 699static struct gpio_led mackerel_leds[] = {
520 { 700 {
@@ -690,7 +870,15 @@ static struct resource sdhi0_resources[] = {
690 .flags = IORESOURCE_MEM, 870 .flags = IORESOURCE_MEM,
691 }, 871 },
692 [1] = { 872 [1] = {
693 .start = evt2irq(0x0e00) /* SDHI0 */, 873 .start = evt2irq(0x0e00) /* SDHI0_SDHI0I0 */,
874 .flags = IORESOURCE_IRQ,
875 },
876 [2] = {
877 .start = evt2irq(0x0e20) /* SDHI0_SDHI0I1 */,
878 .flags = IORESOURCE_IRQ,
879 },
880 [3] = {
881 .start = evt2irq(0x0e40) /* SDHI0_SDHI0I2 */,
694 .flags = IORESOURCE_IRQ, 882 .flags = IORESOURCE_IRQ,
695 }, 883 },
696}; 884};
@@ -705,7 +893,7 @@ static struct platform_device sdhi0_device = {
705 }, 893 },
706}; 894};
707 895
708#if !defined(CONFIG_MMC_SH_MMCIF) 896#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
709/* SDHI1 */ 897/* SDHI1 */
710static struct sh_mobile_sdhi_info sdhi1_info = { 898static struct sh_mobile_sdhi_info sdhi1_info = {
711 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, 899 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
@@ -725,7 +913,15 @@ static struct resource sdhi1_resources[] = {
725 .flags = IORESOURCE_MEM, 913 .flags = IORESOURCE_MEM,
726 }, 914 },
727 [1] = { 915 [1] = {
728 .start = evt2irq(0x0e80), 916 .start = evt2irq(0x0e80), /* SDHI1_SDHI1I0 */
917 .flags = IORESOURCE_IRQ,
918 },
919 [2] = {
920 .start = evt2irq(0x0ea0), /* SDHI1_SDHI1I1 */
921 .flags = IORESOURCE_IRQ,
922 },
923 [3] = {
924 .start = evt2irq(0x0ec0), /* SDHI1_SDHI1I2 */
729 .flags = IORESOURCE_IRQ, 925 .flags = IORESOURCE_IRQ,
730 }, 926 },
731}; 927};
@@ -768,7 +964,15 @@ static struct resource sdhi2_resources[] = {
768 .flags = IORESOURCE_MEM, 964 .flags = IORESOURCE_MEM,
769 }, 965 },
770 [1] = { 966 [1] = {
771 .start = evt2irq(0x1200), 967 .start = evt2irq(0x1200), /* SDHI2_SDHI2I0 */
968 .flags = IORESOURCE_IRQ,
969 },
970 [2] = {
971 .start = evt2irq(0x1220), /* SDHI2_SDHI2I1 */
972 .flags = IORESOURCE_IRQ,
973 },
974 [3] = {
975 .start = evt2irq(0x1240), /* SDHI2_SDHI2I2 */
772 .flags = IORESOURCE_IRQ, 976 .flags = IORESOURCE_IRQ,
773 }, 977 },
774}; 978};
@@ -803,6 +1007,15 @@ static struct resource sh_mmcif_resources[] = {
803 }, 1007 },
804}; 1008};
805 1009
1010static struct sh_mmcif_dma sh_mmcif_dma = {
1011 .chan_priv_rx = {
1012 .slave_id = SHDMA_SLAVE_MMCIF_RX,
1013 },
1014 .chan_priv_tx = {
1015 .slave_id = SHDMA_SLAVE_MMCIF_TX,
1016 },
1017};
1018
806static struct sh_mmcif_plat_data sh_mmcif_plat = { 1019static struct sh_mmcif_plat_data sh_mmcif_plat = {
807 .sup_pclk = 0, 1020 .sup_pclk = 0,
808 .ocr = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34, 1021 .ocr = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -810,6 +1023,7 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
810 MMC_CAP_8_BIT_DATA | 1023 MMC_CAP_8_BIT_DATA |
811 MMC_CAP_NEEDS_POLL, 1024 MMC_CAP_NEEDS_POLL,
812 .get_cd = slot_cn7_get_cd, 1025 .get_cd = slot_cn7_get_cd,
1026 .dma = &sh_mmcif_dma,
813}; 1027};
814 1028
815static struct platform_device sh_mmcif_device = { 1029static struct platform_device sh_mmcif_device = {
@@ -858,37 +1072,23 @@ static struct soc_camera_link camera_link = {
858 .priv = &camera_info, 1072 .priv = &camera_info,
859}; 1073};
860 1074
861static void dummy_release(struct device *dev) 1075static struct platform_device *camera_device;
1076
1077static void mackerel_camera_release(struct device *dev)
862{ 1078{
1079 soc_camera_platform_release(&camera_device);
863} 1080}
864 1081
865static struct platform_device camera_device = {
866 .name = "soc_camera_platform",
867 .dev = {
868 .platform_data = &camera_info,
869 .release = dummy_release,
870 },
871};
872
873static int mackerel_camera_add(struct soc_camera_link *icl, 1082static int mackerel_camera_add(struct soc_camera_link *icl,
874 struct device *dev) 1083 struct device *dev)
875{ 1084{
876 if (icl != &camera_link) 1085 return soc_camera_platform_add(icl, dev, &camera_device, &camera_link,
877 return -ENODEV; 1086 mackerel_camera_release, 0);
878
879 camera_info.dev = dev;
880
881 return platform_device_register(&camera_device);
882} 1087}
883 1088
884static void mackerel_camera_del(struct soc_camera_link *icl) 1089static void mackerel_camera_del(struct soc_camera_link *icl)
885{ 1090{
886 if (icl != &camera_link) 1091 soc_camera_platform_del(icl, camera_device, &camera_link);
887 return;
888
889 platform_device_unregister(&camera_device);
890 memset(&camera_device.dev.kobj, 0,
891 sizeof(camera_device.dev.kobj));
892} 1092}
893 1093
894static struct sh_mobile_ceu_info sh_mobile_ceu_info = { 1094static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
@@ -935,12 +1135,13 @@ static struct platform_device *mackerel_devices[] __initdata = {
935 &smc911x_device, 1135 &smc911x_device,
936 &lcdc_device, 1136 &lcdc_device,
937 &usb1_host_device, 1137 &usb1_host_device,
1138 &usbhs1_device,
938 &leds_device, 1139 &leds_device,
939 &fsi_device, 1140 &fsi_device,
940 &fsi_ak4643_device, 1141 &fsi_ak4643_device,
941 &fsi_hdmi_device, 1142 &fsi_hdmi_device,
942 &sdhi0_device, 1143 &sdhi0_device,
943#if !defined(CONFIG_MMC_SH_MMCIF) 1144#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
944 &sdhi1_device, 1145 &sdhi1_device,
945#endif 1146#endif
946 &sdhi2_device, 1147 &sdhi2_device,
@@ -1030,6 +1231,7 @@ static void __init mackerel_map_io(void)
1030 1231
1031#define GPIO_PORT9CR 0xE6051009 1232#define GPIO_PORT9CR 0xE6051009
1032#define GPIO_PORT10CR 0xE605100A 1233#define GPIO_PORT10CR 0xE605100A
1234#define GPIO_PORT168CR 0xE60520A8
1033#define SRCR4 0xe61580bc 1235#define SRCR4 0xe61580bc
1034#define USCCR1 0xE6058144 1236#define USCCR1 0xE6058144
1035static void __init mackerel_init(void) 1237static void __init mackerel_init(void)
@@ -1088,6 +1290,7 @@ static void __init mackerel_init(void)
1088 gpio_request(GPIO_FN_OVCN_1_114, NULL); 1290 gpio_request(GPIO_FN_OVCN_1_114, NULL);
1089 gpio_request(GPIO_FN_EXTLP_1, NULL); 1291 gpio_request(GPIO_FN_EXTLP_1, NULL);
1090 gpio_request(GPIO_FN_OVCN2_1, NULL); 1292 gpio_request(GPIO_FN_OVCN2_1, NULL);
1293 gpio_pull_down(GPIO_PORT168CR);
1091 1294
1092 /* setup USB phy */ 1295 /* setup USB phy */
1093 __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */ 1296 __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */
@@ -1140,7 +1343,7 @@ static void __init mackerel_init(void)
1140 gpio_request(GPIO_FN_SDHID0_1, NULL); 1343 gpio_request(GPIO_FN_SDHID0_1, NULL);
1141 gpio_request(GPIO_FN_SDHID0_0, NULL); 1344 gpio_request(GPIO_FN_SDHID0_0, NULL);
1142 1345
1143#if !defined(CONFIG_MMC_SH_MMCIF) 1346#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
1144 /* enable SDHI1 */ 1347 /* enable SDHI1 */
1145 gpio_request(GPIO_FN_SDHICMD1, NULL); 1348 gpio_request(GPIO_FN_SDHICMD1, NULL);
1146 gpio_request(GPIO_FN_SDHICLK1, NULL); 1349 gpio_request(GPIO_FN_SDHICLK1, NULL);
@@ -1216,6 +1419,7 @@ static void __init mackerel_init(void)
1216 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); 1419 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
1217 1420
1218 hdmi_init_pm_clock(); 1421 hdmi_init_pm_clock();
1422 sh7372_pm_init();
1219} 1423}
1220 1424
1221static void __init mackerel_timer_init(void) 1425static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index e9731b5a73ed..d17eb66f4ac2 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -44,6 +44,11 @@
44#define DSI1PCKCR 0xe6150098 44#define DSI1PCKCR 0xe6150098
45#define PLLC01CR 0xe6150028 45#define PLLC01CR 0xe6150028
46#define PLLC2CR 0xe615002c 46#define PLLC2CR 0xe615002c
47#define RMSTPCR0 0xe6150110
48#define RMSTPCR1 0xe6150114
49#define RMSTPCR2 0xe6150118
50#define RMSTPCR3 0xe615011c
51#define RMSTPCR4 0xe6150120
47#define SMSTPCR0 0xe6150130 52#define SMSTPCR0 0xe6150130
48#define SMSTPCR1 0xe6150134 53#define SMSTPCR1 0xe6150134
49#define SMSTPCR2 0xe6150138 54#define SMSTPCR2 0xe6150138
@@ -421,9 +426,6 @@ static unsigned long fsidiv_recalc(struct clk *clk)
421 426
422 value = __raw_readl(clk->mapping->base); 427 value = __raw_readl(clk->mapping->base);
423 428
424 if ((value & 0x3) != 0x3)
425 return 0;
426
427 value >>= 16; 429 value >>= 16;
428 if (value < 2) 430 if (value < 2)
429 return 0; 431 return 0;
@@ -504,7 +506,7 @@ static struct clk *late_main_clks[] = {
504enum { MSTP001, 506enum { MSTP001,
505 MSTP131, MSTP130, 507 MSTP131, MSTP130,
506 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, 508 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
507 MSTP118, MSTP117, MSTP116, 509 MSTP118, MSTP117, MSTP116, MSTP113,
508 MSTP106, MSTP101, MSTP100, 510 MSTP106, MSTP101, MSTP100,
509 MSTP223, 511 MSTP223,
510 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 512 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
@@ -527,6 +529,7 @@ static struct clk mstp_clks[MSTP_NR] = {
527 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ 529 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
528 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ 530 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
529 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ 531 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
532 [MSTP113] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 13, 0), /* MERAM */
530 [MSTP106] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 6, 0), /* JPU */ 533 [MSTP106] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 6, 0), /* JPU */
531 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */ 534 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
532 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ 535 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
@@ -617,6 +620,7 @@ static struct clk_lookup lookups[] = {
617 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX0 */ 620 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX0 */
618 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ 621 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
619 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ 622 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
623 CLKDEV_DEV_ID("sh_mobile_meram.0", &mstp_clks[MSTP113]), /* MERAM */
620 CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[MSTP106]), /* JPU */ 624 CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[MSTP106]), /* JPU */
621 CLKDEV_DEV_ID("uio_pdrv_genirq.0", &mstp_clks[MSTP101]), /* VPU */ 625 CLKDEV_DEV_ID("uio_pdrv_genirq.0", &mstp_clks[MSTP101]), /* VPU */
622 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 626 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
@@ -634,6 +638,7 @@ static struct clk_lookup lookups[] = {
634 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */ 638 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
635 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ 639 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
636 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */ 640 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
641 CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */
637 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ 642 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
638 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ 643 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
639 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */ 644 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
@@ -644,6 +649,7 @@ static struct clk_lookup lookups[] = {
644 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */ 649 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */
645 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */ 650 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
646 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */ 651 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
652 CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */
647 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ 653 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
648 654
649 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), 655 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
@@ -655,6 +661,13 @@ void __init sh7372_clock_init(void)
655{ 661{
656 int k, ret = 0; 662 int k, ret = 0;
657 663
664 /* make sure MSTP bits on the RT/SH4AL-DSP side are off */
665 __raw_writel(0xe4ef8087, RMSTPCR0);
666 __raw_writel(0xffffffff, RMSTPCR1);
667 __raw_writel(0x37c7f7ff, RMSTPCR2);
668 __raw_writel(0xffffffff, RMSTPCR3);
669 __raw_writel(0xffe0fffd, RMSTPCR4);
670
658 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) 671 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
659 ret = clk_register(main_clks[k]); 672 ret = clk_register(main_clks[k]);
660 673
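The new CLKDEV_DEV_ID() entries are what let the corresponding drivers find their MSTP gate by device name alone (e.g. "sh_mobile_meram.0" or "renesas_usbhs.0", con_id left NULL). An illustrative sketch of the consumer side, not taken from any of these drivers:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* enable the MSTP clock matched by the device name in clkdev */
static int example_enable_mstp_clock(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_enable(clk);	/* a real driver would clk_put() on remove */
}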
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 7e58904c1c8c..bcacb1e8cf85 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -266,7 +266,8 @@ enum { MSTP001,
266 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, 266 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
267 MSTP219, 267 MSTP219,
268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, 269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
270 MSTP314, MSTP313, MSTP312, MSTP311,
270 MSTP411, MSTP410, MSTP403, 271 MSTP411, MSTP410, MSTP403,
271 MSTP_NR }; 272 MSTP_NR };
272 273
@@ -295,7 +296,11 @@ static struct clk mstp_clks[MSTP_NR] = {
295 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ 296 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
296 [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */ 297 [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
297 [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */ 298 [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
299 [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
300 [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
301 [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
298 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */ 302 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
303 [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
299 [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */ 304 [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
300 [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */ 305 [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
301 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ 306 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -313,6 +318,9 @@ static struct clk_lookup lookups[] = {
313 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), 318 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
314 CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), 319 CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
315 CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), 320 CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
321 CLKDEV_CON_ID("sdhi0_clk", &div6_clks[DIV6_SDHI0]),
322 CLKDEV_CON_ID("sdhi1_clk", &div6_clks[DIV6_SDHI1]),
323 CLKDEV_CON_ID("sdhi2_clk", &div6_clks[DIV6_SDHI2]),
316 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), 324 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
317 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), 325 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
318 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), 326 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -341,7 +349,11 @@ static struct clk_lookup lookups[] = {
341 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */ 349 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
342 CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */ 350 CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
343 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */ 351 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
352 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
353 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
354 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
344 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */ 355 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
356 CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
345 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */ 357 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
346 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */ 358 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
347 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ 359 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
@@ -351,6 +363,11 @@ void __init sh73a0_clock_init(void)
351{ 363{
352 int k, ret = 0; 364 int k, ret = 0;
353 365
366 /* Set SDHI clocks to a known state */
367 __raw_writel(0x108, SD0CKCR);
368 __raw_writel(0x108, SD1CKCR);
369 __raw_writel(0x108, SD2CKCR);
370
354 /* detect main clock parent */ 371 /* detect main clock parent */
355 switch ((__raw_readl(CKSCR) >> 24) & 0x03) { 372 switch ((__raw_readl(CKSCR) >> 24) & 0x03) {
356 case 0: 373 case 0:
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
new file mode 100644
index 000000000000..2e44f11f592e
--- /dev/null
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -0,0 +1,92 @@
1/*
2 * CPUIdle support code for SH-Mobile ARM
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pm.h>
12#include <linux/cpuidle.h>
13#include <linux/suspend.h>
14#include <linux/module.h>
15#include <linux/err.h>
16#include <asm/system.h>
17#include <asm/io.h>
18
19static void shmobile_enter_wfi(void)
20{
21 cpu_do_idle();
22}
23
24void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
25 shmobile_enter_wfi, /* regular sleep mode */
26};
27
28static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
29 struct cpuidle_state *state)
30{
31 ktime_t before, after;
32 int requested_state = state - &dev->states[0];
33
34 dev->last_state = &dev->states[requested_state];
35 before = ktime_get();
36
37 local_irq_disable();
38 local_fiq_disable();
39
40 shmobile_cpuidle_modes[requested_state]();
41
42 local_irq_enable();
43 local_fiq_enable();
44
45 after = ktime_get();
46 return ktime_to_ns(ktime_sub(after, before)) >> 10;
47}
48
49static struct cpuidle_device shmobile_cpuidle_dev;
50static struct cpuidle_driver shmobile_cpuidle_driver = {
51 .name = "shmobile_cpuidle",
52 .owner = THIS_MODULE,
53};
54
55void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev);
56
57static int shmobile_cpuidle_init(void)
58{
59 struct cpuidle_device *dev = &shmobile_cpuidle_dev;
60 struct cpuidle_state *state;
61 int i;
62
63 cpuidle_register_driver(&shmobile_cpuidle_driver);
64
65 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
66 dev->states[i].name[0] = '\0';
67 dev->states[i].desc[0] = '\0';
68 dev->states[i].enter = shmobile_cpuidle_enter;
69 }
70
71 i = CPUIDLE_DRIVER_STATE_START;
72
73 state = &dev->states[i++];
74 snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
75 strncpy(state->desc, "WFI", CPUIDLE_DESC_LEN);
76 state->exit_latency = 1;
77 state->target_residency = 1 * 2;
78 state->power_usage = 3;
79 state->flags = 0;
80 state->flags |= CPUIDLE_FLAG_TIME_VALID;
81
82 dev->safe_state = state;
83 dev->state_count = i;
84
85 if (shmobile_cpuidle_setup)
86 shmobile_cpuidle_setup(dev);
87
88 cpuidle_register_device(dev);
89
90 return 0;
91}
92late_initcall(shmobile_cpuidle_init);
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index d4cec6b4c7d9..26079d933d91 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -24,4 +24,4 @@
24 .align 12 24 .align 12
25ENTRY(shmobile_secondary_vector) 25ENTRY(shmobile_secondary_vector)
26 ldr pc, 1f 26 ldr pc, 1f
271: .long secondary_startup - PAGE_OFFSET + PHYS_OFFSET 271: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 013ac0ee8256..06aecb31d9c7 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -8,6 +8,10 @@ struct clk;
8extern int clk_init(void); 8extern int clk_init(void);
9extern void shmobile_handle_irq_intc(struct pt_regs *); 9extern void shmobile_handle_irq_intc(struct pt_regs *);
10extern void shmobile_handle_irq_gic(struct pt_regs *); 10extern void shmobile_handle_irq_gic(struct pt_regs *);
11extern struct platform_suspend_ops shmobile_suspend_ops;
12struct cpuidle_device;
13extern void (*shmobile_cpuidle_modes[])(void);
14extern void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev);
11 15
12extern void sh7367_init_irq(void); 16extern void sh7367_init_irq(void);
13extern void sh7367_add_early_devices(void); 17extern void sh7367_add_early_devices(void);
@@ -30,6 +34,9 @@ extern void sh7372_add_early_devices(void);
30extern void sh7372_add_standard_devices(void); 34extern void sh7372_add_standard_devices(void);
31extern void sh7372_clock_init(void); 35extern void sh7372_clock_init(void);
32extern void sh7372_pinmux_init(void); 36extern void sh7372_pinmux_init(void);
37extern void sh7372_pm_init(void);
38extern void sh7372_cpu_suspend(void);
39extern void sh7372_cpu_resume(void);
33extern struct clk sh7372_extal1_clk; 40extern struct clk sh7372_extal1_clk;
34extern struct clk sh7372_extal2_clk; 41extern struct clk sh7372_extal2_clk;
35 42
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index 3029aba38688..9f134dfeffdc 100644
--- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -87,8 +87,7 @@ WAIT 1, 0xFE40009C
87ED 0xFE400354, 0x01AD8002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11
91EB 0xE6053098, 0xe1 90EB 0xE6053098, 0xe1
92EW 0xE6C40000, 0x0000 91EW 0xE6C40000, 0x0000
93EB 0xE6C40004, 0x19 92EB 0xE6C40004, 0x19
94EW 0xE6C40008, 0x3000 93EW 0xE6C40008, 0x0030
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
index 3029aba38688..9f134dfeffdc 100644
--- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -87,8 +87,7 @@ WAIT 1, 0xFE40009C
87ED 0xFE400354, 0x01AD8002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11
91EB 0xE6053098, 0xe1 90EB 0xE6053098, 0xe1
92EW 0xE6C40000, 0x0000 91EW 0xE6C40000, 0x0000
93EB 0xE6C40004, 0x19 92EB 0xE6C40004, 0x19
94EW 0xE6C40008, 0x3000 93EW 0xE6C40008, 0x0030
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 5736efcca60c..df20d7670172 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -435,6 +435,7 @@ enum {
435 435
436/* DMA slave IDs */ 436/* DMA slave IDs */
437enum { 437enum {
438 SHDMA_SLAVE_INVALID,
438 SHDMA_SLAVE_SCIF0_TX, 439 SHDMA_SLAVE_SCIF0_TX,
439 SHDMA_SLAVE_SCIF0_RX, 440 SHDMA_SLAVE_SCIF0_RX,
440 SHDMA_SLAVE_SCIF1_TX, 441 SHDMA_SLAVE_SCIF1_TX,
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
index ceb2cdc92bf9..216c3d695ef1 100644
--- a/arch/arm/mach-shmobile/include/mach/sh73a0.h
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -463,5 +463,35 @@ enum {
463 GPIO_FN_FSIAIBT_PU, 463 GPIO_FN_FSIAIBT_PU,
464 GPIO_FN_FSIAISLD_PU, 464 GPIO_FN_FSIAISLD_PU,
465}; 465};
466/* DMA slave IDs */
467enum {
468 SHDMA_SLAVE_INVALID,
469 SHDMA_SLAVE_SCIF0_TX,
470 SHDMA_SLAVE_SCIF0_RX,
471 SHDMA_SLAVE_SCIF1_TX,
472 SHDMA_SLAVE_SCIF1_RX,
473 SHDMA_SLAVE_SCIF2_TX,
474 SHDMA_SLAVE_SCIF2_RX,
475 SHDMA_SLAVE_SCIF3_TX,
476 SHDMA_SLAVE_SCIF3_RX,
477 SHDMA_SLAVE_SCIF4_TX,
478 SHDMA_SLAVE_SCIF4_RX,
479 SHDMA_SLAVE_SCIF5_TX,
480 SHDMA_SLAVE_SCIF5_RX,
481 SHDMA_SLAVE_SCIF6_TX,
482 SHDMA_SLAVE_SCIF6_RX,
483 SHDMA_SLAVE_SCIF7_TX,
484 SHDMA_SLAVE_SCIF7_RX,
485 SHDMA_SLAVE_SCIF8_TX,
486 SHDMA_SLAVE_SCIF8_RX,
487 SHDMA_SLAVE_SDHI0_TX,
488 SHDMA_SLAVE_SDHI0_RX,
489 SHDMA_SLAVE_SDHI1_TX,
490 SHDMA_SLAVE_SDHI1_RX,
491 SHDMA_SLAVE_SDHI2_TX,
492 SHDMA_SLAVE_SDHI2_RX,
493 SHDMA_SLAVE_MMCIF_TX,
494 SHDMA_SLAVE_MMCIF_RX,
495};
466 496
467#endif /* __ASM_SH73A0_H__ */ 497#endif /* __ASM_SH73A0_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 7a4960f9c1e3..3b28743c77eb 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -27,8 +27,6 @@
27 27
28enum { 28enum {
29 UNUSED_INTCA = 0, 29 UNUSED_INTCA = 0,
30 ENABLED,
31 DISABLED,
32 30
33 /* interrupt sources INTCA */ 31 /* interrupt sources INTCA */
34 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, 32 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -49,14 +47,14 @@ enum {
49 MSIOF2, MSIOF1, 47 MSIOF2, MSIOF1,
50 SCIFA4, SCIFA5, SCIFB, 48 SCIFA4, SCIFA5, SCIFB,
51 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, 49 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
52 SDHI0, 50 SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3,
53 SDHI1, 51 SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2,
54 IRREM, 52 IRREM,
55 IRDA, 53 IRDA,
56 TPU0, 54 TPU0,
57 TTI20, 55 TTI20,
58 DDM, 56 DDM,
59 SDHI2, 57 SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3,
60 RWDT0, 58 RWDT0,
61 DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3, 59 DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3,
62 DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR, 60 DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR,
@@ -84,7 +82,7 @@ enum {
84 82
85 /* interrupt groups INTCA */ 83 /* interrupt groups INTCA */
86 DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, 84 DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT,
87 AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1 85 AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2
88}; 86};
89 87
90static struct intc_vect intca_vectors[] __initdata = { 88static struct intc_vect intca_vectors[] __initdata = {
@@ -125,17 +123,17 @@ static struct intc_vect intca_vectors[] __initdata = {
125 INTC_VECT(SCIFB, 0x0d60), 123 INTC_VECT(SCIFB, 0x0d60),
126 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), 124 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
127 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), 125 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
128 INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), 126 INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20),
129 INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), 127 INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60),
130 INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), 128 INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0),
131 INTC_VECT(SDHI1, 0x0ec0), 129 INTC_VECT(SDHI1_SDHI1I2, 0x0ec0),
132 INTC_VECT(IRREM, 0x0f60), 130 INTC_VECT(IRREM, 0x0f60),
133 INTC_VECT(IRDA, 0x0480), 131 INTC_VECT(IRDA, 0x0480),
134 INTC_VECT(TPU0, 0x04a0), 132 INTC_VECT(TPU0, 0x04a0),
135 INTC_VECT(TTI20, 0x1100), 133 INTC_VECT(TTI20, 0x1100),
136 INTC_VECT(DDM, 0x1140), 134 INTC_VECT(DDM, 0x1140),
137 INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220), 135 INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220),
138 INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260), 136 INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260),
139 INTC_VECT(RWDT0, 0x1280), 137 INTC_VECT(RWDT0, 0x1280),
140 INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020), 138 INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020),
141 INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060), 139 INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060),
@@ -195,6 +193,12 @@ static struct intc_group intca_groups[] __initdata = {
195 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, 193 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
196 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), 194 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
197 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), 195 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
196 INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
197 SDHI0_SDHI0I2, SDHI0_SDHI0I3),
198 INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
199 SDHI1_SDHI1I2),
200 INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1,
201 SDHI2_SDHI2I2, SDHI2_SDHI2I3),
198 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), 202 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
199}; 203};
200 204
@@ -230,10 +234,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
230 { SCIFB, SCIFA5, SCIFA4, MSIOF1, 234 { SCIFB, SCIFA5, SCIFA4, MSIOF1,
231 0, 0, MSIOF2, 0 } }, 235 0, 0, MSIOF2, 0 } },
232 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ 236 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
233 { DISABLED, ENABLED, ENABLED, ENABLED, 237 { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0,
234 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, 238 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
235 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ 239 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
236 { 0, ENABLED, ENABLED, ENABLED, 240 { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0,
237 TTI20, USBHSDMAC0_USHDMI, 0, 0 } }, 241 TTI20, USBHSDMAC0_USHDMI, 0, 0 } },
238 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ 242 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
239 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, 243 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -248,7 +252,7 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
248 { 0, 0, TPU0, 0, 252 { 0, 0, TPU0, 0,
249 0, 0, 0, 0 } }, 253 0, 0, 0, 0 } },
250 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ 254 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
251 { DISABLED, DISABLED, ENABLED, ENABLED, 255 { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0,
252 0, CMT3, 0, RWDT0 } }, 256 0, CMT3, 0, RWDT0 } },
253 { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */ 257 { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */
254 { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0, 258 { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0,
@@ -354,14 +358,10 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
354 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, 358 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } },
355}; 359};
356 360
357static struct intc_desc intca_desc __initdata = { 361static DECLARE_INTC_DESC_ACK(intca_desc, "sh7372-intca",
358 .name = "sh7372-intca", 362 intca_vectors, intca_groups,
359 .force_enable = ENABLED, 363 intca_mask_registers, intca_prio_registers,
360 .force_disable = DISABLED, 364 intca_sense_registers, intca_ack_registers);
361 .hw = INTC_HW_DESC(intca_vectors, intca_groups,
362 intca_mask_registers, intca_prio_registers,
363 intca_sense_registers, intca_ack_registers),
364};
365 365
366enum { 366enum {
367 UNUSED_INTCS = 0, 367 UNUSED_INTCS = 0,
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
new file mode 100644
index 000000000000..8e4aadf14c9f
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -0,0 +1,108 @@
1/*
2 * sh7372 Power management support
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pm.h>
12#include <linux/suspend.h>
13#include <linux/cpuidle.h>
14#include <linux/module.h>
15#include <linux/list.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <asm/system.h>
19#include <asm/io.h>
20#include <asm/tlbflush.h>
21#include <mach/common.h>
22
23#define SMFRAM 0xe6a70000
24#define SYSTBCR 0xe6150024
25#define SBAR 0xe6180020
26#define APARMBAREA 0xe6f10020
27
28static void sh7372_enter_core_standby(void)
29{
30 void __iomem *smfram = (void __iomem *)SMFRAM;
31
32 __raw_writel(0, APARMBAREA); /* translate 4k */
33 __raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
34 __raw_writel(0x10, SYSTBCR); /* enable core standby */
35
36 __raw_writel(0, smfram + 0x3c); /* clear page table address */
37
38 sh7372_cpu_suspend();
39 cpu_init();
40
41 /* if page table address is non-NULL then we have been powered down */
42 if (__raw_readl(smfram + 0x3c)) {
43 __raw_writel(__raw_readl(smfram + 0x40),
44 __va(__raw_readl(smfram + 0x3c)));
45
46 flush_tlb_all();
47 set_cr(__raw_readl(smfram + 0x38));
48 }
49
50 __raw_writel(0, SYSTBCR); /* disable core standby */
51 __raw_writel(0, SBAR); /* disable reset vector translation */
52}
53
54#ifdef CONFIG_CPU_IDLE
55static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
56{
57 struct cpuidle_state *state;
58 int i = dev->state_count;
59
60 state = &dev->states[i];
61 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
62 strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
63 state->exit_latency = 10;
64 state->target_residency = 20 + 10;
65 state->power_usage = 1; /* perhaps not */
66 state->flags = 0;
67 state->flags |= CPUIDLE_FLAG_TIME_VALID;
68 shmobile_cpuidle_modes[i] = sh7372_enter_core_standby;
69
70 dev->state_count = i + 1;
71}
72
73static void sh7372_cpuidle_init(void)
74{
75 shmobile_cpuidle_setup = sh7372_cpuidle_setup;
76}
77#else
78static void sh7372_cpuidle_init(void) {}
79#endif
80
81#ifdef CONFIG_SUSPEND
82static int sh7372_enter_suspend(suspend_state_t suspend_state)
83{
84 sh7372_enter_core_standby();
85 return 0;
86}
87
88static void sh7372_suspend_init(void)
89{
90 shmobile_suspend_ops.enter = sh7372_enter_suspend;
91}
92#else
93static void sh7372_suspend_init(void) {}
94#endif
95
96#define DBGREG1 0xe6100020
97#define DBGREG9 0xe6100040
98
99void __init sh7372_pm_init(void)
100{
101 /* enable DBG hardware block to kick SYSC */
102 __raw_writel(0x0000a500, DBGREG9);
103 __raw_writel(0x0000a501, DBGREG9);
104 __raw_writel(0x00000000, DBGREG1);
105
106 sh7372_suspend_init();
107 sh7372_cpuidle_init();
108}
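
The CPU-idle half of this new file only fills in hooks: sh7372_cpuidle_setup() appends a "C2" (Core Standby) state at index dev->state_count and records its low-level enter routine in shmobile_cpuidle_modes[]; the common mach-shmobile cpuidle glue that consumes those hooks is not part of this hunk. A rough, self-contained model of the hook pattern follows -- every name other than shmobile_cpuidle_modes and shmobile_cpuidle_setup is invented here for illustration, and the real code registers states through the kernel cpuidle framework rather than calling them in a loop:

	#include <stdio.h>

	#define MAX_MODES 8

	/* modelled after the hooks used above; not the kernel implementation */
	static void (*shmobile_cpuidle_modes[MAX_MODES])(void);
	static void (*shmobile_cpuidle_setup)(int *state_count);

	static void default_wfi_mode(void)		/* stand-in for a shallow C1 state */
	{
		printf("enter C1\n");
	}

	static void core_standby_mode(void)		/* stand-in for sh7372_enter_core_standby() */
	{
		printf("enter C2 (Core Standby)\n");
	}

	/* stand-in for sh7372_cpuidle_setup(): append one deeper state */
	static void sh7372_setup(int *state_count)
	{
		shmobile_cpuidle_modes[*state_count] = core_standby_mode;
		(*state_count)++;
	}

	int main(void)
	{
		int state_count = 0;

		shmobile_cpuidle_modes[state_count++] = default_wfi_mode;
		shmobile_cpuidle_setup = sh7372_setup;	/* what sh7372_cpuidle_init() does */

		if (shmobile_cpuidle_setup)
			shmobile_cpuidle_setup(&state_count);

		/* the common code presumably dispatches by state index */
		for (int i = 0; i < state_count; i++)
			shmobile_cpuidle_modes[i]();
		return 0;
	}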
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index ce28141662da..2c10190dbb55 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/uio_driver.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/input.h> 27#include <linux/input.h>
27#include <linux/io.h> 28#include <linux/io.h>
@@ -195,6 +196,214 @@ static struct platform_device cmt10_device = {
195 .num_resources = ARRAY_SIZE(cmt10_resources), 196 .num_resources = ARRAY_SIZE(cmt10_resources),
196}; 197};
197 198
199/* VPU */
200static struct uio_info vpu_platform_data = {
201 .name = "VPU5",
202 .version = "0",
203 .irq = intcs_evt2irq(0x980),
204};
205
206static struct resource vpu_resources[] = {
207 [0] = {
208 .name = "VPU",
209 .start = 0xfe900000,
210 .end = 0xfe902807,
211 .flags = IORESOURCE_MEM,
212 },
213};
214
215static struct platform_device vpu_device = {
216 .name = "uio_pdrv_genirq",
217 .id = 0,
218 .dev = {
219 .platform_data = &vpu_platform_data,
220 },
221 .resource = vpu_resources,
222 .num_resources = ARRAY_SIZE(vpu_resources),
223};
224
225/* VEU0 */
226static struct uio_info veu0_platform_data = {
227 .name = "VEU0",
228 .version = "0",
229 .irq = intcs_evt2irq(0x700),
230};
231
232static struct resource veu0_resources[] = {
233 [0] = {
234 .name = "VEU0",
235 .start = 0xfe920000,
236 .end = 0xfe9200b7,
237 .flags = IORESOURCE_MEM,
238 },
239};
240
241static struct platform_device veu0_device = {
242 .name = "uio_pdrv_genirq",
243 .id = 1,
244 .dev = {
245 .platform_data = &veu0_platform_data,
246 },
247 .resource = veu0_resources,
248 .num_resources = ARRAY_SIZE(veu0_resources),
249};
250
251/* VEU1 */
252static struct uio_info veu1_platform_data = {
253 .name = "VEU1",
254 .version = "0",
255 .irq = intcs_evt2irq(0x720),
256};
257
258static struct resource veu1_resources[] = {
259 [0] = {
260 .name = "VEU1",
261 .start = 0xfe924000,
262 .end = 0xfe9240b7,
263 .flags = IORESOURCE_MEM,
264 },
265};
266
267static struct platform_device veu1_device = {
268 .name = "uio_pdrv_genirq",
269 .id = 2,
270 .dev = {
271 .platform_data = &veu1_platform_data,
272 },
273 .resource = veu1_resources,
274 .num_resources = ARRAY_SIZE(veu1_resources),
275};
276
277/* VEU2 */
278static struct uio_info veu2_platform_data = {
279 .name = "VEU2",
280 .version = "0",
281 .irq = intcs_evt2irq(0x740),
282};
283
284static struct resource veu2_resources[] = {
285 [0] = {
286 .name = "VEU2",
287 .start = 0xfe928000,
288 .end = 0xfe9280b7,
289 .flags = IORESOURCE_MEM,
290 },
291};
292
293static struct platform_device veu2_device = {
294 .name = "uio_pdrv_genirq",
295 .id = 3,
296 .dev = {
297 .platform_data = &veu2_platform_data,
298 },
299 .resource = veu2_resources,
300 .num_resources = ARRAY_SIZE(veu2_resources),
301};
302
303/* VEU3 */
304static struct uio_info veu3_platform_data = {
305 .name = "VEU3",
306 .version = "0",
307 .irq = intcs_evt2irq(0x760),
308};
309
310static struct resource veu3_resources[] = {
311 [0] = {
312 .name = "VEU3",
313 .start = 0xfe92c000,
314 .end = 0xfe92c0b7,
315 .flags = IORESOURCE_MEM,
316 },
317};
318
319static struct platform_device veu3_device = {
320 .name = "uio_pdrv_genirq",
321 .id = 4,
322 .dev = {
323 .platform_data = &veu3_platform_data,
324 },
325 .resource = veu3_resources,
326 .num_resources = ARRAY_SIZE(veu3_resources),
327};
328
329/* VEU2H */
330static struct uio_info veu2h_platform_data = {
331 .name = "VEU2H",
332 .version = "0",
333 .irq = intcs_evt2irq(0x520),
334};
335
336static struct resource veu2h_resources[] = {
337 [0] = {
338 .name = "VEU2H",
339 .start = 0xfe93c000,
340 .end = 0xfe93c27b,
341 .flags = IORESOURCE_MEM,
342 },
343};
344
345static struct platform_device veu2h_device = {
346 .name = "uio_pdrv_genirq",
347 .id = 5,
348 .dev = {
349 .platform_data = &veu2h_platform_data,
350 },
351 .resource = veu2h_resources,
352 .num_resources = ARRAY_SIZE(veu2h_resources),
353};
354
355/* JPU */
356static struct uio_info jpu_platform_data = {
357 .name = "JPU",
358 .version = "0",
359 .irq = intcs_evt2irq(0x560),
360};
361
362static struct resource jpu_resources[] = {
363 [0] = {
364 .name = "JPU",
365 .start = 0xfe980000,
366 .end = 0xfe9902d3,
367 .flags = IORESOURCE_MEM,
368 },
369};
370
371static struct platform_device jpu_device = {
372 .name = "uio_pdrv_genirq",
373 .id = 6,
374 .dev = {
375 .platform_data = &jpu_platform_data,
376 },
377 .resource = jpu_resources,
378 .num_resources = ARRAY_SIZE(jpu_resources),
379};
380
381/* SPU1 */
382static struct uio_info spu1_platform_data = {
383 .name = "SPU1",
384 .version = "0",
385 .irq = evt2irq(0xfc0),
386};
387
388static struct resource spu1_resources[] = {
389 [0] = {
390 .name = "SPU1",
391 .start = 0xfe300000,
392 .end = 0xfe3fffff,
393 .flags = IORESOURCE_MEM,
394 },
395};
396
397static struct platform_device spu1_device = {
398 .name = "uio_pdrv_genirq",
399 .id = 7,
400 .dev = {
401 .platform_data = &spu1_platform_data,
402 },
403 .resource = spu1_resources,
404 .num_resources = ARRAY_SIZE(spu1_resources),
405};
406
198static struct platform_device *sh7367_early_devices[] __initdata = { 407static struct platform_device *sh7367_early_devices[] __initdata = {
199 &scif0_device, 408 &scif0_device,
200 &scif1_device, 409 &scif1_device,
@@ -206,10 +415,24 @@ static struct platform_device *sh7367_early_devices[] __initdata = {
206 &cmt10_device, 415 &cmt10_device,
207}; 416};
208 417
418static struct platform_device *sh7367_devices[] __initdata = {
419 &vpu_device,
420 &veu0_device,
421 &veu1_device,
422 &veu2_device,
423 &veu3_device,
424 &veu2h_device,
425 &jpu_device,
426 &spu1_device,
427};
428
209void __init sh7367_add_standard_devices(void) 429void __init sh7367_add_standard_devices(void)
210{ 430{
211 platform_add_devices(sh7367_early_devices, 431 platform_add_devices(sh7367_early_devices,
212 ARRAY_SIZE(sh7367_early_devices)); 432 ARRAY_SIZE(sh7367_early_devices));
433
434 platform_add_devices(sh7367_devices,
435 ARRAY_SIZE(sh7367_devices));
213} 436}
214 437
215#define SYMSTPCR2 0xe6158048 438#define SYMSTPCR2 0xe6158048
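
All of the VPU/VEU/JPU/SPU blocks added above are exported through the generic "uio_pdrv_genirq" driver, so user space drives them via the standard UIO interface: mmap() on /dev/uioN exposes the register window declared in the resource, and a blocking 4-byte read() returns the interrupt count once the block raises its IRQ. A minimal sketch -- the /dev/uio0 node and the 0x1000 mapping length are assumptions; which uioN a given block ends up as depends on probe order, and the real map size should be read from /sys/class/uio/uioN/maps/map0/size:

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		int fd = open("/dev/uio0", O_RDWR);	/* e.g. the VPU instance (assumed) */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* map region 0 (the "VPU" resource); offset N*pagesize selects map N */
		size_t len = 0x1000;			/* assumption: real size is in sysfs */
		volatile uint32_t *regs = mmap(NULL, len, PROT_READ | PROT_WRITE,
					       MAP_SHARED, fd, 0 * getpagesize());
		if (regs == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		uint32_t enable = 1;
		write(fd, &enable, sizeof(enable));	/* uio_pdrv_genirq: (re)enable the IRQ */

		uint32_t count;
		read(fd, &count, sizeof(count));	/* blocks until the interrupt fires */
		printf("interrupt count: %u, first register: 0x%08x\n", count, regs[0]);

		munmap((void *)regs, len);
		close(fd);
		return 0;
	}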
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index ff0494f3d00c..cd807eea69e2 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/uio_driver.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/input.h> 27#include <linux/input.h>
27#include <linux/io.h> 28#include <linux/io.h>
@@ -601,6 +602,214 @@ static struct platform_device dma2_device = {
601 }, 602 },
602}; 603};
603 604
605/* VPU */
606static struct uio_info vpu_platform_data = {
607 .name = "VPU5HG",
608 .version = "0",
609 .irq = intcs_evt2irq(0x980),
610};
611
612static struct resource vpu_resources[] = {
613 [0] = {
614 .name = "VPU",
615 .start = 0xfe900000,
616 .end = 0xfe900157,
617 .flags = IORESOURCE_MEM,
618 },
619};
620
621static struct platform_device vpu_device = {
622 .name = "uio_pdrv_genirq",
623 .id = 0,
624 .dev = {
625 .platform_data = &vpu_platform_data,
626 },
627 .resource = vpu_resources,
628 .num_resources = ARRAY_SIZE(vpu_resources),
629};
630
631/* VEU0 */
632static struct uio_info veu0_platform_data = {
633 .name = "VEU0",
634 .version = "0",
635 .irq = intcs_evt2irq(0x700),
636};
637
638static struct resource veu0_resources[] = {
639 [0] = {
640 .name = "VEU0",
641 .start = 0xfe920000,
642 .end = 0xfe9200cb,
643 .flags = IORESOURCE_MEM,
644 },
645};
646
647static struct platform_device veu0_device = {
648 .name = "uio_pdrv_genirq",
649 .id = 1,
650 .dev = {
651 .platform_data = &veu0_platform_data,
652 },
653 .resource = veu0_resources,
654 .num_resources = ARRAY_SIZE(veu0_resources),
655};
656
657/* VEU1 */
658static struct uio_info veu1_platform_data = {
659 .name = "VEU1",
660 .version = "0",
661 .irq = intcs_evt2irq(0x720),
662};
663
664static struct resource veu1_resources[] = {
665 [0] = {
666 .name = "VEU1",
667 .start = 0xfe924000,
668 .end = 0xfe9240cb,
669 .flags = IORESOURCE_MEM,
670 },
671};
672
673static struct platform_device veu1_device = {
674 .name = "uio_pdrv_genirq",
675 .id = 2,
676 .dev = {
677 .platform_data = &veu1_platform_data,
678 },
679 .resource = veu1_resources,
680 .num_resources = ARRAY_SIZE(veu1_resources),
681};
682
683/* VEU2 */
684static struct uio_info veu2_platform_data = {
685 .name = "VEU2",
686 .version = "0",
687 .irq = intcs_evt2irq(0x740),
688};
689
690static struct resource veu2_resources[] = {
691 [0] = {
692 .name = "VEU2",
693 .start = 0xfe928000,
694 .end = 0xfe928307,
695 .flags = IORESOURCE_MEM,
696 },
697};
698
699static struct platform_device veu2_device = {
700 .name = "uio_pdrv_genirq",
701 .id = 3,
702 .dev = {
703 .platform_data = &veu2_platform_data,
704 },
705 .resource = veu2_resources,
706 .num_resources = ARRAY_SIZE(veu2_resources),
707};
708
709/* VEU3 */
710static struct uio_info veu3_platform_data = {
711 .name = "VEU3",
712 .version = "0",
713 .irq = intcs_evt2irq(0x760),
714};
715
716static struct resource veu3_resources[] = {
717 [0] = {
718 .name = "VEU3",
719 .start = 0xfe92c000,
720 .end = 0xfe92c307,
721 .flags = IORESOURCE_MEM,
722 },
723};
724
725static struct platform_device veu3_device = {
726 .name = "uio_pdrv_genirq",
727 .id = 4,
728 .dev = {
729 .platform_data = &veu3_platform_data,
730 },
731 .resource = veu3_resources,
732 .num_resources = ARRAY_SIZE(veu3_resources),
733};
734
735/* JPU */
736static struct uio_info jpu_platform_data = {
737 .name = "JPU",
738 .version = "0",
739 .irq = intcs_evt2irq(0x560),
740};
741
742static struct resource jpu_resources[] = {
743 [0] = {
744 .name = "JPU",
745 .start = 0xfe980000,
746 .end = 0xfe9902d3,
747 .flags = IORESOURCE_MEM,
748 },
749};
750
751static struct platform_device jpu_device = {
752 .name = "uio_pdrv_genirq",
753 .id = 5,
754 .dev = {
755 .platform_data = &jpu_platform_data,
756 },
757 .resource = jpu_resources,
758 .num_resources = ARRAY_SIZE(jpu_resources),
759};
760
761/* SPU2DSP0 */
762static struct uio_info spu0_platform_data = {
763 .name = "SPU2DSP0",
764 .version = "0",
765 .irq = evt2irq(0x1800),
766};
767
768static struct resource spu0_resources[] = {
769 [0] = {
770 .name = "SPU2DSP0",
771 .start = 0xfe200000,
772 .end = 0xfe2fffff,
773 .flags = IORESOURCE_MEM,
774 },
775};
776
777static struct platform_device spu0_device = {
778 .name = "uio_pdrv_genirq",
779 .id = 6,
780 .dev = {
781 .platform_data = &spu0_platform_data,
782 },
783 .resource = spu0_resources,
784 .num_resources = ARRAY_SIZE(spu0_resources),
785};
786
787/* SPU2DSP1 */
788static struct uio_info spu1_platform_data = {
789 .name = "SPU2DSP1",
790 .version = "0",
791 .irq = evt2irq(0x1820),
792};
793
794static struct resource spu1_resources[] = {
795 [0] = {
796 .name = "SPU2DSP1",
797 .start = 0xfe300000,
798 .end = 0xfe3fffff,
799 .flags = IORESOURCE_MEM,
800 },
801};
802
803static struct platform_device spu1_device = {
804 .name = "uio_pdrv_genirq",
805 .id = 7,
806 .dev = {
807 .platform_data = &spu1_platform_data,
808 },
809 .resource = spu1_resources,
810 .num_resources = ARRAY_SIZE(spu1_resources),
811};
812
604static struct platform_device *sh7372_early_devices[] __initdata = { 813static struct platform_device *sh7372_early_devices[] __initdata = {
605 &scif0_device, 814 &scif0_device,
606 &scif1_device, 815 &scif1_device,
@@ -620,6 +829,14 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
620 &dma0_device, 829 &dma0_device,
621 &dma1_device, 830 &dma1_device,
622 &dma2_device, 831 &dma2_device,
832 &vpu_device,
833 &veu0_device,
834 &veu1_device,
835 &veu2_device,
836 &veu3_device,
837 &jpu_device,
838 &spu0_device,
839 &spu1_device,
623}; 840};
624 841
625void __init sh7372_add_standard_devices(void) 842void __init sh7372_add_standard_devices(void)
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index 8099b0b8a934..bb405b8e459b 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/uio_driver.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/input.h> 27#include <linux/input.h>
27#include <linux/io.h> 28#include <linux/io.h>
@@ -38,7 +39,7 @@ static struct plat_sci_port scif0_platform_data = {
38 .flags = UPF_BOOT_AUTOCONF, 39 .flags = UPF_BOOT_AUTOCONF,
39 .scscr = SCSCR_RE | SCSCR_TE, 40 .scscr = SCSCR_RE | SCSCR_TE,
40 .scbrr_algo_id = SCBRR_ALGO_4, 41 .scbrr_algo_id = SCBRR_ALGO_4,
41 .type = PORT_SCIF, 42 .type = PORT_SCIFA,
42 .irqs = { evt2irq(0xc00), evt2irq(0xc00), 43 .irqs = { evt2irq(0xc00), evt2irq(0xc00),
43 evt2irq(0xc00), evt2irq(0xc00) }, 44 evt2irq(0xc00), evt2irq(0xc00) },
44}; 45};
@@ -57,7 +58,7 @@ static struct plat_sci_port scif1_platform_data = {
57 .flags = UPF_BOOT_AUTOCONF, 58 .flags = UPF_BOOT_AUTOCONF,
58 .scscr = SCSCR_RE | SCSCR_TE, 59 .scscr = SCSCR_RE | SCSCR_TE,
59 .scbrr_algo_id = SCBRR_ALGO_4, 60 .scbrr_algo_id = SCBRR_ALGO_4,
60 .type = PORT_SCIF, 61 .type = PORT_SCIFA,
61 .irqs = { evt2irq(0xc20), evt2irq(0xc20), 62 .irqs = { evt2irq(0xc20), evt2irq(0xc20),
62 evt2irq(0xc20), evt2irq(0xc20) }, 63 evt2irq(0xc20), evt2irq(0xc20) },
63}; 64};
@@ -76,7 +77,7 @@ static struct plat_sci_port scif2_platform_data = {
76 .flags = UPF_BOOT_AUTOCONF, 77 .flags = UPF_BOOT_AUTOCONF,
77 .scscr = SCSCR_RE | SCSCR_TE, 78 .scscr = SCSCR_RE | SCSCR_TE,
78 .scbrr_algo_id = SCBRR_ALGO_4, 79 .scbrr_algo_id = SCBRR_ALGO_4,
79 .type = PORT_SCIF, 80 .type = PORT_SCIFA,
80 .irqs = { evt2irq(0xc40), evt2irq(0xc40), 81 .irqs = { evt2irq(0xc40), evt2irq(0xc40),
81 evt2irq(0xc40), evt2irq(0xc40) }, 82 evt2irq(0xc40), evt2irq(0xc40) },
82}; 83};
@@ -95,7 +96,7 @@ static struct plat_sci_port scif3_platform_data = {
95 .flags = UPF_BOOT_AUTOCONF, 96 .flags = UPF_BOOT_AUTOCONF,
96 .scscr = SCSCR_RE | SCSCR_TE, 97 .scscr = SCSCR_RE | SCSCR_TE,
97 .scbrr_algo_id = SCBRR_ALGO_4, 98 .scbrr_algo_id = SCBRR_ALGO_4,
98 .type = PORT_SCIF, 99 .type = PORT_SCIFA,
99 .irqs = { evt2irq(0xc60), evt2irq(0xc60), 100 .irqs = { evt2irq(0xc60), evt2irq(0xc60),
100 evt2irq(0xc60), evt2irq(0xc60) }, 101 evt2irq(0xc60), evt2irq(0xc60) },
101}; 102};
@@ -114,7 +115,7 @@ static struct plat_sci_port scif4_platform_data = {
114 .flags = UPF_BOOT_AUTOCONF, 115 .flags = UPF_BOOT_AUTOCONF,
115 .scscr = SCSCR_RE | SCSCR_TE, 116 .scscr = SCSCR_RE | SCSCR_TE,
116 .scbrr_algo_id = SCBRR_ALGO_4, 117 .scbrr_algo_id = SCBRR_ALGO_4,
117 .type = PORT_SCIF, 118 .type = PORT_SCIFA,
118 .irqs = { evt2irq(0xd20), evt2irq(0xd20), 119 .irqs = { evt2irq(0xd20), evt2irq(0xd20),
119 evt2irq(0xd20), evt2irq(0xd20) }, 120 evt2irq(0xd20), evt2irq(0xd20) },
120}; 121};
@@ -133,7 +134,7 @@ static struct plat_sci_port scif5_platform_data = {
133 .flags = UPF_BOOT_AUTOCONF, 134 .flags = UPF_BOOT_AUTOCONF,
134 .scscr = SCSCR_RE | SCSCR_TE, 135 .scscr = SCSCR_RE | SCSCR_TE,
135 .scbrr_algo_id = SCBRR_ALGO_4, 136 .scbrr_algo_id = SCBRR_ALGO_4,
136 .type = PORT_SCIF, 137 .type = PORT_SCIFA,
137 .irqs = { evt2irq(0xd40), evt2irq(0xd40), 138 .irqs = { evt2irq(0xd40), evt2irq(0xd40),
138 evt2irq(0xd40), evt2irq(0xd40) }, 139 evt2irq(0xd40), evt2irq(0xd40) },
139}; 140};
@@ -152,7 +153,7 @@ static struct plat_sci_port scif6_platform_data = {
152 .flags = UPF_BOOT_AUTOCONF, 153 .flags = UPF_BOOT_AUTOCONF,
153 .scscr = SCSCR_RE | SCSCR_TE, 154 .scscr = SCSCR_RE | SCSCR_TE,
154 .scbrr_algo_id = SCBRR_ALGO_4, 155 .scbrr_algo_id = SCBRR_ALGO_4,
155 .type = PORT_SCIF, 156 .type = PORT_SCIFA,
156 .irqs = { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80), 157 .irqs = { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80),
157 intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) }, 158 intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) },
158}; 159};
@@ -171,7 +172,7 @@ static struct plat_sci_port scif7_platform_data = {
171 .flags = UPF_BOOT_AUTOCONF, 172 .flags = UPF_BOOT_AUTOCONF,
172 .scscr = SCSCR_RE | SCSCR_TE, 173 .scscr = SCSCR_RE | SCSCR_TE,
173 .scbrr_algo_id = SCBRR_ALGO_4, 174 .scbrr_algo_id = SCBRR_ALGO_4,
174 .type = PORT_SCIF, 175 .type = PORT_SCIFB,
175 .irqs = { evt2irq(0xd60), evt2irq(0xd60), 176 .irqs = { evt2irq(0xd60), evt2irq(0xd60),
176 evt2irq(0xd60), evt2irq(0xd60) }, 177 evt2irq(0xd60), evt2irq(0xd60) },
177}; 178};
@@ -215,6 +216,214 @@ static struct platform_device cmt10_device = {
215 .num_resources = ARRAY_SIZE(cmt10_resources), 216 .num_resources = ARRAY_SIZE(cmt10_resources),
216}; 217};
217 218
219/* VPU */
220static struct uio_info vpu_platform_data = {
221 .name = "VPU5HG",
222 .version = "0",
223 .irq = intcs_evt2irq(0x980),
224};
225
226static struct resource vpu_resources[] = {
227 [0] = {
228 .name = "VPU",
229 .start = 0xfe900000,
230 .end = 0xfe900157,
231 .flags = IORESOURCE_MEM,
232 },
233};
234
235static struct platform_device vpu_device = {
236 .name = "uio_pdrv_genirq",
237 .id = 0,
238 .dev = {
239 .platform_data = &vpu_platform_data,
240 },
241 .resource = vpu_resources,
242 .num_resources = ARRAY_SIZE(vpu_resources),
243};
244
245/* VEU0 */
246static struct uio_info veu0_platform_data = {
247 .name = "VEU0",
248 .version = "0",
249 .irq = intcs_evt2irq(0x700),
250};
251
252static struct resource veu0_resources[] = {
253 [0] = {
254 .name = "VEU0",
255 .start = 0xfe920000,
256 .end = 0xfe9200cb,
257 .flags = IORESOURCE_MEM,
258 },
259};
260
261static struct platform_device veu0_device = {
262 .name = "uio_pdrv_genirq",
263 .id = 1,
264 .dev = {
265 .platform_data = &veu0_platform_data,
266 },
267 .resource = veu0_resources,
268 .num_resources = ARRAY_SIZE(veu0_resources),
269};
270
271/* VEU1 */
272static struct uio_info veu1_platform_data = {
273 .name = "VEU1",
274 .version = "0",
275 .irq = intcs_evt2irq(0x720),
276};
277
278static struct resource veu1_resources[] = {
279 [0] = {
280 .name = "VEU1",
281 .start = 0xfe924000,
282 .end = 0xfe9240cb,
283 .flags = IORESOURCE_MEM,
284 },
285};
286
287static struct platform_device veu1_device = {
288 .name = "uio_pdrv_genirq",
289 .id = 2,
290 .dev = {
291 .platform_data = &veu1_platform_data,
292 },
293 .resource = veu1_resources,
294 .num_resources = ARRAY_SIZE(veu1_resources),
295};
296
297/* VEU2 */
298static struct uio_info veu2_platform_data = {
299 .name = "VEU2",
300 .version = "0",
301 .irq = intcs_evt2irq(0x740),
302};
303
304static struct resource veu2_resources[] = {
305 [0] = {
306 .name = "VEU2",
307 .start = 0xfe928000,
308 .end = 0xfe928307,
309 .flags = IORESOURCE_MEM,
310 },
311};
312
313static struct platform_device veu2_device = {
314 .name = "uio_pdrv_genirq",
315 .id = 3,
316 .dev = {
317 .platform_data = &veu2_platform_data,
318 },
319 .resource = veu2_resources,
320 .num_resources = ARRAY_SIZE(veu2_resources),
321};
322
323/* VEU3 */
324static struct uio_info veu3_platform_data = {
325 .name = "VEU3",
326 .version = "0",
327 .irq = intcs_evt2irq(0x760),
328};
329
330static struct resource veu3_resources[] = {
331 [0] = {
332 .name = "VEU3",
333 .start = 0xfe92c000,
334 .end = 0xfe92c307,
335 .flags = IORESOURCE_MEM,
336 },
337};
338
339static struct platform_device veu3_device = {
340 .name = "uio_pdrv_genirq",
341 .id = 4,
342 .dev = {
343 .platform_data = &veu3_platform_data,
344 },
345 .resource = veu3_resources,
346 .num_resources = ARRAY_SIZE(veu3_resources),
347};
348
349/* JPU */
350static struct uio_info jpu_platform_data = {
351 .name = "JPU",
352 .version = "0",
353 .irq = intcs_evt2irq(0x560),
354};
355
356static struct resource jpu_resources[] = {
357 [0] = {
358 .name = "JPU",
359 .start = 0xfe980000,
360 .end = 0xfe9902d3,
361 .flags = IORESOURCE_MEM,
362 },
363};
364
365static struct platform_device jpu_device = {
366 .name = "uio_pdrv_genirq",
367 .id = 5,
368 .dev = {
369 .platform_data = &jpu_platform_data,
370 },
371 .resource = jpu_resources,
372 .num_resources = ARRAY_SIZE(jpu_resources),
373};
374
375/* SPU2DSP0 */
376static struct uio_info spu0_platform_data = {
377 .name = "SPU2DSP0",
378 .version = "0",
379 .irq = evt2irq(0x1800),
380};
381
382static struct resource spu0_resources[] = {
383 [0] = {
384 .name = "SPU2DSP0",
385 .start = 0xfe200000,
386 .end = 0xfe2fffff,
387 .flags = IORESOURCE_MEM,
388 },
389};
390
391static struct platform_device spu0_device = {
392 .name = "uio_pdrv_genirq",
393 .id = 6,
394 .dev = {
395 .platform_data = &spu0_platform_data,
396 },
397 .resource = spu0_resources,
398 .num_resources = ARRAY_SIZE(spu0_resources),
399};
400
401/* SPU2DSP1 */
402static struct uio_info spu1_platform_data = {
403 .name = "SPU2DSP1",
404 .version = "0",
405 .irq = evt2irq(0x1820),
406};
407
408static struct resource spu1_resources[] = {
409 [0] = {
410 .name = "SPU2DSP1",
411 .start = 0xfe300000,
412 .end = 0xfe3fffff,
413 .flags = IORESOURCE_MEM,
414 },
415};
416
417static struct platform_device spu1_device = {
418 .name = "uio_pdrv_genirq",
419 .id = 7,
420 .dev = {
421 .platform_data = &spu1_platform_data,
422 },
423 .resource = spu1_resources,
424 .num_resources = ARRAY_SIZE(spu1_resources),
425};
426
218static struct platform_device *sh7377_early_devices[] __initdata = { 427static struct platform_device *sh7377_early_devices[] __initdata = {
219 &scif0_device, 428 &scif0_device,
220 &scif1_device, 429 &scif1_device,
@@ -227,10 +436,24 @@ static struct platform_device *sh7377_early_devices[] __initdata = {
227 &cmt10_device, 436 &cmt10_device,
228}; 437};
229 438
439static struct platform_device *sh7377_devices[] __initdata = {
440 &vpu_device,
441 &veu0_device,
442 &veu1_device,
443 &veu2_device,
444 &veu3_device,
445 &jpu_device,
446 &spu0_device,
447 &spu1_device,
448};
449
230void __init sh7377_add_standard_devices(void) 450void __init sh7377_add_standard_devices(void)
231{ 451{
232 platform_add_devices(sh7377_early_devices, 452 platform_add_devices(sh7377_early_devices,
233 ARRAY_SIZE(sh7377_early_devices)); 453 ARRAY_SIZE(sh7377_early_devices));
454
455 platform_add_devices(sh7377_devices,
456 ARRAY_SIZE(sh7377_devices));
234} 457}
235 458
236#define SMSTPCR3 0xe615013c 459#define SMSTPCR3 0xe615013c
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 685c40a2f5e6..e46821c0a62e 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -27,9 +27,11 @@
27#include <linux/input.h> 27#include <linux/input.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/serial_sci.h> 29#include <linux/serial_sci.h>
30#include <linux/sh_dma.h>
30#include <linux/sh_intc.h> 31#include <linux/sh_intc.h>
31#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
32#include <mach/hardware.h> 33#include <mach/hardware.h>
34#include <mach/sh73a0.h>
33#include <asm/mach-types.h> 35#include <asm/mach-types.h>
34#include <asm/mach/arch.h> 36#include <asm/mach/arch.h>
35 37
@@ -392,6 +394,242 @@ static struct platform_device i2c4_device = {
392 .num_resources = ARRAY_SIZE(i2c4_resources), 394 .num_resources = ARRAY_SIZE(i2c4_resources),
393}; 395};
394 396
397/* Transmit sizes and respective CHCR register values */
398enum {
399 XMIT_SZ_8BIT = 0,
400 XMIT_SZ_16BIT = 1,
401 XMIT_SZ_32BIT = 2,
402 XMIT_SZ_64BIT = 7,
403 XMIT_SZ_128BIT = 3,
404 XMIT_SZ_256BIT = 4,
405 XMIT_SZ_512BIT = 5,
406};
407
408/* log2(size / 8) - used to calculate number of transfers */
409#define TS_SHIFT { \
410 [XMIT_SZ_8BIT] = 0, \
411 [XMIT_SZ_16BIT] = 1, \
412 [XMIT_SZ_32BIT] = 2, \
413 [XMIT_SZ_64BIT] = 3, \
414 [XMIT_SZ_128BIT] = 4, \
415 [XMIT_SZ_256BIT] = 5, \
416 [XMIT_SZ_512BIT] = 6, \
417}
418
419#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | (((i) & 0xc) << (20 - 2)))
420#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
421#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
422
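
The TS_INDEX2VAL() packing above splits the transmit-size index across two CHCR fields: index bits 1:0 land at CHCR bits 4:3 (matching .ts_low_shift = 3 / .ts_low_mask = 0x18 in the platform data further down), and index bits 3:2 land at CHCR bits 21:20 (.ts_high_shift = 18 / .ts_high_mask = 0x00300000). A small standalone check of the arithmetic, reusing the macro verbatim:

	#include <stdio.h>

	#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | (((i) & 0xc) << (20 - 2)))

	int main(void)
	{
		/* indices as defined in the enum above */
		struct { const char *name; unsigned int idx; } sz[] = {
			{ "XMIT_SZ_8BIT",  0 },
			{ "XMIT_SZ_16BIT", 1 },
			{ "XMIT_SZ_32BIT", 2 },
			{ "XMIT_SZ_64BIT", 7 },
		};

		for (unsigned int i = 0; i < sizeof(sz) / sizeof(sz[0]); i++) {
			unsigned int v = TS_INDEX2VAL(sz[i].idx);
			printf("%-14s -> 0x%06x (low field 0x%02x, high field 0x%06x)\n",
			       sz[i].name, v, v & 0x18, v & 0x00300000);
		}
		/* e.g. XMIT_SZ_16BIT -> 0x000008, XMIT_SZ_64BIT -> 0x100018 */
		return 0;
	}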
423static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
424 {
425 .slave_id = SHDMA_SLAVE_SCIF0_TX,
426 .addr = 0xe6c40020,
427 .chcr = CHCR_TX(XMIT_SZ_8BIT),
428 .mid_rid = 0x21,
429 }, {
430 .slave_id = SHDMA_SLAVE_SCIF0_RX,
431 .addr = 0xe6c40024,
432 .chcr = CHCR_RX(XMIT_SZ_8BIT),
433 .mid_rid = 0x22,
434 }, {
435 .slave_id = SHDMA_SLAVE_SCIF1_TX,
436 .addr = 0xe6c50020,
437 .chcr = CHCR_TX(XMIT_SZ_8BIT),
438 .mid_rid = 0x25,
439 }, {
440 .slave_id = SHDMA_SLAVE_SCIF1_RX,
441 .addr = 0xe6c50024,
442 .chcr = CHCR_RX(XMIT_SZ_8BIT),
443 .mid_rid = 0x26,
444 }, {
445 .slave_id = SHDMA_SLAVE_SCIF2_TX,
446 .addr = 0xe6c60020,
447 .chcr = CHCR_TX(XMIT_SZ_8BIT),
448 .mid_rid = 0x29,
449 }, {
450 .slave_id = SHDMA_SLAVE_SCIF2_RX,
451 .addr = 0xe6c60024,
452 .chcr = CHCR_RX(XMIT_SZ_8BIT),
453 .mid_rid = 0x2a,
454 }, {
455 .slave_id = SHDMA_SLAVE_SCIF3_TX,
456 .addr = 0xe6c70020,
457 .chcr = CHCR_TX(XMIT_SZ_8BIT),
458 .mid_rid = 0x2d,
459 }, {
460 .slave_id = SHDMA_SLAVE_SCIF3_RX,
461 .addr = 0xe6c70024,
462 .chcr = CHCR_RX(XMIT_SZ_8BIT),
463 .mid_rid = 0x2e,
464 }, {
465 .slave_id = SHDMA_SLAVE_SCIF4_TX,
466 .addr = 0xe6c80020,
467 .chcr = CHCR_TX(XMIT_SZ_8BIT),
468 .mid_rid = 0x39,
469 }, {
470 .slave_id = SHDMA_SLAVE_SCIF4_RX,
471 .addr = 0xe6c80024,
472 .chcr = CHCR_RX(XMIT_SZ_8BIT),
473 .mid_rid = 0x3a,
474 }, {
475 .slave_id = SHDMA_SLAVE_SCIF5_TX,
476 .addr = 0xe6cb0020,
477 .chcr = CHCR_TX(XMIT_SZ_8BIT),
478 .mid_rid = 0x35,
479 }, {
480 .slave_id = SHDMA_SLAVE_SCIF5_RX,
481 .addr = 0xe6cb0024,
482 .chcr = CHCR_RX(XMIT_SZ_8BIT),
483 .mid_rid = 0x36,
484 }, {
485 .slave_id = SHDMA_SLAVE_SCIF6_TX,
486 .addr = 0xe6cc0020,
487 .chcr = CHCR_TX(XMIT_SZ_8BIT),
488 .mid_rid = 0x1d,
489 }, {
490 .slave_id = SHDMA_SLAVE_SCIF6_RX,
491 .addr = 0xe6cc0024,
492 .chcr = CHCR_RX(XMIT_SZ_8BIT),
493 .mid_rid = 0x1e,
494 }, {
495 .slave_id = SHDMA_SLAVE_SCIF7_TX,
496 .addr = 0xe6cd0020,
497 .chcr = CHCR_TX(XMIT_SZ_8BIT),
498 .mid_rid = 0x19,
499 }, {
500 .slave_id = SHDMA_SLAVE_SCIF7_RX,
501 .addr = 0xe6cd0024,
502 .chcr = CHCR_RX(XMIT_SZ_8BIT),
503 .mid_rid = 0x1a,
504 }, {
505 .slave_id = SHDMA_SLAVE_SCIF8_TX,
506 .addr = 0xe6c30040,
507 .chcr = CHCR_TX(XMIT_SZ_8BIT),
508 .mid_rid = 0x3d,
509 }, {
510 .slave_id = SHDMA_SLAVE_SCIF8_RX,
511 .addr = 0xe6c30060,
512 .chcr = CHCR_RX(XMIT_SZ_8BIT),
513 .mid_rid = 0x3e,
514 }, {
515 .slave_id = SHDMA_SLAVE_SDHI0_TX,
516 .addr = 0xee100030,
517 .chcr = CHCR_TX(XMIT_SZ_16BIT),
518 .mid_rid = 0xc1,
519 }, {
520 .slave_id = SHDMA_SLAVE_SDHI0_RX,
521 .addr = 0xee100030,
522 .chcr = CHCR_RX(XMIT_SZ_16BIT),
523 .mid_rid = 0xc2,
524 }, {
525 .slave_id = SHDMA_SLAVE_SDHI1_TX,
526 .addr = 0xee120030,
527 .chcr = CHCR_TX(XMIT_SZ_16BIT),
528 .mid_rid = 0xc9,
529 }, {
530 .slave_id = SHDMA_SLAVE_SDHI1_RX,
531 .addr = 0xee120030,
532 .chcr = CHCR_RX(XMIT_SZ_16BIT),
533 .mid_rid = 0xca,
534 }, {
535 .slave_id = SHDMA_SLAVE_SDHI2_TX,
536 .addr = 0xee140030,
537 .chcr = CHCR_TX(XMIT_SZ_16BIT),
538 .mid_rid = 0xcd,
539 }, {
540 .slave_id = SHDMA_SLAVE_SDHI2_RX,
541 .addr = 0xee140030,
542 .chcr = CHCR_RX(XMIT_SZ_16BIT),
543 .mid_rid = 0xce,
544 }, {
545 .slave_id = SHDMA_SLAVE_MMCIF_TX,
546 .addr = 0xe6bd0034,
547 .chcr = CHCR_TX(XMIT_SZ_32BIT),
548 .mid_rid = 0xd1,
549 }, {
550 .slave_id = SHDMA_SLAVE_MMCIF_RX,
551 .addr = 0xe6bd0034,
552 .chcr = CHCR_RX(XMIT_SZ_32BIT),
553 .mid_rid = 0xd2,
554 },
555};
556
557#define DMAE_CHANNEL(_offset) \
558 { \
559 .offset = _offset - 0x20, \
560 .dmars = _offset - 0x20 + 0x40, \
561 }
562
563static const struct sh_dmae_channel sh73a0_dmae_channels[] = {
564 DMAE_CHANNEL(0x8000),
565 DMAE_CHANNEL(0x8080),
566 DMAE_CHANNEL(0x8100),
567 DMAE_CHANNEL(0x8180),
568 DMAE_CHANNEL(0x8200),
569 DMAE_CHANNEL(0x8280),
570 DMAE_CHANNEL(0x8300),
571 DMAE_CHANNEL(0x8380),
572 DMAE_CHANNEL(0x8400),
573 DMAE_CHANNEL(0x8480),
574 DMAE_CHANNEL(0x8500),
575 DMAE_CHANNEL(0x8580),
576 DMAE_CHANNEL(0x8600),
577 DMAE_CHANNEL(0x8680),
578 DMAE_CHANNEL(0x8700),
579 DMAE_CHANNEL(0x8780),
580 DMAE_CHANNEL(0x8800),
581 DMAE_CHANNEL(0x8880),
582 DMAE_CHANNEL(0x8900),
583 DMAE_CHANNEL(0x8980),
584};
585
586static const unsigned int ts_shift[] = TS_SHIFT;
587
588static struct sh_dmae_pdata sh73a0_dmae_platform_data = {
589 .slave = sh73a0_dmae_slaves,
590 .slave_num = ARRAY_SIZE(sh73a0_dmae_slaves),
591 .channel = sh73a0_dmae_channels,
592 .channel_num = ARRAY_SIZE(sh73a0_dmae_channels),
593 .ts_low_shift = 3,
594 .ts_low_mask = 0x18,
595 .ts_high_shift = (20 - 2), /* 2 bits for shifted low TS */
596 .ts_high_mask = 0x00300000,
597 .ts_shift = ts_shift,
598 .ts_shift_num = ARRAY_SIZE(ts_shift),
599 .dmaor_init = DMAOR_DME,
600};
601
602static struct resource sh73a0_dmae_resources[] = {
603 {
604 /* Registers including DMAOR and channels including DMARSx */
605 .start = 0xfe000020,
606 .end = 0xfe008a00 - 1,
607 .flags = IORESOURCE_MEM,
608 },
609 {
610 /* DMA error IRQ */
611 .start = gic_spi(129),
612 .end = gic_spi(129),
613 .flags = IORESOURCE_IRQ,
614 },
615 {
616 /* IRQ for channels 0-19 */
617 .start = gic_spi(109),
618 .end = gic_spi(128),
619 .flags = IORESOURCE_IRQ,
620 },
621};
622
623static struct platform_device dma0_device = {
624 .name = "sh-dma-engine",
625 .id = 0,
626 .resource = sh73a0_dmae_resources,
627 .num_resources = ARRAY_SIZE(sh73a0_dmae_resources),
628 .dev = {
629 .platform_data = &sh73a0_dmae_platform_data,
630 },
631};
632
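
The DMAE_CHANNEL() helper records each channel's register block and its DMARS slot relative to the resource above, which starts at 0xfe000020 (hence the "- 0x20" in the macro). Assuming the shdma driver simply adds .offset/.dmars to the remapped resource base -- that driver is not part of this diff, so treat the resulting physical addresses as an illustration -- the first and last channels work out as follows:

	#include <stdio.h>

	#define DMAE_RES_START	0xfe000020UL	/* .start of the first resource above */

	struct chan { unsigned long offset, dmars; };

	/* same arithmetic as the DMAE_CHANNEL() macro in the hunk */
	#define DMAE_CHANNEL(_offset) { (_offset) - 0x20, (_offset) - 0x20 + 0x40 }

	int main(void)
	{
		struct chan first = DMAE_CHANNEL(0x8000);
		struct chan last  = DMAE_CHANNEL(0x8980);

		printf("ch0:  regs @ 0x%08lx, DMARS @ 0x%08lx\n",
		       DMAE_RES_START + first.offset, DMAE_RES_START + first.dmars);
		printf("ch19: regs @ 0x%08lx, DMARS @ 0x%08lx\n",
		       DMAE_RES_START + last.offset, DMAE_RES_START + last.dmars);
		/* ch0:  regs @ 0xfe008000, DMARS @ 0xfe008040 */
		/* ch19: regs @ 0xfe008980, DMARS @ 0xfe0089c0 */
		return 0;
	}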
395static struct platform_device *sh73a0_early_devices[] __initdata = { 633static struct platform_device *sh73a0_early_devices[] __initdata = {
396 &scif0_device, 634 &scif0_device,
397 &scif1_device, 635 &scif1_device,
@@ -413,10 +651,16 @@ static struct platform_device *sh73a0_late_devices[] __initdata = {
413 &i2c2_device, 651 &i2c2_device,
414 &i2c3_device, 652 &i2c3_device,
415 &i2c4_device, 653 &i2c4_device,
654 &dma0_device,
416}; 655};
417 656
657#define SRCR2 0xe61580b0
658
418void __init sh73a0_add_standard_devices(void) 659void __init sh73a0_add_standard_devices(void)
419{ 660{
661 /* Clear software reset bit on SY-DMAC module */
662 __raw_writel(__raw_readl(SRCR2) & ~(1 << 18), SRCR2);
663
420 platform_add_devices(sh73a0_early_devices, 664 platform_add_devices(sh73a0_early_devices,
421 ARRAY_SIZE(sh73a0_early_devices)); 665 ARRAY_SIZE(sh73a0_early_devices));
422 platform_add_devices(sh73a0_late_devices, 666 platform_add_devices(sh73a0_late_devices,
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
new file mode 100644
index 000000000000..d37d3ca4d18f
--- /dev/null
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -0,0 +1,260 @@
1/*
2 * sh7372 lowlevel sleep code for "Core Standby Mode"
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * In "Core Standby Mode" the ARM core is off, but L2 cache is still on
7 *
8 * Based on mach-omap2/sleep34xx.S
9 *
10 * (C) Copyright 2007 Texas Instruments
11 * Karthik Dasu <karthik-dp@ti.com>
12 *
13 * (C) Copyright 2004 Texas Instruments, <www.ti.com>
14 * Richard Woodruff <r-woodruff2@ti.com>
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA
30 */
31
32#include <linux/linkage.h>
33#include <asm/assembler.h>
34
35#define SMFRAM 0xe6a70000
36
37 .align
38kernel_flush:
39 .word v7_flush_dcache_all
40
41 .align 3
42ENTRY(sh7372_cpu_suspend)
43 stmfd sp!, {r0-r12, lr} @ save registers on stack
44
45 ldr r8, =SMFRAM
46
47 mov r4, sp @ Store sp
48 mrs r5, spsr @ Store spsr
49 mov r6, lr @ Store lr
50 stmia r8!, {r4-r6}
51
52 mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
53 mrc p15, 0, r5, c2, c0, 0 @ TTBR0
54 mrc p15, 0, r6, c2, c0, 1 @ TTBR1
55 mrc p15, 0, r7, c2, c0, 2 @ TTBCR
56 stmia r8!, {r4-r7}
57
58 mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
59 mrc p15, 0, r5, c10, c2, 0 @ PRRR
60 mrc p15, 0, r6, c10, c2, 1 @ NMRR
61 stmia r8!,{r4-r6}
62
63 mrc p15, 0, r4, c13, c0, 1 @ Context ID
64 mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
65 mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
66 mrs r7, cpsr @ Store current cpsr
67 stmia r8!, {r4-r7}
68
69 mrc p15, 0, r4, c1, c0, 0 @ save control register
70 stmia r8!, {r4}
71
72 /*
73 * jump out to kernel flush routine
 74 * - reusing that code is better
75 * - it executes in a cached space so is faster than refetch per-block
76 * - should be faster and will change with kernel
77 * - 'might' have to copy address, load and jump to it
78 * Flush all data from the L1 data cache before disabling
79 * SCTLR.C bit.
80 */
81 ldr r1, kernel_flush
82 mov lr, pc
83 bx r1
84
85 /*
86 * Clear the SCTLR.C bit to prevent further data cache
87 * allocation. Clearing SCTLR.C would make all the data accesses
88 * strongly ordered and would not hit the cache.
89 */
90 mrc p15, 0, r0, c1, c0, 0
91 bic r0, r0, #(1 << 2) @ Disable the C bit
92 mcr p15, 0, r0, c1, c0, 0
93 isb
94
95 /*
96 * Invalidate L1 data cache. Even though only invalidate is
 97 * necessary, the exported flush API is used here. Doing a clean
 98 * on an already clean cache would be almost a NOP.
99 */
100 ldr r1, kernel_flush
101 blx r1
102 /*
103 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
104 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
105 * This sequence switches back to ARM. Note that .align may insert a
106 * nop: bx pc needs to be word-aligned in order to work.
107 */
108 THUMB( .thumb )
109 THUMB( .align )
110 THUMB( bx pc )
111 THUMB( nop )
112 .arm
113
114 /* Data memory barrier and Data sync barrier */
115 dsb
116 dmb
117
118/*
119 * ===================================
120 * == WFI instruction => Enter idle ==
121 * ===================================
122 */
123 wfi @ wait for interrupt
124
125/*
126 * ===================================
127 * == Resume path for non-OFF modes ==
128 * ===================================
129 */
130 mrc p15, 0, r0, c1, c0, 0
131 tst r0, #(1 << 2) @ Check C bit enabled?
132 orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
133 mcreq p15, 0, r0, c1, c0, 0
134 isb
135
136/*
137 * ===================================
138 * == Exit point from non-OFF modes ==
139 * ===================================
140 */
141 ldmfd sp!, {r0-r12, pc} @ restore regs and return
142
143 .pool
144
145 .align 12
146 .text
147 .global sh7372_cpu_resume
148sh7372_cpu_resume:
149
150 mov r1, #0
151 /*
152 * Invalidate all instruction caches to PoU
153 * and flush branch target cache
154 */
155 mcr p15, 0, r1, c7, c5, 0
156
157 ldr r3, =SMFRAM
158
159 ldmia r3!, {r4-r6}
160 mov sp, r4 @ Restore sp
161 msr spsr_cxsf, r5 @ Restore spsr
162 mov lr, r6 @ Restore lr
163
164 ldmia r3!, {r4-r7}
165 mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
166 mcr p15, 0, r5, c2, c0, 0 @ TTBR0
167 mcr p15, 0, r6, c2, c0, 1 @ TTBR1
168 mcr p15, 0, r7, c2, c0, 2 @ TTBCR
169
170 ldmia r3!,{r4-r6}
171 mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
172 mcr p15, 0, r5, c10, c2, 0 @ PRRR
173 mcr p15, 0, r6, c10, c2, 1 @ NMRR
174
175 ldmia r3!,{r4-r7}
176 mcr p15, 0, r4, c13, c0, 1 @ Context ID
177 mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
178 mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
179	msr	cpsr, r7		@ Restore cpsr
180
181 /* Starting to enable MMU here */
182 mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
183 /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
184 and r7, #0x7
185 cmp r7, #0x0
186 beq usettbr0
187ttbr_error:
188 /*
189 * More work needs to be done to support N[0:2] value other than 0
190 * So looping here so that the error can be detected
191 */
192 b ttbr_error
193
194 .align
195cache_pred_disable_mask:
196 .word 0xFFFFE7FB
197ttbrbit_mask:
198 .word 0xFFFFC000
199table_index_mask:
200 .word 0xFFF00000
201table_entry:
202 .word 0x00000C02
203usettbr0:
204
205 mrc p15, 0, r2, c2, c0, 0
206 ldr r5, ttbrbit_mask
207 and r2, r5
208 mov r4, pc
209 ldr r5, table_index_mask
210 and r4, r5 @ r4 = 31 to 20 bits of pc
211 /* Extract the value to be written to table entry */
212 ldr r6, table_entry
213 /* r6 has the value to be written to table entry */
214 add r6, r6, r4
215 /* Getting the address of table entry to modify */
216 lsr r4, #18
217 /* r2 has the location which needs to be modified */
218 add r2, r4
219 ldr r4, [r2]
220 str r6, [r2] /* modify the table entry */
221
222 mov r7, r6
223 mov r5, r2
224 mov r6, r4
225 /* r5 = original page table address */
226 /* r6 = original page table data */
227
228 mov r0, #0
229 mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
230 mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
231 mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
232 mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
233
234 /*
235 * Restore control register. This enables the MMU.
236 * The caches and prediction are not enabled here, they
237 * will be enabled after restoring the MMU table entry.
238 */
239 ldmia r3!, {r4}
240 stmia r3!, {r5} /* save original page table address */
241 stmia r3!, {r6} /* save original page table data */
242 stmia r3!, {r7} /* save modified page table data */
243
244 ldr r2, cache_pred_disable_mask
245 and r4, r2
246 mcr p15, 0, r4, c1, c0, 0
247 dsb
248 isb
249
250 ldr r0, =restoremmu_on
251 bx r0
252
253/*
254 * ==============================
255 * == Exit point from OFF mode ==
256 * ==============================
257 */
258restoremmu_on:
259
260 ldmfd sp!, {r0-r12, pc} @ restore regs and return
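
The save area at SMFRAM is shared between this assembly and pm-sh7372.c earlier in this diff: sh7372_cpu_suspend() fills offsets 0x00-0x38 with banked and CP15 state, and the resume path appends the page-table fixup words at 0x3c-0x44 that sh7372_enter_core_standby() then checks and undoes. Laid out as an illustrative C struct -- nothing in the kernel declares this type; the offsets are simply read off the stmia/ldmia sequences above:

	#include <stdint.h>

	/* Illustrative only: mirrors the stmia/ldmia order in sh7372_cpu_suspend()
	 * and sh7372_cpu_resume; the kernel accesses these words by raw offset.
	 */
	struct sh7372_smfram_save {
		uint32_t sp;		/* 0x00 */
		uint32_t spsr;		/* 0x04 */
		uint32_t lr;		/* 0x08 */
		uint32_t cpacr;		/* 0x0c  CP15 c1, c0, 2 */
		uint32_t ttbr0;		/* 0x10 */
		uint32_t ttbr1;		/* 0x14 */
		uint32_t ttbcr;		/* 0x18 */
		uint32_t dacr;		/* 0x1c */
		uint32_t prrr;		/* 0x20 */
		uint32_t nmrr;		/* 0x24 */
		uint32_t context_id;	/* 0x28 */
		uint32_t tpidrurw;	/* 0x2c  user r/w thread ID */
		uint32_t vbar;		/* 0x30 */
		uint32_t cpsr;		/* 0x34 */
		uint32_t sctlr;		/* 0x38  restored via set_cr() in pm-sh7372.c */
		uint32_t pgtbl_addr;	/* 0x3c  cleared before suspend; non-zero => deep resume */
		uint32_t pgtbl_orig;	/* 0x40  original 1MB section entry, written back on resume */
		uint32_t pgtbl_mod;	/* 0x44  temporary flat-mapped entry used during resume */
	};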
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index a156d2108df1..3ffdbc92ba82 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -59,6 +59,11 @@ unsigned int __init sh73a0_get_core_count(void)
59{ 59{
60 void __iomem *scu_base = scu_base_addr(); 60 void __iomem *scu_base = scu_base_addr();
61 61
62#ifdef CONFIG_HAVE_ARM_TWD
63 /* twd_base needs to be initialized before percpu_timer_setup() */
64 twd_base = (void __iomem *)0xf0000600;
65#endif
66
62 return scu_get_core_count(scu_base); 67 return scu_get_core_count(scu_base);
63} 68}
64 69
@@ -82,10 +87,6 @@ int __cpuinit sh73a0_boot_secondary(unsigned int cpu)
82 87
83void __init sh73a0_smp_prepare_cpus(void) 88void __init sh73a0_smp_prepare_cpus(void)
84{ 89{
85#ifdef CONFIG_HAVE_ARM_TWD
86 twd_base = (void __iomem *)0xf0000600;
87#endif
88
89 scu_enable(scu_base_addr()); 90 scu_enable(scu_base_addr());
90 91
91 /* Map the reset vector (in headsmp.S) */ 92 /* Map the reset vector (in headsmp.S) */
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
new file mode 100644
index 000000000000..c1febe13f709
--- /dev/null
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -0,0 +1,47 @@
1/*
2 * Suspend-to-RAM support code for SH-Mobile ARM
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pm.h>
12#include <linux/suspend.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <asm/system.h>
16#include <asm/io.h>
17
18static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
19{
20 cpu_do_idle();
21 return 0;
22}
23
24static int shmobile_suspend_begin(suspend_state_t state)
25{
26 disable_hlt();
27 return 0;
28}
29
30static void shmobile_suspend_end(void)
31{
32 enable_hlt();
33}
34
35struct platform_suspend_ops shmobile_suspend_ops = {
36 .begin = shmobile_suspend_begin,
37 .end = shmobile_suspend_end,
38 .enter = shmobile_suspend_default_enter,
39 .valid = suspend_valid_only_mem,
40};
41
42static int __init shmobile_suspend_init(void)
43{
44 suspend_set_ops(&shmobile_suspend_ops);
45 return 0;
46}
47late_initcall(shmobile_suspend_init);
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
index 3ad086e859c3..4231bc7b8652 100644
--- a/arch/arm/mach-tegra/include/mach/sdhci.h
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -24,6 +24,7 @@ struct tegra_sdhci_platform_data {
24 int wp_gpio; 24 int wp_gpio;
25 int power_gpio; 25 int power_gpio;
26 int is_8bit; 26 int is_8bit;
27 int pm_flags;
27}; 28};
28 29
29#endif 30#endif
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 58626013aa32..54429d015954 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -12,9 +12,12 @@ menu "Ux500 SoC"
12 12
13config UX500_SOC_DB5500 13config UX500_SOC_DB5500
14 bool "DB5500" 14 bool "DB5500"
15 select MFD_DB5500_PRCMU
15 16
16config UX500_SOC_DB8500 17config UX500_SOC_DB8500
17 bool "DB8500" 18 bool "DB8500"
19 select MFD_DB8500_PRCMU
20 select REGULATOR_DB8500_PRCMU
18 21
19endmenu 22endmenu
20 23
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index b549a8fb4231..1694916e6822 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -5,7 +5,7 @@
5obj-y := clock.o cpu.o devices.o devices-common.o \ 5obj-y := clock.o cpu.o devices.o devices-common.o \
6 id.o usb.o 6 id.o usb.o
7obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o dma-db5500.o 7obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o dma-db5500.o
8obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o prcmu.o 8obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
9obj-$(CONFIG_MACH_U8500) += board-mop500.o board-mop500-sdi.o \ 9obj-$(CONFIG_MACH_U8500) += board-mop500.o board-mop500-sdi.o \
10 board-mop500-regulators.o \ 10 board-mop500-regulators.o \
11 board-mop500-uib.o board-mop500-stuib.o \ 11 board-mop500-uib.o board-mop500-stuib.o \
@@ -17,4 +17,4 @@ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
17obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o 17obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
18obj-$(CONFIG_U5500_MODEM_IRQ) += modem-irq-db5500.o 18obj-$(CONFIG_U5500_MODEM_IRQ) += modem-irq-db5500.o
19obj-$(CONFIG_U5500_MBOX) += mbox-db5500.o 19obj-$(CONFIG_U5500_MBOX) += mbox-db5500.o
20obj-$(CONFIG_CPU_FREQ) += cpufreq.o 20
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index c9dc2eff3cb2..c01bc19e3c5e 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -188,6 +188,8 @@ void __init u5500_map_io(void)
188 ux500_map_io(); 188 ux500_map_io();
189 189
190 iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc)); 190 iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
191
192 _PRCMU_BASE = __io_address(U5500_PRCMU_BASE);
191} 193}
192 194
193static int usb_db5500_rx_dma_cfg[] = { 195static int usb_db5500_rx_dma_cfg[] = {
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 516126cb357d..c3c417656bd9 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -87,6 +87,8 @@ void __init u8500_map_io(void)
87 iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc)); 87 iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc));
88 else if (cpu_is_u8500v2()) 88 else if (cpu_is_u8500v2())
89 iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc)); 89 iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc));
90
91 _PRCMU_BASE = __io_address(U8500_PRCMU_BASE);
90} 92}
91 93
92static struct resource db8500_pmu_resources[] = { 94static struct resource db8500_pmu_resources[] = {
@@ -129,9 +131,14 @@ static struct platform_device db8500_pmu_device = {
129 .dev.platform_data = &db8500_pmu_platdata, 131 .dev.platform_data = &db8500_pmu_platdata,
130}; 132};
131 133
134static struct platform_device db8500_prcmu_device = {
135 .name = "db8500-prcmu",
136};
137
132static struct platform_device *platform_devs[] __initdata = { 138static struct platform_device *platform_devs[] __initdata = {
133 &u8500_dma40_device, 139 &u8500_dma40_device,
134 &db8500_pmu_device, 140 &db8500_pmu_device,
141 &db8500_prcmu_device,
135}; 142};
136 143
137static resource_size_t __initdata db8500_gpio_base[] = { 144static resource_size_t __initdata db8500_gpio_base[] = {
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 5a43107c6232..1da23bb87c16 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -8,6 +8,8 @@
8#include <linux/platform_device.h> 8#include <linux/platform_device.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/mfd/db8500-prcmu.h>
12#include <linux/mfd/db5500-prcmu.h>
11 13
12#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
13#include <asm/hardware/cache-l2x0.h> 15#include <asm/hardware/cache-l2x0.h>
@@ -19,10 +21,11 @@
19#include <mach/hardware.h> 21#include <mach/hardware.h>
20#include <mach/setup.h> 22#include <mach/setup.h>
21#include <mach/devices.h> 23#include <mach/devices.h>
22#include <mach/prcmu.h>
23 24
24#include "clock.h" 25#include "clock.h"
25 26
27void __iomem *_PRCMU_BASE;
28
26#ifdef CONFIG_CACHE_L2X0 29#ifdef CONFIG_CACHE_L2X0
27static void __iomem *l2x0_base; 30static void __iomem *l2x0_base;
28#endif 31#endif
@@ -47,6 +50,8 @@ void __init ux500_init_irq(void)
47 * Init clocks here so that they are available for system timer 50 * Init clocks here so that they are available for system timer
48 * initialization. 51 * initialization.
49 */ 52 */
53 if (cpu_is_u5500())
54 db5500_prcmu_early_init();
50 if (cpu_is_u8500()) 55 if (cpu_is_u8500())
51 prcmu_early_init(); 56 prcmu_early_init();
52 clk_init(); 57 clk_init();
diff --git a/arch/arm/mach-ux500/cpufreq.c b/arch/arm/mach-ux500/cpufreq.c
deleted file mode 100644
index 5c5b747f134d..000000000000
--- a/arch/arm/mach-ux500/cpufreq.c
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * CPU frequency scaling for u8500
3 * Inspired by linux/arch/arm/mach-davinci/cpufreq.c
4 *
5 * Copyright (C) STMicroelectronics 2009
6 * Copyright (C) ST-Ericsson SA 2010
7 *
8 * License Terms: GNU General Public License v2
9 *
10 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
11 * Author: Martin Persson <martin.persson@stericsson.com>
12 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
13 *
14 */
15
16#include <linux/platform_device.h>
17#include <linux/kernel.h>
18#include <linux/cpufreq.h>
19#include <linux/delay.h>
20
21#include <mach/hardware.h>
22#include <mach/prcmu.h>
23#include <mach/prcmu-defs.h>
24
25#define DRIVER_NAME "cpufreq-u8500"
26#define CPUFREQ_NAME "u8500"
27
28static struct device *dev;
29
30static struct cpufreq_frequency_table freq_table[] = {
31 [0] = {
32 .index = 0,
33 .frequency = 200000,
34 },
35 [1] = {
36 .index = 1,
37 .frequency = 300000,
38 },
39 [2] = {
40 .index = 2,
41 .frequency = 600000,
42 },
43 [3] = {
44 /* Used for CPU_OPP_MAX, if available */
45 .index = 3,
46 .frequency = CPUFREQ_TABLE_END,
47 },
48 [4] = {
49 .index = 4,
50 .frequency = CPUFREQ_TABLE_END,
51 },
52};
53
54static enum prcmu_cpu_opp index2opp[] = {
55 CPU_OPP_EXT_CLK,
56 CPU_OPP_50,
57 CPU_OPP_100,
58 CPU_OPP_MAX
59};
60
61static int u8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
62{
63 return cpufreq_frequency_table_verify(policy, freq_table);
64}
65
66static int u8500_cpufreq_target(struct cpufreq_policy *policy,
67 unsigned int target_freq,
68 unsigned int relation)
69{
70 struct cpufreq_freqs freqs;
71 unsigned int index;
72 int ret = 0;
73
74 /*
 75 * Ensure desired rate is within allowed range. Some governors
76 * (ondemand) will just pass target_freq=0 to get the minimum.
77 */
78 if (target_freq < policy->cpuinfo.min_freq)
79 target_freq = policy->cpuinfo.min_freq;
80 if (target_freq > policy->cpuinfo.max_freq)
81 target_freq = policy->cpuinfo.max_freq;
82
83 ret = cpufreq_frequency_table_target(policy, freq_table,
84 target_freq, relation, &index);
85 if (ret < 0) {
86 dev_err(dev, "Could not look up next frequency\n");
87 return ret;
88 }
89
90 freqs.old = policy->cur;
91 freqs.new = freq_table[index].frequency;
92 freqs.cpu = policy->cpu;
93
94 if (freqs.old == freqs.new) {
95 dev_dbg(dev, "Current and target frequencies are equal\n");
96 return 0;
97 }
98
99 dev_dbg(dev, "transition: %u --> %u\n", freqs.old, freqs.new);
100 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
101
102 ret = prcmu_set_cpu_opp(index2opp[index]);
103 if (ret < 0) {
104 dev_err(dev, "Failed to set OPP level\n");
105 return ret;
106 }
107
108 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
109
110 return ret;
111}
112
113static unsigned int u8500_cpufreq_getspeed(unsigned int cpu)
114{
115 int i;
116
117 for (i = 0; prcmu_get_cpu_opp() != index2opp[i]; i++)
118 ;
119 return freq_table[i].frequency;
120}
121
122static int __cpuinit u8500_cpu_init(struct cpufreq_policy *policy)
123{
124 int res;
125
126 BUILD_BUG_ON(ARRAY_SIZE(index2opp) + 1 != ARRAY_SIZE(freq_table));
127
128 if (cpu_is_u8500v2()) {
129 freq_table[1].frequency = 400000;
130 freq_table[2].frequency = 800000;
131 if (prcmu_has_arm_maxopp())
132 freq_table[3].frequency = 1000000;
133 }
134
135 /* get policy fields based on the table */
136 res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
137 if (!res)
138 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
139 else {
140 dev_err(dev, "u8500-cpufreq : Failed to read policy table\n");
141 return res;
142 }
143
144 policy->min = policy->cpuinfo.min_freq;
145 policy->max = policy->cpuinfo.max_freq;
146 policy->cur = u8500_cpufreq_getspeed(policy->cpu);
147 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
148
149 /*
150 * FIXME : Need to take time measurement across the target()
151 * function with no/some/all drivers in the notification
152 * list.
153 */
154 policy->cpuinfo.transition_latency = 200 * 1000; /* in ns */
155
156 /* policy sharing between dual CPUs */
157 cpumask_copy(policy->cpus, &cpu_present_map);
158
159 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
160
161 return res;
162}
163
164static struct freq_attr *u8500_cpufreq_attr[] = {
165 &cpufreq_freq_attr_scaling_available_freqs,
166 NULL,
167};
168static int u8500_cpu_exit(struct cpufreq_policy *policy)
169{
170 cpufreq_frequency_table_put_attr(policy->cpu);
171 return 0;
172}
173
174static struct cpufreq_driver u8500_driver = {
175 .owner = THIS_MODULE,
176 .flags = CPUFREQ_STICKY,
177 .verify = u8500_cpufreq_verify_speed,
178 .target = u8500_cpufreq_target,
179 .get = u8500_cpufreq_getspeed,
180 .init = u8500_cpu_init,
181 .exit = u8500_cpu_exit,
182 .name = CPUFREQ_NAME,
183 .attr = u8500_cpufreq_attr,
184};
185
186static int __init u8500_cpufreq_probe(struct platform_device *pdev)
187{
188 dev = &pdev->dev;
189 return cpufreq_register_driver(&u8500_driver);
190}
191
192static int __exit u8500_cpufreq_remove(struct platform_device *pdev)
193{
194 return cpufreq_unregister_driver(&u8500_driver);
195}
196
197static struct platform_driver u8500_cpufreq_driver = {
198 .driver = {
199 .name = DRIVER_NAME,
200 .owner = THIS_MODULE,
201 },
202 .remove = __exit_p(u8500_cpufreq_remove),
203};
204
205static int __init u8500_cpufreq_init(void)
206{
207 return platform_driver_probe(&u8500_cpufreq_driver,
208 &u8500_cpufreq_probe);
209}
210
211device_initcall(u8500_cpufreq_init);
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
index bd88c1e74060..6ad983294103 100644
--- a/arch/arm/mach-ux500/include/mach/db5500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -17,6 +17,8 @@
17#define U5500_GIC_DIST_BASE 0xA0411000 17#define U5500_GIC_DIST_BASE 0xA0411000
18#define U5500_GIC_CPU_BASE 0xA0410100 18#define U5500_GIC_CPU_BASE 0xA0410100
19#define U5500_DMA_BASE 0x90030000 19#define U5500_DMA_BASE 0x90030000
20#define U5500_STM_BASE 0x90020000
21#define U5500_STM_REG_BASE (U5500_STM_BASE + 0xF000)
20#define U5500_MCDE_BASE 0xA0400000 22#define U5500_MCDE_BASE 0xA0400000
21#define U5500_MODEM_BASE 0xB0000000 23#define U5500_MODEM_BASE 0xB0000000
22#define U5500_L2CC_BASE 0xA0412000 24#define U5500_L2CC_BASE 0xA0412000
@@ -29,7 +31,9 @@
29#define U5500_NAND0_BASE 0x60000000 31#define U5500_NAND0_BASE 0x60000000
30#define U5500_NAND1_BASE 0x70000000 32#define U5500_NAND1_BASE 0x70000000
31#define U5500_TWD_BASE 0xa0410600 33#define U5500_TWD_BASE 0xa0410600
34#define U5500_ICN_BASE 0xA0040000
32#define U5500_B2R2_BASE 0xa0200000 35#define U5500_B2R2_BASE 0xa0200000
36#define U5500_BOOT_ROM_BASE 0x90000000
33 37
34#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000) 38#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000)
35#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000) 39#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000)
@@ -60,6 +64,7 @@
60#define U5500_MSP1_BASE (U5500_PER4_BASE + 0x9000) 64#define U5500_MSP1_BASE (U5500_PER4_BASE + 0x9000)
61#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000) 65#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000)
62#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000) 66#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000)
67#define U5500_PRCMU_TCDM_BASE (U5500_PER4_BASE + 0x18000)
63 68
64#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000) 69#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000)
65#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000) 70#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000)
@@ -83,7 +88,7 @@
83#define U5500_HASH0_BASE (U5500_PER6_BASE + 0x1000) 88#define U5500_HASH0_BASE (U5500_PER6_BASE + 0x1000)
84#define U5500_HASH1_BASE (U5500_PER6_BASE + 0x2000) 89#define U5500_HASH1_BASE (U5500_PER6_BASE + 0x2000)
85#define U5500_PKA_BASE (U5500_PER6_BASE + 0x4000) 90#define U5500_PKA_BASE (U5500_PER6_BASE + 0x4000)
86#define U5500_PKAM_BASE (U5500_PER6_BASE + 0x5000) 91#define U5500_PKAM_BASE (U5500_PER6_BASE + 0x5100)
87#define U5500_MTU0_BASE (U5500_PER6_BASE + 0x6000) 92#define U5500_MTU0_BASE (U5500_PER6_BASE + 0x6000)
88#define U5500_MTU1_BASE (U5500_PER6_BASE + 0x7000) 93#define U5500_MTU1_BASE (U5500_PER6_BASE + 0x7000)
89#define U5500_CR_BASE (U5500_PER6_BASE + 0x8000) 94#define U5500_CR_BASE (U5500_PER6_BASE + 0x8000)
@@ -114,8 +119,19 @@
114#define U5500_MBOX2_LOCAL_START (U5500_MBOX_BASE + 0x20) 119#define U5500_MBOX2_LOCAL_START (U5500_MBOX_BASE + 0x20)
115#define U5500_MBOX2_LOCAL_END (U5500_MBOX_BASE + 0x3F) 120#define U5500_MBOX2_LOCAL_END (U5500_MBOX_BASE + 0x3F)
116 121
117#define U5500_ESRAM_BASE 0x40000000 122#define U5500_ACCCON_BASE_SEC (0xBFFF0000)
123#define U5500_ACCCON_BASE (0xBFFF1000)
124#define U5500_ACCCON_CPUVEC_RESET_ADDR_OFFSET (0x00000020)
125#define U5500_ACCCON_ACC_CPU_CTRL_OFFSET (0x000000BC)
126
127#define U5500_ESRAM_BASE 0x40000000
118#define U5500_ESRAM_DMA_LCPA_OFFSET 0x10000 128#define U5500_ESRAM_DMA_LCPA_OFFSET 0x10000
119#define U5500_DMA_LCPA_BASE (U5500_ESRAM_BASE + U5500_ESRAM_DMA_LCPA_OFFSET) 129#define U5500_DMA_LCPA_BASE (U5500_ESRAM_BASE + U5500_ESRAM_DMA_LCPA_OFFSET)
120 130
131#define U5500_MCDE_SIZE 0x1000
132#define U5500_DSI_LINK_SIZE 0x1000
133#define U5500_DSI_LINK_COUNT 0x2
134#define U5500_DSI_LINK1_BASE (U5500_MCDE_BASE + U5500_MCDE_SIZE)
135#define U5500_DSI_LINK2_BASE (U5500_DSI_LINK1_BASE + U5500_DSI_LINK_SIZE)
136
121#endif 137#endif
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
index 16647b255378..049997109cf9 100644
--- a/arch/arm/mach-ux500/include/mach/db8500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -15,8 +15,13 @@
15#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE) 15#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE)
16#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE) 16#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE)
17#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE) 17#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE)
18/* Use bank 4 for DMA LCPA */ 18/*
19#define U8500_DMA_LCPA_BASE U8500_ESRAM_BANK4 19 * On V1, DMA uses 4KB for logical parameters; the position is right after
20 * the 64KB reserved for security.
21 */
22#define U8500_ESRAM_DMA_LCPA_OFFSET 0x10000
23
24#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK0 + U8500_ESRAM_DMA_LCPA_OFFSET)
20#define U8500_DMA_LCPA_BASE_ED (U8500_ESRAM_BANK4 + 0x4000) 25#define U8500_DMA_LCPA_BASE_ED (U8500_ESRAM_BANK4 + 0x4000)
21 26
22#define U8500_PER3_BASE 0x80000000 27#define U8500_PER3_BASE 0x80000000
@@ -27,9 +32,12 @@
27#define U8500_B2R2_BASE 0x80130000 32#define U8500_B2R2_BASE 0x80130000
28#define U8500_HSEM_BASE 0x80140000 33#define U8500_HSEM_BASE 0x80140000
29#define U8500_PER4_BASE 0x80150000 34#define U8500_PER4_BASE 0x80150000
35#define U8500_TPIU_BASE 0x80190000
30#define U8500_ICN_BASE 0x81000000 36#define U8500_ICN_BASE 0x81000000
31 37
32#define U8500_BOOT_ROM_BASE 0x90000000 38#define U8500_BOOT_ROM_BASE 0x90000000
39/* ASIC ID is at 0xbf4 offset within this region */
40#define U8500_ASIC_ID_BASE 0x9001D000
33 41
34#define U8500_PER6_BASE 0xa03c0000 42#define U8500_PER6_BASE 0xa03c0000
35#define U8500_PER5_BASE 0xa03e0000 43#define U8500_PER5_BASE 0xa03e0000
@@ -70,13 +78,15 @@
70 78
71/* per6 base addresses */ 79/* per6 base addresses */
72#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000) 80#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000)
73#define U8500_PKA_BASE (U8500_PER6_BASE + 0x1000) 81#define U8500_HASH0_BASE (U8500_PER6_BASE + 0x1000)
74#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x2000) 82#define U8500_HASH1_BASE (U8500_PER6_BASE + 0x2000)
83#define U8500_PKA_BASE (U8500_PER6_BASE + 0x4000)
84#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x5100)
75#define U8500_MTU0_BASE (U8500_PER6_BASE + 0x6000) /* v1 */ 85#define U8500_MTU0_BASE (U8500_PER6_BASE + 0x6000) /* v1 */
76#define U8500_MTU1_BASE (U8500_PER6_BASE + 0x7000) /* v1 */ 86#define U8500_MTU1_BASE (U8500_PER6_BASE + 0x7000) /* v1 */
77#define U8500_CR_BASE (U8500_PER6_BASE + 0x8000) /* v1 */ 87#define U8500_CR_BASE (U8500_PER6_BASE + 0x8000) /* v1 */
78#define U8500_CRYPTO0_BASE (U8500_PER6_BASE + 0xa000) 88#define U8500_CRYP0_BASE (U8500_PER6_BASE + 0xa000)
79#define U8500_CRYPTO1_BASE (U8500_PER6_BASE + 0xb000) 89#define U8500_CRYP1_BASE (U8500_PER6_BASE + 0xb000)
80#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000) 90#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000)
81 91
82/* per5 base addresses */ 92/* per5 base addresses */
@@ -93,7 +103,8 @@
93#define U8500_DMC_BASE (U8500_PER4_BASE + 0x06000) 103#define U8500_DMC_BASE (U8500_PER4_BASE + 0x06000)
94#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000) 104#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000)
95#define U8500_PRCMU_TCDM_BASE_V1 (U8500_PER4_BASE + 0x0f000) 105#define U8500_PRCMU_TCDM_BASE_V1 (U8500_PER4_BASE + 0x0f000)
96#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x68000) 106#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x68000)
107#define U8500_PRCMU_TCPM_BASE (U8500_PER4_BASE + 0x60000)
97 108
98/* per3 base addresses */ 109/* per3 base addresses */
99#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000) 110#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000)
@@ -124,6 +135,7 @@
124#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000) 135#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000)
125#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000) 136#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000)
126#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000) 137#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000)
138#define U8500_MSP3_BASE (U8500_PER1_BASE + 0x5000)
127#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000) 139#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000)
128#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000) 140#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000)
129#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000) 141#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000)
@@ -143,4 +155,15 @@
143#define U8500_GPIOBANK7_BASE (U8500_GPIO2_BASE + 0x80) 155#define U8500_GPIOBANK7_BASE (U8500_GPIO2_BASE + 0x80)
144#define U8500_GPIOBANK8_BASE U8500_GPIO3_BASE 156#define U8500_GPIOBANK8_BASE U8500_GPIO3_BASE
145 157
158#define U8500_MCDE_SIZE 0x1000
159#define U8500_DSI_LINK_SIZE 0x1000
160#define U8500_DSI_LINK1_BASE (U8500_MCDE_BASE + U8500_MCDE_SIZE)
161#define U8500_DSI_LINK2_BASE (U8500_DSI_LINK1_BASE + U8500_DSI_LINK_SIZE)
162#define U8500_DSI_LINK3_BASE (U8500_DSI_LINK2_BASE + U8500_DSI_LINK_SIZE)
163#define U8500_DSI_LINK_COUNT 0x3
164
165/* Modem and APE physical addresses */
166#define U8500_MODEM_BASE 0xe000000
167#define U8500_APE_BASE 0x6000000
168
146#endif 169#endif
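
Editor's note: the DSI link defines added above lay the links out back to back after the MCDE register space. A quick, runnable check of that arithmetic; U8500_MCDE_BASE is not visible in this hunk, so the value below is a placeholder.

/* Spot-check of the DSI link layout added above; bases printed in hex. */
#include <stdio.h>

#define U8500_MCDE_BASE      0xA0350000u  /* placeholder: not shown in this hunk */
#define U8500_MCDE_SIZE      0x1000u
#define U8500_DSI_LINK_SIZE  0x1000u
#define U8500_DSI_LINK1_BASE (U8500_MCDE_BASE + U8500_MCDE_SIZE)
#define U8500_DSI_LINK2_BASE (U8500_DSI_LINK1_BASE + U8500_DSI_LINK_SIZE)
#define U8500_DSI_LINK3_BASE (U8500_DSI_LINK2_BASE + U8500_DSI_LINK_SIZE)

int main(void)
{
	printf("DSI1 %#x DSI2 %#x DSI3 %#x\n",
	       U8500_DSI_LINK1_BASE, U8500_DSI_LINK2_BASE, U8500_DSI_LINK3_BASE);
	return 0;
}
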
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index bf63f2631ba0..2c6f71049f2e 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -35,6 +35,7 @@
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36 36
37#include <mach/id.h> 37#include <mach/id.h>
38extern void __iomem *_PRCMU_BASE;
38 39
39#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x) 40#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
40 41
diff --git a/arch/arm/mach-ux500/include/mach/id.h b/arch/arm/mach-ux500/include/mach/id.h
index f1288d10b6ab..02b541a37ee5 100644
--- a/arch/arm/mach-ux500/include/mach/id.h
+++ b/arch/arm/mach-ux500/include/mach/id.h
@@ -75,6 +75,26 @@ static inline bool __attribute_const__ cpu_is_u8500v2(void)
75 return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0); 75 return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0);
76} 76}
77 77
78static inline bool cpu_is_u8500v20(void)
79{
80 return cpu_is_u8500() && (dbx500_revision() == 0xB0);
81}
82
83static inline bool cpu_is_u8500v21(void)
84{
85 return cpu_is_u8500() && (dbx500_revision() == 0xB1);
86}
87
88static inline bool cpu_is_u8500v20_or_later(void)
89{
90 return cpu_is_u8500() && !cpu_is_u8500v10() && !cpu_is_u8500v11();
91}
92
93static inline bool ux500_is_svp(void)
94{
95 return false;
96}
97
78#define ux500_unknown_soc() BUG() 98#define ux500_unknown_soc() BUG()
79 99
80#endif 100#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index 97ef55f84934..47969909836c 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -50,6 +50,11 @@
50 50
51#define MOP500_IRQ_END MOP500_NR_IRQS 51#define MOP500_IRQ_END MOP500_NR_IRQS
52 52
53/*
54 * We may have several boards, but only one will run at a
55 * time, so the one with the most IRQs will bump this ahead,
56 * but IRQ_BOARD_START remains the same for every board.
57 */
53#if MOP500_IRQ_END > IRQ_BOARD_END 58#if MOP500_IRQ_END > IRQ_BOARD_END
54#undef IRQ_BOARD_END 59#undef IRQ_BOARD_END
55#define IRQ_BOARD_END MOP500_IRQ_END 60#define IRQ_BOARD_END MOP500_IRQ_END
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
new file mode 100644
index 000000000000..29d972c7717b
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef __MACH_IRQS_BOARD_U5500_H
8#define __MACH_IRQS_BOARD_U5500_H
9
10#define AB5500_NR_IRQS 5
11#define IRQ_AB5500_BASE IRQ_BOARD_START
12#define IRQ_AB5500_END (IRQ_AB5500_BASE + AB5500_NR_IRQS)
13
14#define U5500_IRQ_END IRQ_AB5500_END
15
16#if IRQ_BOARD_END < U5500_IRQ_END
17#undef IRQ_BOARD_END
18#define IRQ_BOARD_END U5500_IRQ_END
19#endif
20
21#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db5500.h b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
index bfa123dbec3b..77239776a6f2 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
@@ -83,4 +83,31 @@
83#define IRQ_DB5500_GPIO6 (IRQ_SHPI_START + 125) 83#define IRQ_DB5500_GPIO6 (IRQ_SHPI_START + 125)
84#define IRQ_DB5500_GPIO7 (IRQ_SHPI_START + 126) 84#define IRQ_DB5500_GPIO7 (IRQ_SHPI_START + 126)
85 85
86#ifdef CONFIG_UX500_SOC_DB5500
87
88/*
89 * After the GPIO ones we reserve a range of IRQs in which virtual
90 * IRQs representing modem IRQs can be allocated.
91 */
92#define IRQ_MODEM_EVENTS_BASE IRQ_SOC_START
93#define IRQ_MODEM_EVENTS_NBR 72
94#define IRQ_MODEM_EVENTS_END (IRQ_MODEM_EVENTS_BASE + IRQ_MODEM_EVENTS_NBR)
95
96/* List of virtual IRQs that are allocated from the range above */
97#define MBOX_PAIR0_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 43)
98#define MBOX_PAIR1_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 45)
99#define MBOX_PAIR2_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 41)
100
101/*
102 * We may have several SoCs, but only one will run at a
103 * time, so the one with the most IRQs will bump this ahead,
104 * but IRQ_SOC_START remains the same for every SoC.
105 */
106#if IRQ_SOC_END < IRQ_MODEM_EVENTS_END
107#undef IRQ_SOC_END
108#define IRQ_SOC_END IRQ_MODEM_EVENTS_END
109#endif
110
111#endif /* CONFIG_UX500_SOC_DB5500 */
112
86#endif 113#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db8500.h b/arch/arm/mach-ux500/include/mach/irqs-db8500.h
index 8b5d9f0a1633..68bc14974608 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db8500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db8500.h
@@ -93,4 +93,58 @@
93#define IRQ_DB8500_GPIO7 (IRQ_SHPI_START + 126) 93#define IRQ_DB8500_GPIO7 (IRQ_SHPI_START + 126)
94#define IRQ_DB8500_GPIO8 (IRQ_SHPI_START + 127) 94#define IRQ_DB8500_GPIO8 (IRQ_SHPI_START + 127)
95 95
96#define IRQ_CA_WAKE_REQ_ED (IRQ_SHPI_START + 71)
97#define IRQ_AC_READ_NOTIFICATION_0_ED (IRQ_SHPI_START + 66)
98#define IRQ_AC_READ_NOTIFICATION_1_ED (IRQ_SHPI_START + 64)
99#define IRQ_CA_MSG_PEND_NOTIFICATION_0_ED (IRQ_SHPI_START + 67)
100#define IRQ_CA_MSG_PEND_NOTIFICATION_1_ED (IRQ_SHPI_START + 65)
101
102#define IRQ_CA_WAKE_REQ_V1 (IRQ_SHPI_START + 83)
103#define IRQ_AC_READ_NOTIFICATION_0_V1 (IRQ_SHPI_START + 78)
104#define IRQ_AC_READ_NOTIFICATION_1_V1 (IRQ_SHPI_START + 76)
105#define IRQ_CA_MSG_PEND_NOTIFICATION_0_V1 (IRQ_SHPI_START + 79)
106#define IRQ_CA_MSG_PEND_NOTIFICATION_1_V1 (IRQ_SHPI_START + 77)
107
108#ifdef CONFIG_UX500_SOC_DB8500
109
110/* Virtual interrupts corresponding to the PRCMU wakeups. */
111#define IRQ_PRCMU_BASE IRQ_SOC_START
112#define NUM_PRCMU_WAKEUPS (IRQ_PRCMU_END - IRQ_PRCMU_BASE)
113
114#define IRQ_PRCMU_RTC (IRQ_PRCMU_BASE)
115#define IRQ_PRCMU_RTT0 (IRQ_PRCMU_BASE + 1)
116#define IRQ_PRCMU_RTT1 (IRQ_PRCMU_BASE + 2)
117#define IRQ_PRCMU_HSI0 (IRQ_PRCMU_BASE + 3)
118#define IRQ_PRCMU_HSI1 (IRQ_PRCMU_BASE + 4)
119#define IRQ_PRCMU_CA_WAKE (IRQ_PRCMU_BASE + 5)
120#define IRQ_PRCMU_USB (IRQ_PRCMU_BASE + 6)
121#define IRQ_PRCMU_ABB (IRQ_PRCMU_BASE + 7)
122#define IRQ_PRCMU_ABB_FIFO (IRQ_PRCMU_BASE + 8)
123#define IRQ_PRCMU_ARM (IRQ_PRCMU_BASE + 9)
124#define IRQ_PRCMU_MODEM_SW_RESET_REQ (IRQ_PRCMU_BASE + 10)
125#define IRQ_PRCMU_GPIO0 (IRQ_PRCMU_BASE + 11)
126#define IRQ_PRCMU_GPIO1 (IRQ_PRCMU_BASE + 12)
127#define IRQ_PRCMU_GPIO2 (IRQ_PRCMU_BASE + 13)
128#define IRQ_PRCMU_GPIO3 (IRQ_PRCMU_BASE + 14)
129#define IRQ_PRCMU_GPIO4 (IRQ_PRCMU_BASE + 15)
130#define IRQ_PRCMU_GPIO5 (IRQ_PRCMU_BASE + 16)
131#define IRQ_PRCMU_GPIO6 (IRQ_PRCMU_BASE + 17)
132#define IRQ_PRCMU_GPIO7 (IRQ_PRCMU_BASE + 18)
133#define IRQ_PRCMU_GPIO8 (IRQ_PRCMU_BASE + 19)
134#define IRQ_PRCMU_CA_SLEEP (IRQ_PRCMU_BASE + 20)
135#define IRQ_PRCMU_HOTMON_LOW (IRQ_PRCMU_BASE + 21)
136#define IRQ_PRCMU_HOTMON_HIGH (IRQ_PRCMU_BASE + 22)
137#define IRQ_PRCMU_END (IRQ_PRCMU_BASE + 23)
138
139/*
140 * We may have several SoCs, but only one will run at a
141 * time, so the one with the most IRQs will bump this ahead,
142 * but IRQ_SOC_START remains the same for every SoC.
143 */
144#if IRQ_SOC_END < IRQ_PRCMU_END
145#undef IRQ_SOC_END
146#define IRQ_SOC_END IRQ_PRCMU_END
147#endif
148
149#endif /* CONFIG_UX500_SOC_DB8500 */
96#endif 150#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index ba1294c13c4d..9db68d264c5f 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -10,49 +10,47 @@
10#ifndef ASM_ARCH_IRQS_H 10#ifndef ASM_ARCH_IRQS_H
11#define ASM_ARCH_IRQS_H 11#define ASM_ARCH_IRQS_H
12 12
13#include <mach/irqs-db5500.h> 13#include <mach/hardware.h>
14#include <mach/irqs-db8500.h>
15 14
16#define IRQ_LOCALTIMER 29 15#define IRQ_LOCALTIMER 29
17#define IRQ_LOCALWDOG 30 16#define IRQ_LOCALWDOG 30
18 17
19/* Shared Peripheral Interrupt (SHPI) */ 18/* Shared Peripheral Interrupt (SHPI) */
20#define IRQ_SHPI_START 32 19#define IRQ_SHPI_START 32
21 20
22/* Interrupt numbers generic for shared peripheral */ 21/*
22 * MTU0 preserved for now until plat-nomadik is taught not to use it. Don't
23 * add any other IRQs here; use the irqs-dbx500.h files.
24 */
23#define IRQ_MTU0 (IRQ_SHPI_START + 4) 25#define IRQ_MTU0 (IRQ_SHPI_START + 4)
24 26
25/* There are 128 shared peripheral interrupts assigned to 27#define DBX500_NR_INTERNAL_IRQS 160
26 * INTID[160:32]. The first 32 interrupts are reserved.
27 */
28#define DBX500_NR_INTERNAL_IRQS 161
29 28
30/* After chip-specific IRQ numbers we have the GPIO ones */ 29/* After chip-specific IRQ numbers we have the GPIO ones */
31#define NOMADIK_NR_GPIO 288 30#define NOMADIK_NR_GPIO 288
32#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS) 31#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS)
33#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS) 32#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS)
34#define IRQ_BOARD_START NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO) 33#define IRQ_GPIO_END NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
34
35#define IRQ_SOC_START IRQ_GPIO_END
36/* This will be overridden by SoC-specific irq headers */
37#define IRQ_SOC_END IRQ_SOC_START
35 38
39#include <mach/irqs-db5500.h>
40#include <mach/irqs-db8500.h>
41
42#define IRQ_BOARD_START IRQ_SOC_END
36/* This will be overridden by board-specific irq headers */ 43/* This will be overridden by board-specific irq headers */
37#define IRQ_BOARD_END IRQ_BOARD_START 44#define IRQ_BOARD_END IRQ_BOARD_START
38 45
39#ifdef CONFIG_MACH_U8500 46#ifdef CONFIG_MACH_U8500
40#include <mach/irqs-board-mop500.h> 47#include <mach/irqs-board-mop500.h>
41#endif 48#endif
42 49
43/* 50#ifdef CONFIG_MACH_U5500
44 * After the board specific IRQ:s we reserve a range of IRQ:s in which virtual 51#include <mach/irqs-board-u5500.h>
45 * IRQ:s representing modem IRQ:s can be allocated 52#endif
46 */
47#define IRQ_MODEM_EVENTS_BASE (IRQ_BOARD_END + 1)
48#define IRQ_MODEM_EVENTS_NBR 72
49#define IRQ_MODEM_EVENTS_END (IRQ_MODEM_EVENTS_BASE + IRQ_MODEM_EVENTS_NBR)
50
51/* List of virtual IRQ:s that are allocated from the range above */
52#define MBOX_PAIR0_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 43)
53#define MBOX_PAIR1_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 45)
54#define MBOX_PAIR2_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 41)
55 53
56#define NR_IRQS IRQ_MODEM_EVENTS_END 54#define NR_IRQS IRQ_BOARD_END
57 55
58#endif /* ASM_ARCH_IRQS_H */ 56#endif /* ASM_ARCH_IRQS_H */
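
Editor's note: the reworked irqs.h chains the ranges -- internal IRQs, then GPIO IRQs, then an SoC range that irqs-db5500.h/irqs-db8500.h may extend, then the board range. A small sketch of the resulting layout using the constants visible in these hunks; the board range is left at its empty default, so NR_IRQS equals IRQ_BOARD_START here.

/* Sketch of the IRQ number layout produced by the reworked irqs.h. */
#include <stdio.h>

#define DBX500_NR_INTERNAL_IRQS 160
#define NOMADIK_NR_GPIO         288
#define NOMADIK_GPIO_TO_IRQ(g)  ((g) + DBX500_NR_INTERNAL_IRQS)
#define IRQ_GPIO_END            NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)

#define IRQ_SOC_START           IRQ_GPIO_END
/* irqs-db8500.h bumps the SoC end by 23 PRCMU wakeup IRQs. */
#define IRQ_SOC_END             (IRQ_SOC_START + 23)

#define IRQ_BOARD_START         IRQ_SOC_END
#define IRQ_BOARD_END           IRQ_BOARD_START   /* no board header included */
#define NR_IRQS                 IRQ_BOARD_END

int main(void)
{
	printf("internal: 0..%d\n", DBX500_NR_INTERNAL_IRQS - 1);
	printf("gpio:     %d..%d\n", NOMADIK_GPIO_TO_IRQ(0), IRQ_GPIO_END - 1);
	printf("soc:      %d..%d\n", IRQ_SOC_START, IRQ_SOC_END - 1);
	printf("NR_IRQS:  %d\n", NR_IRQS);
	return 0;
}
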
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-defs.h b/arch/arm/mach-ux500/include/mach/prcmu-defs.h
deleted file mode 100644
index 848ba64b561f..000000000000
--- a/arch/arm/mach-ux500/include/mach/prcmu-defs.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
6 * Author: Martin Persson <martin.persson@stericsson.com>
7 *
8 * License Terms: GNU General Public License v2
9 *
10 * PRCM Unit definitions
11 */
12
13#ifndef __MACH_PRCMU_DEFS_H
14#define __MACH_PRCMU_DEFS_H
15
16enum prcmu_cpu_opp {
17 CPU_OPP_INIT = 0x00,
18 CPU_OPP_NO_CHANGE = 0x01,
19 CPU_OPP_100 = 0x02,
20 CPU_OPP_50 = 0x03,
21 CPU_OPP_MAX = 0x04,
22 CPU_OPP_EXT_CLK = 0x07
23};
24enum prcmu_ape_opp {
25 APE_OPP_NO_CHANGE = 0x00,
26 APE_OPP_100 = 0x02,
27 APE_OPP_50 = 0x03,
28};
29
30#endif /* __MACH_PRCMU_DEFS_H */
diff --git a/arch/arm/mach-ux500/include/mach/prcmu.h b/arch/arm/mach-ux500/include/mach/prcmu.h
deleted file mode 100644
index c49e456162ef..000000000000
--- a/arch/arm/mach-ux500/include/mach/prcmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
6 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
7 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
8 *
9 * License Terms: GNU General Public License v2
10 *
11 * PRCM Unit f/w API
12 */
13#ifndef __MACH_PRCMU_H
14#define __MACH_PRCMU_H
15#include <mach/prcmu-defs.h>
16
17void __init prcmu_early_init(void);
18int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
19int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
20int prcmu_set_ape_opp(enum prcmu_ape_opp opp);
21int prcmu_set_cpu_opp(enum prcmu_cpu_opp opp);
22int prcmu_set_ape_cpu_opps(enum prcmu_ape_opp ape_opp,
23 enum prcmu_cpu_opp cpu_opp);
24int prcmu_get_ape_opp(void);
25int prcmu_get_cpu_opp(void);
26bool prcmu_has_arm_maxopp(void);
27
28#endif /* __MACH_PRCMU_H */
diff --git a/arch/arm/mach-ux500/prcmu.c b/arch/arm/mach-ux500/prcmu.c
deleted file mode 100644
index c522d26ef348..000000000000
--- a/arch/arm/mach-ux500/prcmu.c
+++ /dev/null
@@ -1,394 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
9 *
10 * U8500 PRCM Unit interface driver
11 *
12 */
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/mutex.h>
19#include <linux/completion.h>
20#include <linux/jiffies.h>
21#include <linux/bitops.h>
22#include <linux/interrupt.h>
23
24#include <mach/hardware.h>
25#include <mach/prcmu-regs.h>
26#include <mach/prcmu-defs.h>
27
28/* Global var to runtime determine TCDM base for v2 or v1 */
29static __iomem void *tcdm_base;
30
31#define _MBOX_HEADER (tcdm_base + 0xFE8)
32#define MBOX_HEADER_REQ_MB0 (_MBOX_HEADER + 0x0)
33
34#define REQ_MB1 (tcdm_base + 0xFD0)
35#define REQ_MB5 (tcdm_base + 0xE44)
36
37#define REQ_MB1_ARMOPP (REQ_MB1 + 0x0)
38#define REQ_MB1_APEOPP (REQ_MB1 + 0x1)
39#define REQ_MB1_BOOSTOPP (REQ_MB1 + 0x2)
40
41#define ACK_MB1 (tcdm_base + 0xE04)
42#define ACK_MB5 (tcdm_base + 0xDF4)
43
44#define ACK_MB1_CURR_ARMOPP (ACK_MB1 + 0x0)
45#define ACK_MB1_CURR_APEOPP (ACK_MB1 + 0x1)
46
47#define REQ_MB5_I2C_SLAVE_OP (REQ_MB5)
48#define REQ_MB5_I2C_HW_BITS (REQ_MB5 + 1)
49#define REQ_MB5_I2C_REG (REQ_MB5 + 2)
50#define REQ_MB5_I2C_VAL (REQ_MB5 + 3)
51
52#define ACK_MB5_I2C_STATUS (ACK_MB5 + 1)
53#define ACK_MB5_I2C_VAL (ACK_MB5 + 3)
54
55#define PRCM_AVS_VARM_MAX_OPP (tcdm_base + 0x2E4)
56#define PRCM_AVS_ISMODEENABLE 7
57#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)
58
59#define I2C_WRITE(slave) \
60 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
61#define I2C_READ(slave) \
62 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0) | BIT(0))
63#define I2C_STOP_EN BIT(3)
64
65enum mb1_h {
66 MB1H_ARM_OPP = 1,
67 MB1H_APE_OPP,
68 MB1H_ARM_APE_OPP,
69};
70
71static struct {
72 struct mutex lock;
73 struct completion work;
74 struct {
75 u8 arm_opp;
76 u8 ape_opp;
77 u8 arm_status;
78 u8 ape_status;
79 } ack;
80} mb1_transfer;
81
82enum ack_mb5_status {
83 I2C_WR_OK = 0x01,
84 I2C_RD_OK = 0x02,
85};
86
87#define MBOX_BIT BIT
88#define NUM_MBOX 8
89
90static struct {
91 struct mutex lock;
92 struct completion work;
93 bool failed;
94 struct {
95 u8 status;
96 u8 value;
97 } ack;
98} mb5_transfer;
99
100/**
101 * prcmu_abb_read() - Read register value(s) from the ABB.
102 * @slave: The I2C slave address.
103 * @reg: The (start) register address.
104 * @value: The read out value(s).
105 * @size: The number of registers to read.
106 *
107 * Reads register value(s) from the ABB.
108 * @size has to be 1 for the current firmware version.
109 */
110int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
111{
112 int r;
113
114 if (size != 1)
115 return -EINVAL;
116
117 r = mutex_lock_interruptible(&mb5_transfer.lock);
118 if (r)
119 return r;
120
121 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
122 cpu_relax();
123
124 writeb(I2C_READ(slave), REQ_MB5_I2C_SLAVE_OP);
125 writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
126 writeb(reg, REQ_MB5_I2C_REG);
127
128 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
129 if (!wait_for_completion_timeout(&mb5_transfer.work,
130 msecs_to_jiffies(500))) {
131 pr_err("prcmu: prcmu_abb_read timed out.\n");
132 r = -EIO;
133 goto unlock_and_return;
134 }
135 r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
136 if (!r)
137 *value = mb5_transfer.ack.value;
138
139unlock_and_return:
140 mutex_unlock(&mb5_transfer.lock);
141 return r;
142}
143EXPORT_SYMBOL(prcmu_abb_read);
144
145/**
146 * prcmu_abb_write() - Write register value(s) to the ABB.
147 * @slave: The I2C slave address.
148 * @reg: The (start) register address.
149 * @value: The value(s) to write.
150 * @size: The number of registers to write.
151 *
152 * Writes register value(s) to the ABB.
153 * @size has to be 1 for the current firmware version.
154 */
155int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
156{
157 int r;
158
159 if (size != 1)
160 return -EINVAL;
161
162 r = mutex_lock_interruptible(&mb5_transfer.lock);
163 if (r)
164 return r;
165
166
167 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
168 cpu_relax();
169
170 writeb(I2C_WRITE(slave), REQ_MB5_I2C_SLAVE_OP);
171 writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
172 writeb(reg, REQ_MB5_I2C_REG);
173 writeb(*value, REQ_MB5_I2C_VAL);
174
175 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
176 if (!wait_for_completion_timeout(&mb5_transfer.work,
177 msecs_to_jiffies(500))) {
178 pr_err("prcmu: prcmu_abb_write timed out.\n");
179 r = -EIO;
180 goto unlock_and_return;
181 }
182 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
183
184unlock_and_return:
185 mutex_unlock(&mb5_transfer.lock);
186 return r;
187}
188EXPORT_SYMBOL(prcmu_abb_write);
189
190static int set_ape_cpu_opps(u8 header, enum prcmu_ape_opp ape_opp,
191 enum prcmu_cpu_opp cpu_opp)
192{
193 bool do_ape;
194 bool do_arm;
195 int err = 0;
196
197 do_ape = ((header == MB1H_APE_OPP) || (header == MB1H_ARM_APE_OPP));
198 do_arm = ((header == MB1H_ARM_OPP) || (header == MB1H_ARM_APE_OPP));
199
200 mutex_lock(&mb1_transfer.lock);
201
202 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
203 cpu_relax();
204
205 writeb(0, MBOX_HEADER_REQ_MB0);
206 writeb(cpu_opp, REQ_MB1_ARMOPP);
207 writeb(ape_opp, REQ_MB1_APEOPP);
208 writeb(0, REQ_MB1_BOOSTOPP);
209 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
210 wait_for_completion(&mb1_transfer.work);
211 if ((do_ape) && (mb1_transfer.ack.ape_status != 0))
212 err = -EIO;
213 if ((do_arm) && (mb1_transfer.ack.arm_status != 0))
214 err = -EIO;
215
216 mutex_unlock(&mb1_transfer.lock);
217
218 return err;
219}
220
221/**
222 * prcmu_set_ape_opp() - Set the OPP of the APE.
223 * @opp: The OPP to set.
224 *
225 * This function sets the OPP of the APE.
226 */
227int prcmu_set_ape_opp(enum prcmu_ape_opp opp)
228{
229 return set_ape_cpu_opps(MB1H_APE_OPP, opp, APE_OPP_NO_CHANGE);
230}
231EXPORT_SYMBOL(prcmu_set_ape_opp);
232
233/**
234 * prcmu_set_cpu_opp() - Set the OPP of the CPU.
235 * @opp: The OPP to set.
236 *
237 * This function sets the OPP of the CPU.
238 */
239int prcmu_set_cpu_opp(enum prcmu_cpu_opp opp)
240{
241 return set_ape_cpu_opps(MB1H_ARM_OPP, CPU_OPP_NO_CHANGE, opp);
242}
243EXPORT_SYMBOL(prcmu_set_cpu_opp);
244
245/**
246 * prcmu_set_ape_cpu_opps() - Set the OPPs of the APE and the CPU.
247 * @ape_opp: The APE OPP to set.
248 * @cpu_opp: The CPU OPP to set.
249 *
250 * This function sets the OPPs of the APE and the CPU.
251 */
252int prcmu_set_ape_cpu_opps(enum prcmu_ape_opp ape_opp,
253 enum prcmu_cpu_opp cpu_opp)
254{
255 return set_ape_cpu_opps(MB1H_ARM_APE_OPP, ape_opp, cpu_opp);
256}
257EXPORT_SYMBOL(prcmu_set_ape_cpu_opps);
258
259/**
260 * prcmu_get_ape_opp() - Get the OPP of the APE.
261 *
262 * This function gets the OPP of the APE.
263 */
264enum prcmu_ape_opp prcmu_get_ape_opp(void)
265{
266 return readb(ACK_MB1_CURR_APEOPP);
267}
268EXPORT_SYMBOL(prcmu_get_ape_opp);
269
270/**
271 * prcmu_get_cpu_opp() - Get the OPP of the CPU.
272 *
273 * This function gets the OPP of the CPU. The OPP is specified in %%.
274 * PRCMU_OPP_EXT is a special OPP value, not specified in %%.
275 */
276int prcmu_get_cpu_opp(void)
277{
278 return readb(ACK_MB1_CURR_ARMOPP);
279}
280EXPORT_SYMBOL(prcmu_get_cpu_opp);
281
282bool prcmu_has_arm_maxopp(void)
283{
284 return (readb(PRCM_AVS_VARM_MAX_OPP) & PRCM_AVS_ISMODEENABLE_MASK)
285 == PRCM_AVS_ISMODEENABLE_MASK;
286}
287
288static void read_mailbox_0(void)
289{
290 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR);
291}
292
293static void read_mailbox_1(void)
294{
295 mb1_transfer.ack.arm_opp = readb(ACK_MB1_CURR_ARMOPP);
296 mb1_transfer.ack.ape_opp = readb(ACK_MB1_CURR_APEOPP);
297 complete(&mb1_transfer.work);
298 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR);
299}
300
301static void read_mailbox_2(void)
302{
303 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR);
304}
305
306static void read_mailbox_3(void)
307{
308 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR);
309}
310
311static void read_mailbox_4(void)
312{
313 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR);
314}
315
316static void read_mailbox_5(void)
317{
318 mb5_transfer.ack.status = readb(ACK_MB5_I2C_STATUS);
319 mb5_transfer.ack.value = readb(ACK_MB5_I2C_VAL);
320 complete(&mb5_transfer.work);
321 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR);
322}
323
324static void read_mailbox_6(void)
325{
326 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR);
327}
328
329static void read_mailbox_7(void)
330{
331 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR);
332}
333
334static void (* const read_mailbox[NUM_MBOX])(void) = {
335 read_mailbox_0,
336 read_mailbox_1,
337 read_mailbox_2,
338 read_mailbox_3,
339 read_mailbox_4,
340 read_mailbox_5,
341 read_mailbox_6,
342 read_mailbox_7
343};
344
345static irqreturn_t prcmu_irq_handler(int irq, void *data)
346{
347 u32 bits;
348 u8 n;
349
350 bits = (readl(PRCM_ARM_IT1_VAL) & (MBOX_BIT(NUM_MBOX) - 1));
351 if (unlikely(!bits))
352 return IRQ_NONE;
353
354 for (n = 0; bits; n++) {
355 if (bits & MBOX_BIT(n)) {
356 bits -= MBOX_BIT(n);
357 read_mailbox[n]();
358 }
359 }
360 return IRQ_HANDLED;
361}
362
363void __init prcmu_early_init(void)
364{
365 if (cpu_is_u8500v11() || cpu_is_u8500ed()) {
366 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
367 } else if (cpu_is_u8500v2()) {
368 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
369 } else {
370 pr_err("prcmu: Unsupported chip version\n");
371 BUG();
372 }
373}
374
375static int __init prcmu_init(void)
376{
377 if (cpu_is_u8500ed()) {
378 pr_err("prcmu: Unsupported chip version\n");
379 return 0;
380 }
381
382 mutex_init(&mb1_transfer.lock);
383 init_completion(&mb1_transfer.work);
384 mutex_init(&mb5_transfer.lock);
385 init_completion(&mb5_transfer.work);
386
387 /* Clean up the mailbox interrupts after pre-kernel code. */
388 writel((MBOX_BIT(NUM_MBOX) - 1), PRCM_ARM_IT1_CLEAR);
389
390 return request_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler, 0,
391 "prcmu", NULL);
392}
393
394arch_initcall(prcmu_init);
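
Editor's note: the removed prcmu.c packs the ABB I2C request into a single byte -- slave address shifted left by one, BIT(0) marking a read, and BIT(6) ORed in on v2 silicon. A standalone sketch of that encoding; the 0x0c slave address is a made-up example, and the firmware-side interpretation of the byte is outside this hunk.

/* Bit layout used by the (removed) mailbox-5 ABB I2C requests. */
#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int i2c_op(unsigned int slave, int read, int is_v2)
{
	unsigned int op = (slave << 1) | (is_v2 ? BIT(6) : 0);

	return read ? (op | BIT(0)) : op;
}

int main(void)
{
	/* Hypothetical slave address, for illustration only. */
	printf("v2 read : 0x%02x\n", i2c_op(0x0c, 1, 1));
	printf("v2 write: 0x%02x\n", i2c_op(0x0c, 0, 1));
	return 0;
}
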
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 76f82ae44efb..3f17ea146f0e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -85,7 +85,7 @@ void show_mem(unsigned int filter)
85 struct meminfo * mi = &meminfo; 85 struct meminfo * mi = &meminfo;
86 86
87 printk("Mem-info:\n"); 87 printk("Mem-info:\n");
88 show_free_areas(); 88 show_free_areas(filter);
89 89
90 for_each_bank (i, mi) { 90 for_each_bank (i, mi) {
91 struct membank *bank = &mi->bank[i]; 91 struct membank *bank = &mi->bank[i];
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6cf76b3b68d1..08a92368d9d3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,8 +31,6 @@
31 31
32#include "mm.h" 32#include "mm.h"
33 33
34DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36/* 34/*
37 * empty_zero_page is a special page that is used for 35 * empty_zero_page is a special page that is used for
38 * zero-initialized data and COW. 36 * zero-initialized data and COW.
diff --git a/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h b/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
index 872de0bf1e6b..ea6c9c88c725 100644
--- a/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
+++ b/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
@@ -14,14 +14,14 @@
14#ifndef __ASM_ARCH_OMAP_GPMC_SMSC911X_H__ 14#ifndef __ASM_ARCH_OMAP_GPMC_SMSC911X_H__
15 15
16struct omap_smsc911x_platform_data { 16struct omap_smsc911x_platform_data {
17 int id;
17 int cs; 18 int cs;
18 int gpio_irq; 19 int gpio_irq;
19 int gpio_reset; 20 int gpio_reset;
20 u32 flags; 21 u32 flags;
21}; 22};
22 23
23#if defined(CONFIG_SMSC911X) || \ 24#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
24 defined(CONFIG_SMSC911X_MODULE)
25 25
26extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d); 26extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d);
27 27
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index 565d2664f5a7..ac4b60d9aa29 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -129,7 +129,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
129 DEBUG_LL_OMAP1(3, sx1); 129 DEBUG_LL_OMAP1(3, sx1);
130 130
131 /* omap2 based boards using UART1 */ 131 /* omap2 based boards using UART1 */
132 DEBUG_LL_OMAP2(1, omap2evm);
133 DEBUG_LL_OMAP2(1, omap_2430sdp); 132 DEBUG_LL_OMAP2(1, omap_2430sdp);
134 DEBUG_LL_OMAP2(1, omap_apollon); 133 DEBUG_LL_OMAP2(1, omap_apollon);
135 DEBUG_LL_OMAP2(1, omap_h4); 134 DEBUG_LL_OMAP2(1, omap_h4);
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 02b96c8f6a17..17d3c939775c 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -113,7 +113,7 @@ extern int omap4430_phy_suspend(struct device *dev, int suspend);
113extern void am35x_musb_reset(void); 113extern void am35x_musb_reset(void);
114extern void am35x_musb_phy_power(u8 on); 114extern void am35x_musb_phy_power(u8 on);
115extern void am35x_musb_clear_irq(void); 115extern void am35x_musb_clear_irq(void);
116extern void am35x_musb_set_mode(u8 musb_mode); 116extern void am35x_set_mode(u8 musb_mode);
117 117
118/* 118/*
119 * FIXME correct answer depends on hmc_mode, 119 * FIXME correct answer depends on hmc_mode,
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h
index 72444d97f80c..b70c19bab63a 100644
--- a/arch/avr32/include/asm/bitops.h
+++ b/arch/avr32/include/asm/bitops.h
@@ -270,14 +270,21 @@ static inline int __fls(unsigned long word)
270 270
271unsigned long find_first_zero_bit(const unsigned long *addr, 271unsigned long find_first_zero_bit(const unsigned long *addr,
272 unsigned long size); 272 unsigned long size);
273#define find_first_zero_bit find_first_zero_bit
274
273unsigned long find_next_zero_bit(const unsigned long *addr, 275unsigned long find_next_zero_bit(const unsigned long *addr,
274 unsigned long size, 276 unsigned long size,
275 unsigned long offset); 277 unsigned long offset);
278#define find_next_zero_bit find_next_zero_bit
279
276unsigned long find_first_bit(const unsigned long *addr, 280unsigned long find_first_bit(const unsigned long *addr,
277 unsigned long size); 281 unsigned long size);
282#define find_first_bit find_first_bit
283
278unsigned long find_next_bit(const unsigned long *addr, 284unsigned long find_next_bit(const unsigned long *addr,
279 unsigned long size, 285 unsigned long size,
280 unsigned long offset); 286 unsigned long offset);
287#define find_next_bit find_next_bit
281 288
282/* 289/*
283 * ffs: find first bit set. This is defined the same way as 290 * ffs: find first bit set. This is defined the same way as
@@ -299,6 +306,14 @@ static inline int ffs(unsigned long word)
299#include <asm-generic/bitops/hweight.h> 306#include <asm-generic/bitops/hweight.h>
300#include <asm-generic/bitops/lock.h> 307#include <asm-generic/bitops/lock.h>
301 308
309extern unsigned long find_next_zero_bit_le(const void *addr,
310 unsigned long size, unsigned long offset);
311#define find_next_zero_bit_le find_next_zero_bit_le
312
313extern unsigned long find_next_bit_le(const void *addr,
314 unsigned long size, unsigned long offset);
315#define find_next_bit_le find_next_bit_le
316
302#include <asm-generic/bitops/le.h> 317#include <asm-generic/bitops/le.h>
303#include <asm-generic/bitops/ext2-atomic.h> 318#include <asm-generic/bitops/ext2-atomic.h>
304 319
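
Editor's note: the new "#define find_first_zero_bit find_first_zero_bit" style lines let the generic bitops headers detect, via #ifndef, that the architecture already provides these functions. A tiny illustration of that self-referential define convention; the names below are generic placeholders, not the real bitops API.

#include <stdio.h>

/* "arch" side: provides its own helper and marks it as present. */
static int arch_helper(void) { return 42; }
#define arch_helper arch_helper

/* "generic" side: only supplies a fallback when the arch did not. */
#ifndef arch_helper
static int arch_helper(void) { return 0; }
#endif

int main(void)
{
	printf("%d\n", arch_helper());  /* prints 42: the arch version wins */
	return 0;
}
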
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index a7314d44b17b..2798c2d4a1cf 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -25,8 +25,6 @@
25#include <asm/setup.h> 25#include <asm/setup.h>
26#include <asm/sections.h> 26#include <asm/sections.h>
27 27
28DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
29
30pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data; 28pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
31 29
32struct page *empty_zero_page; 30struct page *empty_zero_page;
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 8addb1220b4f..d619b17c4413 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -24,11 +24,13 @@ config BLACKFIN
24 select HAVE_FUNCTION_TRACER 24 select HAVE_FUNCTION_TRACER
25 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 25 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
26 select HAVE_IDE 26 select HAVE_IDE
27 select HAVE_IRQ_WORK
27 select HAVE_KERNEL_GZIP if RAMKERNEL 28 select HAVE_KERNEL_GZIP if RAMKERNEL
28 select HAVE_KERNEL_BZIP2 if RAMKERNEL 29 select HAVE_KERNEL_BZIP2 if RAMKERNEL
29 select HAVE_KERNEL_LZMA if RAMKERNEL 30 select HAVE_KERNEL_LZMA if RAMKERNEL
30 select HAVE_KERNEL_LZO if RAMKERNEL 31 select HAVE_KERNEL_LZO if RAMKERNEL
31 select HAVE_OPROFILE 32 select HAVE_OPROFILE
33 select HAVE_PERF_EVENTS
32 select ARCH_WANT_OPTIONAL_GPIOLIB 34 select ARCH_WANT_OPTIONAL_GPIOLIB
33 select HAVE_GENERIC_HARDIRQS 35 select HAVE_GENERIC_HARDIRQS
34 select GENERIC_ATOMIC64 36 select GENERIC_ATOMIC64
@@ -45,9 +47,6 @@ config GENERIC_BUG
45config ZONE_DMA 47config ZONE_DMA
46 def_bool y 48 def_bool y
47 49
48config GENERIC_FIND_NEXT_BIT
49 def_bool y
50
51config GENERIC_GPIO 50config GENERIC_GPIO
52 def_bool y 51 def_bool y
53 52
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index 2641731f24cd..e2a3d4c8ab9a 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -9,15 +9,6 @@ config DEBUG_STACKOVERFLOW
9 This option will cause messages to be printed if free stack space 9 This option will cause messages to be printed if free stack space
10 drops below a certain limit. 10 drops below a certain limit.
11 11
12config DEBUG_STACK_USAGE
13 bool "Enable stack utilization instrumentation"
14 depends on DEBUG_KERNEL
15 help
16 Enables the display of the minimum amount of free stack which each
17 task has ever had available in the sysrq-T output.
18
19 This option will slow down process creation somewhat.
20
21config DEBUG_VERBOSE 12config DEBUG_VERBOSE
22 bool "Verbose fault messages" 13 bool "Verbose fault messages"
23 default y 14 default y
@@ -32,7 +23,7 @@ config DEBUG_VERBOSE
32 Most people should say N here. 23 Most people should say N here.
33 24
34config DEBUG_MMRS 25config DEBUG_MMRS
35 bool "Generate Blackfin MMR tree" 26 tristate "Generate Blackfin MMR tree"
36 select DEBUG_FS 27 select DEBUG_FS
37 help 28 help
38 Create a tree of Blackfin MMRs via the debugfs tree. If 29 Create a tree of Blackfin MMRs via the debugfs tree. If
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
index 95cf2ba9de17..8465b3e6b862 100644
--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -121,13 +121,11 @@ CONFIG_LOGO=y
121# CONFIG_LOGO_LINUX_VGA16 is not set 121# CONFIG_LOGO_LINUX_VGA16 is not set
122# CONFIG_LOGO_LINUX_CLUT224 is not set 122# CONFIG_LOGO_LINUX_CLUT224 is not set
123# CONFIG_LOGO_BLACKFIN_VGA16 is not set 123# CONFIG_LOGO_BLACKFIN_VGA16 is not set
124CONFIG_SOUND=m 124CONFIG_SOUND=y
125CONFIG_SND=m 125CONFIG_SND=y
126CONFIG_SND_SOC=m 126CONFIG_SND_SOC=y
127CONFIG_SND_BF5XX_I2S=m 127CONFIG_SND_BF5XX_I2S=y
128CONFIG_SND_BF5XX_SOC_SSM2602=m 128CONFIG_SND_BF5XX_SOC_SSM2602=y
129CONFIG_SND_BF5XX_AC97=m
130CONFIG_SND_BF5XX_SOC_AD1980=m
131CONFIG_HID_A4TECH=y 129CONFIG_HID_A4TECH=y
132CONFIG_HID_APPLE=y 130CONFIG_HID_APPLE=y
133CONFIG_HID_BELKIN=y 131CONFIG_HID_BELKIN=y
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 8be8e33fac52..5e7321b26040 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -96,7 +96,7 @@ CONFIG_SERIAL_BFIN_UART1=y
96# CONFIG_HW_RANDOM is not set 96# CONFIG_HW_RANDOM is not set
97CONFIG_I2C=y 97CONFIG_I2C=y
98CONFIG_I2C_CHARDEV=m 98CONFIG_I2C_CHARDEV=m
99CONFIG_I2C_BLACKFIN_TWI=m 99CONFIG_I2C_BLACKFIN_TWI=y
100CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 100CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
101CONFIG_SPI=y 101CONFIG_SPI=y
102CONFIG_SPI_BFIN=y 102CONFIG_SPI_BFIN=y
@@ -115,13 +115,11 @@ CONFIG_LOGO=y
115# CONFIG_LOGO_LINUX_VGA16 is not set 115# CONFIG_LOGO_LINUX_VGA16 is not set
116# CONFIG_LOGO_LINUX_CLUT224 is not set 116# CONFIG_LOGO_LINUX_CLUT224 is not set
117# CONFIG_LOGO_BLACKFIN_VGA16 is not set 117# CONFIG_LOGO_BLACKFIN_VGA16 is not set
118CONFIG_SOUND=m 118CONFIG_SOUND=y
119CONFIG_SND=m 119CONFIG_SND=y
120CONFIG_SND_SOC=m 120CONFIG_SND_SOC=y
121CONFIG_SND_BF5XX_I2S=m 121CONFIG_SND_BF5XX_I2S=y
122CONFIG_SND_BF5XX_SOC_SSM2602=m 122CONFIG_SND_BF5XX_SOC_SSM2602=y
123CONFIG_SND_BF5XX_AC97=m
124CONFIG_SND_BF5XX_SOC_AD1980=m
125CONFIG_HID_A4TECH=y 123CONFIG_HID_A4TECH=y
126CONFIG_HID_APPLE=y 124CONFIG_HID_APPLE=y
127CONFIG_HID_BELKIN=y 125CONFIG_HID_BELKIN=y
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 0aafde6c8c2d..b90d3792ed52 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -99,8 +99,6 @@ CONFIG_SND_PCM_OSS=m
99CONFIG_SND_SOC=m 99CONFIG_SND_SOC=m
100CONFIG_SND_BF5XX_I2S=m 100CONFIG_SND_BF5XX_I2S=m
101CONFIG_SND_BF5XX_SOC_AD73311=m 101CONFIG_SND_BF5XX_SOC_AD73311=m
102CONFIG_SND_BF5XX_AC97=m
103CONFIG_SND_BF5XX_SOC_AD1980=m
104# CONFIG_USB_SUPPORT is not set 102# CONFIG_USB_SUPPORT is not set
105CONFIG_RTC_CLASS=y 103CONFIG_RTC_CLASS=y
106CONFIG_RTC_DRV_BFIN=y 104CONFIG_RTC_DRV_BFIN=y
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index c9077fb58135..005362537a7b 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -110,8 +110,6 @@ CONFIG_SND_PCM_OSS=m
110CONFIG_SND_SOC=m 110CONFIG_SND_SOC=m
111CONFIG_SND_BF5XX_I2S=m 111CONFIG_SND_BF5XX_I2S=m
112CONFIG_SND_BF5XX_SOC_AD73311=m 112CONFIG_SND_BF5XX_SOC_AD73311=m
113CONFIG_SND_BF5XX_AC97=m
114CONFIG_SND_BF5XX_SOC_AD1980=m
115# CONFIG_USB_SUPPORT is not set 113# CONFIG_USB_SUPPORT is not set
116CONFIG_RTC_CLASS=y 114CONFIG_RTC_CLASS=y
117CONFIG_RTC_DRV_BFIN=y 115CONFIG_RTC_DRV_BFIN=y
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index 121cc04d877d..17bcbf60bcae 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -49,16 +49,6 @@ extern void dump_bfin_trace_buffer(void);
49#define dump_bfin_trace_buffer() 49#define dump_bfin_trace_buffer()
50#endif 50#endif
51 51
52/* init functions only */
53extern int init_arch_irq(void);
54extern void init_exception_vectors(void);
55extern void program_IAR(void);
56
57extern asmlinkage void lower_to_irq14(void);
58extern asmlinkage void bfin_return_from_exception(void);
59extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
60extern int bfin_internal_set_wake(unsigned int irq, unsigned int state);
61
62extern void *l1_data_A_sram_alloc(size_t); 52extern void *l1_data_A_sram_alloc(size_t);
63extern void *l1_data_B_sram_alloc(size_t); 53extern void *l1_data_B_sram_alloc(size_t);
64extern void *l1_inst_sram_alloc(size_t); 54extern void *l1_inst_sram_alloc(size_t);
diff --git a/arch/blackfin/include/asm/bfin_pfmon.h b/arch/blackfin/include/asm/bfin_pfmon.h
new file mode 100644
index 000000000000..accd47e2db40
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_pfmon.h
@@ -0,0 +1,44 @@
1/*
2 * Blackfin Performance Monitor definitions
3 *
4 * Copyright 2005-2011 Analog Devices Inc.
5 *
6 * Licensed under the ADI BSD license or GPL-2 (or later).
7 */
8
9#ifndef __ASM_BFIN_PFMON_H__
10#define __ASM_BFIN_PFMON_H__
11
12/* PFCTL Masks */
13#define PFMON_MASK 0xff
14#define PFCEN_MASK 0x3
15#define PFCEN_DISABLE 0x0
16#define PFCEN_ENABLE_USER 0x1
17#define PFCEN_ENABLE_SUPV 0x2
18#define PFCEN_ENABLE_ALL (PFCEN_ENABLE_USER | PFCEN_ENABLE_SUPV)
19
20#define PFPWR_P 0
21#define PEMUSW0_P 2
22#define PFCEN0_P 3
23#define PFMON0_P 5
24#define PEMUSW1_P 13
25#define PFCEN1_P 14
26#define PFMON1_P 16
27#define PFCNT0_P 24
28#define PFCNT1_P 25
29
30#define PFPWR (1 << PFPWR_P)
31#define PEMUSW(n, x) ((x) << ((n) ? PEMUSW1_P : PEMUSW0_P))
32#define PEMUSW0 PEMUSW(0, 1)
33#define PEMUSW1 PEMUSW(1, 1)
34#define PFCEN(n, x) ((x) << ((n) ? PFCEN1_P : PFCEN0_P))
35#define PFCEN0 PFCEN(0, PFCEN_MASK)
36#define PFCEN1 PFCEN(1, PFCEN_MASK)
37#define PFCNT(n, x) ((x) << ((n) ? PFCNT1_P : PFCNT0_P))
38#define PFCNT0 PFCNT(0, 1)
39#define PFCNT1 PFCNT(1, 1)
40#define PFMON(n, x) ((x) << ((n) ? PFMON1_P : PFMON0_P))
41#define PFMON0 PFMON(0, PFMON_MASK)
42#define PFMON1 PFMON(1, PFMON_MASK)
43
44#endif
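
Editor's note: the PFCTL helpers above compose the Blackfin performance-counter control word from per-counter fields. A sketch that builds a value powering the block up and letting counter 0 count a placeholder event in both user and supervisor mode; only the needed subset of the macros is reproduced, and writing the PFCTL MMR itself is not shown.

#include <stdio.h>

#define PFCEN_ENABLE_USER 0x1
#define PFCEN_ENABLE_SUPV 0x2
#define PFCEN_ENABLE_ALL  (PFCEN_ENABLE_USER | PFCEN_ENABLE_SUPV)

#define PFPWR_P  0
#define PFCEN0_P 3
#define PFMON0_P 5
#define PFCEN1_P 14
#define PFMON1_P 16

#define PFPWR        (1 << PFPWR_P)
#define PFCEN(n, x)  ((x) << ((n) ? PFCEN1_P : PFCEN0_P))
#define PFMON(n, x)  ((x) << ((n) ? PFMON1_P : PFMON0_P))

int main(void)
{
	unsigned int pfctl = PFPWR
	                   | PFCEN(0, PFCEN_ENABLE_ALL)  /* count user + supervisor */
	                   | PFMON(0, 0x2a);             /* placeholder event code  */

	printf("PFCTL = 0x%08x\n", pfctl);
	return 0;
}
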
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index d27600c262c2..f8568a31d0ab 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -100,6 +100,10 @@ struct sport_register {
100}; 100};
101#undef __BFP 101#undef __BFP
102 102
103struct bfin_snd_platform_data {
104 const unsigned short *pin_req;
105};
106
103#define bfin_read_sport_rx32(base) \ 107#define bfin_read_sport_rx32(base) \
104({ \ 108({ \
105 struct sport_register *__mmrs = (void *)base; \ 109 struct sport_register *__mmrs = (void *)base; \
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 77135b62818e..9a5b2c572ebf 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -39,8 +39,13 @@ extern void blackfin_invalidate_entire_icache(void);
39 39
40static inline void flush_icache_range(unsigned start, unsigned end) 40static inline void flush_icache_range(unsigned start, unsigned end)
41{ 41{
42#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK) 42#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
43 blackfin_dcache_flush_range(start, end); 43 if (end <= physical_mem_end)
44 blackfin_dcache_flush_range(start, end);
45#endif
46#if defined(CONFIG_BFIN_L2_WRITEBACK)
47 if (start >= L2_START && end <= L2_START + L2_LENGTH)
48 blackfin_dcache_flush_range(start, end);
44#endif 49#endif
45 50
46 /* Make sure all write buffers in the data side of the core 51 /* Make sure all write buffers in the data side of the core
@@ -52,9 +57,17 @@ static inline void flush_icache_range(unsigned start, unsigned end)
52 * the pipeline. 57 * the pipeline.
53 */ 58 */
54 SSYNC(); 59 SSYNC();
55#if defined(CONFIG_BFIN_ICACHE) 60#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
56 blackfin_icache_flush_range(start, end); 61 if (end <= physical_mem_end) {
57 flush_icache_range_others(start, end); 62 blackfin_icache_flush_range(start, end);
63 flush_icache_range_others(start, end);
64 }
65#endif
66#if defined(CONFIG_BFIN_L2_ICACHEABLE)
67 if (start >= L2_START && end <= L2_START + L2_LENGTH) {
68 blackfin_icache_flush_range(start, end);
69 flush_icache_range_others(start, end);
70 }
58#endif 71#endif
59} 72}
60 73
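
Editor's note: flush_icache_range() above now only flushes caches whose backing region actually contains the requested range -- external memory bounded by physical_mem_end, or the L2 SRAM window. A sketch of those containment tests; the memory bounds are placeholders for illustration.

#include <stdio.h>

#define PHYSICAL_MEM_END 0x04000000u   /* placeholder: 64 MiB of SDRAM */
#define L2_START         0xFEB00000u   /* placeholder L2 SRAM window   */
#define L2_LENGTH        0x00020000u

static int in_extmem(unsigned long start, unsigned long end)
{
	(void)start;
	return end <= PHYSICAL_MEM_END;
}

static int in_l2(unsigned long start, unsigned long end)
{
	return start >= L2_START && end <= L2_START + L2_LENGTH;
}

int main(void)
{
	printf("extmem range flushed: %d\n", in_extmem(0x1000, 0x2000));
	printf("L2 range flushed:     %d\n", in_l2(0xFEB00100, 0xFEB00200));
	printf("out-of-range skipped: %d\n", in_extmem(0xFF800000, 0xFF801000));
	return 0;
}
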
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index 16883e582e3c..05043786da21 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -10,11 +10,8 @@
10 10
11#include <linux/percpu.h> 11#include <linux/percpu.h>
12 12
13struct task_struct;
14
15struct blackfin_cpudata { 13struct blackfin_cpudata {
16 struct cpu cpu; 14 struct cpu cpu;
17 struct task_struct *idle;
18 unsigned int imemctl; 15 unsigned int imemctl;
19 unsigned int dmemctl; 16 unsigned int dmemctl;
20}; 17};
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index 7600fe0696af..823679011457 100644
--- a/arch/blackfin/include/asm/def_LPBlackfin.h
+++ b/arch/blackfin/include/asm/def_LPBlackfin.h
@@ -52,10 +52,10 @@
52 52
53#define bfin_read(addr) \ 53#define bfin_read(addr) \
54({ \ 54({ \
55 sizeof(*(addr)) == 1 ? bfin_read8(addr) : \ 55 sizeof(*(addr)) == 1 ? bfin_read8(addr) : \
56 sizeof(*(addr)) == 2 ? bfin_read16(addr) : \ 56 sizeof(*(addr)) == 2 ? bfin_read16(addr) : \
57 sizeof(*(addr)) == 4 ? bfin_read32(addr) : \ 57 sizeof(*(addr)) == 4 ? bfin_read32(addr) : \
58 ({ BUG(); 0; }); \ 58 ({ BUG(); 0; }); \
59}) 59})
60#define bfin_write(addr, val) \ 60#define bfin_write(addr, val) \
61do { \ 61do { \
@@ -69,13 +69,13 @@ do { \
69 69
70#define bfin_write_or(addr, bits) \ 70#define bfin_write_or(addr, bits) \
71do { \ 71do { \
72 void *__addr = (void *)(addr); \ 72 typeof(addr) __addr = (addr); \
73 bfin_write(__addr, bfin_read(__addr) | (bits)); \ 73 bfin_write(__addr, bfin_read(__addr) | (bits)); \
74} while (0) 74} while (0)
75 75
76#define bfin_write_and(addr, bits) \ 76#define bfin_write_and(addr, bits) \
77do { \ 77do { \
78 void *__addr = (void *)(addr); \ 78 typeof(addr) __addr = (addr); \
79 bfin_write(__addr, bfin_read(__addr) & (bits)); \ 79 bfin_write(__addr, bfin_read(__addr) & (bits)); \
80} while (0) 80} while (0)
81 81
diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h
index 7fbe42307b9a..ee73f79aef10 100644
--- a/arch/blackfin/include/asm/irq_handler.h
+++ b/arch/blackfin/include/asm/irq_handler.h
@@ -10,6 +10,16 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12 12
13/* init functions only */
14extern int __init init_arch_irq(void);
15extern void init_exception_vectors(void);
16extern void __init program_IAR(void);
17#ifdef init_mach_irq
18extern void __init init_mach_irq(void);
19#else
20# define init_mach_irq()
21#endif
22
13/* BASE LEVEL interrupt handler routines */ 23/* BASE LEVEL interrupt handler routines */
14asmlinkage void evt_exception(void); 24asmlinkage void evt_exception(void);
15asmlinkage void trap(void); 25asmlinkage void trap(void);
@@ -37,4 +47,19 @@ extern void return_from_exception(void);
37extern int bfin_request_exception(unsigned int exception, void (*handler)(void)); 47extern int bfin_request_exception(unsigned int exception, void (*handler)(void));
38extern int bfin_free_exception(unsigned int exception, void (*handler)(void)); 48extern int bfin_free_exception(unsigned int exception, void (*handler)(void));
39 49
50extern asmlinkage void lower_to_irq14(void);
51extern asmlinkage void bfin_return_from_exception(void);
52extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
53extern int bfin_internal_set_wake(unsigned int irq, unsigned int state);
54
55struct irq_data;
56extern void bfin_handle_irq(unsigned irq);
57extern void bfin_ack_noop(struct irq_data *);
58extern void bfin_internal_mask_irq(unsigned int irq);
59extern void bfin_internal_unmask_irq(unsigned int irq);
60
61struct irq_desc;
62extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *);
63extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *);
64
40#endif 65#endif
diff --git a/arch/blackfin/include/asm/kgdb.h b/arch/blackfin/include/asm/kgdb.h
index 8651afe12990..aaf884591b07 100644
--- a/arch/blackfin/include/asm/kgdb.h
+++ b/arch/blackfin/include/asm/kgdb.h
@@ -103,7 +103,12 @@ static inline void arch_kgdb_breakpoint(void)
103 asm("EXCPT 2;"); 103 asm("EXCPT 2;");
104} 104}
105#define BREAK_INSTR_SIZE 2 105#define BREAK_INSTR_SIZE 2
106#define CACHE_FLUSH_IS_SAFE 1 106#ifdef CONFIG_SMP
107# define CACHE_FLUSH_IS_SAFE 0
108#else
109# define CACHE_FLUSH_IS_SAFE 1
110#endif
111#define GDB_ADJUSTS_BREAK_OFFSET
107#define HW_INST_WATCHPOINT_NUM 6 112#define HW_INST_WATCHPOINT_NUM 6
108#define HW_WATCHPOINT_NUM 8 113#define HW_WATCHPOINT_NUM 8
109#define TYPE_INST_WATCHPOINT 0 114#define TYPE_INST_WATCHPOINT 0
diff --git a/arch/blackfin/include/asm/perf_event.h b/arch/blackfin/include/asm/perf_event.h
new file mode 100644
index 000000000000..3d2b1716322f
--- /dev/null
+++ b/arch/blackfin/include/asm/perf_event.h
@@ -0,0 +1 @@
1#define MAX_HWEVENTS 2
diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h
index 832d7c009a2c..7854d4367c15 100644
--- a/arch/blackfin/include/asm/ptrace.h
+++ b/arch/blackfin/include/asm/ptrace.h
@@ -102,14 +102,9 @@ struct pt_regs {
102/* user_mode returns true if only one bit is set in IPEND, other than the 102/* user_mode returns true if only one bit is set in IPEND, other than the
103 master interrupt enable. */ 103 master interrupt enable. */
104#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) 104#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1)))
105#define instruction_pointer(regs) ((regs)->pc)
106#define user_stack_pointer(regs) ((regs)->usp)
107#define profile_pc(regs) instruction_pointer(regs)
108extern void show_regs(struct pt_regs *); 105extern void show_regs(struct pt_regs *);
109 106
110#define arch_has_single_step() (1) 107#define arch_has_single_step() (1)
111extern void user_enable_single_step(struct task_struct *child);
112extern void user_disable_single_step(struct task_struct *child);
113/* common code demands this function */ 108/* common code demands this function */
114#define ptrace_disable(child) user_disable_single_step(child) 109#define ptrace_disable(child) user_disable_single_step(child)
115 110
@@ -130,6 +125,8 @@ extern int is_user_addr_valid(struct task_struct *child,
130 ((unsigned long)task_stack_page(task) + \ 125 ((unsigned long)task_stack_page(task) + \
131 (THREAD_SIZE - sizeof(struct pt_regs))) 126 (THREAD_SIZE - sizeof(struct pt_regs)))
132 127
128#include <asm-generic/ptrace.h>
129
133#endif /* __KERNEL__ */ 130#endif /* __KERNEL__ */
134 131
135#endif /* __ASSEMBLY__ */ 132#endif /* __ASSEMBLY__ */
diff --git a/arch/blackfin/include/mach-common/irq.h b/arch/blackfin/include/mach-common/irq.h
new file mode 100644
index 000000000000..cab14e911dc2
--- /dev/null
+++ b/arch/blackfin/include/mach-common/irq.h
@@ -0,0 +1,57 @@
1/*
2 * Common Blackfin IRQ definitions (i.e. the CEC)
3 *
4 * Copyright 2005-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later
7 */
8
9#ifndef _MACH_COMMON_IRQ_H_
10#define _MACH_COMMON_IRQ_H_
11
12/*
13 * Core events interrupt source definitions
14 *
15 * Event Source Event Name
16 * Emulation EMU 0 (highest priority)
17 * Reset RST 1
18 * NMI NMI 2
19 * Exception EVX 3
20 * Reserved -- 4
21 * Hardware Error IVHW 5
22 * Core Timer IVTMR 6
23 * Peripherals IVG7 7
24 * Peripherals IVG8 8
25 * Peripherals IVG9 9
26 * Peripherals IVG10 10
27 * Peripherals IVG11 11
28 * Peripherals IVG12 12
29 * Peripherals IVG13 13
30 * Softirq IVG14 14
31 * System Call IVG15 15 (lowest priority)
32 */
33
34/* The ABSTRACT IRQ definitions */
35#define IRQ_EMU 0 /* Emulation */
36#define IRQ_RST 1 /* reset */
37#define IRQ_NMI 2 /* Non Maskable */
38#define IRQ_EVX 3 /* Exception */
39#define IRQ_UNUSED 4 /* - unused interrupt */
40#define IRQ_HWERR 5 /* Hardware Error */
41#define IRQ_CORETMR 6 /* Core timer */
42
43#define BFIN_IRQ(x) ((x) + 7)
44
45#define IVG7 7
46#define IVG8 8
47#define IVG9 9
48#define IVG10 10
49#define IVG11 11
50#define IVG12 12
51#define IVG13 13
52#define IVG14 14
53#define IVG15 15
54
55#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
56
57#endif
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index ca5ccc777772..d550b24d9e9b 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -33,7 +33,10 @@ obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
 obj-$(CONFIG_STACKTRACE)             += stacktrace.o
 obj-$(CONFIG_DEBUG_VERBOSE)          += trace.o
 obj-$(CONFIG_BFIN_PSEUDODBG_INSNS)   += pseudodbg.o
+obj-$(CONFIG_PERF_EVENTS)            += perf_event.o
 
 # the kgdb test puts code into L2 and without linker
 # relaxation, we need to force long calls to/from it
 CFLAGS_kgdb_test.o := -mlong-calls -O0
+
+obj-$(CONFIG_DEBUG_MMRS)             += debug-mmrs.o
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 6ce8dce753c9..71dbaa4a48af 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -36,6 +36,11 @@ static int __init blackfin_dma_init(void)
 
 	printk(KERN_INFO "Blackfin DMA Controller\n");
 
+
+#if ANOMALY_05000480
+	bfin_write_DMAC_TC_PER(0x0111);
+#endif
+
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		atomic_set(&dma_ch[i].chan_status, 0);
 		dma_ch[i].regs = dma_io_base_addr[i];
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 170cf90735ba..bcf8cf6fe412 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -10,10 +10,12 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <asm/blackfin.h>
 #include <asm/gpio.h>
 #include <asm/portmux.h>
 #include <linux/irq.h>
+#include <asm/irq_handler.h>
 
 #if ANOMALY_05000311 || ANOMALY_05000323
 enum {
@@ -534,7 +536,7 @@ static const unsigned int sic_iwr_irqs[] = {
 #if defined(BF533_FAMILY)
 	IRQ_PROG_INTB
 #elif defined(BF537_FAMILY)
-	IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX
+	IRQ_PF_INTB_WATCH, IRQ_PORTG_INTB, IRQ_PH_INTB_MAC_TX
 #elif defined(BF538_FAMILY)
 	IRQ_PORTF_INTB
 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
@@ -1203,35 +1205,43 @@ void bfin_reset_boot_spi_cs(unsigned short pin)
 }
 
 #if defined(CONFIG_PROC_FS)
-static int gpio_proc_read(char *buf, char **start, off_t offset,
-			  int len, int *unused_i, void *unused_v)
+static int gpio_proc_show(struct seq_file *m, void *v)
 {
-	int c, irq, gpio, outlen = 0;
+	int c, irq, gpio;
 
 	for (c = 0; c < MAX_RESOURCES; c++) {
 		irq = is_reserved(gpio_irq, c, 1);
 		gpio = is_reserved(gpio, c, 1);
 		if (!check_gpio(c) && (gpio || irq))
-			len = sprintf(buf, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
+			seq_printf(m, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
 				get_label(c), (gpio && irq) ? " *" : "",
 				get_gpio_dir(c) ? "OUTPUT" : "INPUT");
 		else if (is_reserved(peri, c, 1))
-			len = sprintf(buf, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
+			seq_printf(m, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
 		else
 			continue;
-		buf += len;
-		outlen += len;
 	}
-	return outlen;
+
+	return 0;
 }
 
+static int gpio_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, gpio_proc_show, NULL);
+}
+
+static const struct file_operations gpio_proc_ops = {
+	.open		= gpio_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static __init int gpio_register_proc(void)
 {
 	struct proc_dir_entry *proc_gpio;
 
-	proc_gpio = create_proc_entry("gpio", S_IRUGO, NULL);
-	if (proc_gpio)
-		proc_gpio->read_proc = gpio_proc_read;
+	proc_gpio = proc_create("gpio", S_IRUGO, NULL, &gpio_proc_ops);
 	return proc_gpio != NULL;
 }
 __initcall(gpio_register_proc);
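
The bfin_gpio.c change above is the usual create_proc_entry()/read_proc to proc_create()/seq_file migration: the show routine only calls seq_printf(), and the seq_file core handles buffering and offsets. A generic sketch of the same pattern, with illustrative "example" names that are not part of this patch:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	/* seq_printf() appends to the seq_file buffer; no manual
	 * length/offset bookkeeping as with the old read_proc API. */
	seq_printf(m, "hello from seq_file\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_ops = {
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_proc_init(void)
{
	return proc_create("example", S_IRUGO, NULL, &example_proc_ops) ? 0 : -ENOMEM;
}
module_init(example_proc_init);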
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 2c264b51566a..c446591b961d 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -11,6 +11,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/io.h>
+#include <asm/irq_handler.h>
 
 /* Allow people to have their own Blackfin exception handler in a module */
 EXPORT_SYMBOL(bfin_return_from_exception);
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
new file mode 100644
index 000000000000..94b1d8a0256a
--- /dev/null
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -0,0 +1,1860 @@
1/*
2 * debugfs interface to core/system MMRs
3 *
4 * Copyright 2007-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later
7 */
8
9#include <linux/debugfs.h>
10#include <linux/fs.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include <asm/blackfin.h>
15#include <asm/gpio.h>
16#include <asm/bfin_can.h>
17#include <asm/bfin_dma.h>
18#include <asm/bfin_ppi.h>
19#include <asm/bfin_serial.h>
20#include <asm/bfin5xx_spi.h>
21#include <asm/bfin_twi.h>
22
23/* Common code defines PORT_MUX on us, so redirect the MMR back locally */
24#ifdef BFIN_PORT_MUX
25#undef PORT_MUX
26#define PORT_MUX BFIN_PORT_MUX
27#endif
28
29#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)addr)
30#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR)
31#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR)
32#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR)
33
34#define D_RO(name, bits) d_RO(#name, bits, name)
35#define D_WO(name, bits) d_WO(#name, bits, name)
36#define D32(name) d(#name, 32, name)
37#define D16(name) d(#name, 16, name)
38
39#define REGS_OFF(peri, mmr) offsetof(struct bfin_##peri##_regs, mmr)
40#define __REGS(peri, sname, rname) \
41 do { \
42 struct bfin_##peri##_regs r; \
43 void *addr = (void *)(base + REGS_OFF(peri, rname)); \
44 strcpy(_buf, sname); \
45 if (sizeof(r.rname) == 2) \
46 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, addr); \
47 else \
48 debugfs_create_x32(buf, S_IRUSR|S_IWUSR, parent, addr); \
49 } while (0)
50#define REGS_STR_PFX(buf, pfx, num) \
51 ({ \
52 buf + (num >= 0 ? \
53 sprintf(buf, #pfx "%i_", num) : \
54 sprintf(buf, #pfx "_")); \
55 })
56#define REGS_STR_PFX_C(buf, pfx, num) \
57 ({ \
58 buf + (num >= 0 ? \
59 sprintf(buf, #pfx "%c_", 'A' + num) : \
60 sprintf(buf, #pfx "_")); \
61 })
62
63/*
64 * Core registers (not memory mapped)
65 */
66extern u32 last_seqstat;
67
68static int debug_cclk_get(void *data, u64 *val)
69{
70 *val = get_cclk();
71 return 0;
72}
73DEFINE_SIMPLE_ATTRIBUTE(fops_debug_cclk, debug_cclk_get, NULL, "0x%08llx\n");
74
75static int debug_sclk_get(void *data, u64 *val)
76{
77 *val = get_sclk();
78 return 0;
79}
80DEFINE_SIMPLE_ATTRIBUTE(fops_debug_sclk, debug_sclk_get, NULL, "0x%08llx\n");
81
82#define DEFINE_SYSREG(sr, pre, post) \
83static int sysreg_##sr##_get(void *data, u64 *val) \
84{ \
85 unsigned long tmp; \
86 pre; \
87 __asm__ __volatile__("%0 = " #sr ";" : "=d"(tmp)); \
88 *val = tmp; \
89 return 0; \
90} \
91static int sysreg_##sr##_set(void *data, u64 val) \
92{ \
93 unsigned long tmp = val; \
94 __asm__ __volatile__(#sr " = %0;" : : "d"(tmp)); \
95 post; \
96 return 0; \
97} \
98DEFINE_SIMPLE_ATTRIBUTE(fops_sysreg_##sr, sysreg_##sr##_get, sysreg_##sr##_set, "0x%08llx\n")
99
100DEFINE_SYSREG(cycles, , );
101DEFINE_SYSREG(cycles2, __asm__ __volatile__("%0 = cycles;" : "=d"(tmp)), );
102DEFINE_SYSREG(emudat, , );
103DEFINE_SYSREG(seqstat, , );
104DEFINE_SYSREG(syscfg, , CSYNC());
105#define D_SYSREG(sr) debugfs_create_file(#sr, S_IRUSR|S_IWUSR, parent, NULL, &fops_sysreg_##sr)
106
107/*
108 * CAN
109 */
110#define CAN_OFF(mmr) REGS_OFF(can, mmr)
111#define __CAN(uname, lname) __REGS(can, #uname, lname)
112static void __init __maybe_unused
113bfin_debug_mmrs_can(struct dentry *parent, unsigned long base, int num)
114{
115 static struct dentry *am, *mb;
116 int i, j;
117 char buf[32], *_buf = REGS_STR_PFX(buf, CAN, num);
118
119 if (!am) {
120 am = debugfs_create_dir("am", parent);
121 mb = debugfs_create_dir("mb", parent);
122 }
123
124 __CAN(MC1, mc1);
125 __CAN(MD1, md1);
126 __CAN(TRS1, trs1);
127 __CAN(TRR1, trr1);
128 __CAN(TA1, ta1);
129 __CAN(AA1, aa1);
130 __CAN(RMP1, rmp1);
131 __CAN(RML1, rml1);
132 __CAN(MBTIF1, mbtif1);
133 __CAN(MBRIF1, mbrif1);
134 __CAN(MBIM1, mbim1);
135 __CAN(RFH1, rfh1);
136 __CAN(OPSS1, opss1);
137
138 __CAN(MC2, mc2);
139 __CAN(MD2, md2);
140 __CAN(TRS2, trs2);
141 __CAN(TRR2, trr2);
142 __CAN(TA2, ta2);
143 __CAN(AA2, aa2);
144 __CAN(RMP2, rmp2);
145 __CAN(RML2, rml2);
146 __CAN(MBTIF2, mbtif2);
147 __CAN(MBRIF2, mbrif2);
148 __CAN(MBIM2, mbim2);
149 __CAN(RFH2, rfh2);
150 __CAN(OPSS2, opss2);
151
152 __CAN(CLOCK, clock);
153 __CAN(TIMING, timing);
154 __CAN(DEBUG, debug);
155 __CAN(STATUS, status);
156 __CAN(CEC, cec);
157 __CAN(GIS, gis);
158 __CAN(GIM, gim);
159 __CAN(GIF, gif);
160 __CAN(CONTROL, control);
161 __CAN(INTR, intr);
162 __CAN(VERSION, version);
163 __CAN(MBTD, mbtd);
164 __CAN(EWR, ewr);
165 __CAN(ESR, esr);
166 /*__CAN(UCREG, ucreg); no longer exists */
167 __CAN(UCCNT, uccnt);
168 __CAN(UCRC, ucrc);
169 __CAN(UCCNF, uccnf);
170 __CAN(VERSION2, version2);
171
172 for (i = 0; i < 32; ++i) {
173 sprintf(_buf, "AM%02iL", i);
174 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
175 (u16 *)(base + CAN_OFF(msk[i].aml)));
176 sprintf(_buf, "AM%02iH", i);
177 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
178 (u16 *)(base + CAN_OFF(msk[i].amh)));
179
180 for (j = 0; j < 3; ++j) {
181 sprintf(_buf, "MB%02i_DATA%i", i, j);
182 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
183 (u16 *)(base + CAN_OFF(chl[i].data[j*2])));
184 }
185 sprintf(_buf, "MB%02i_LENGTH", i);
186 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
187 (u16 *)(base + CAN_OFF(chl[i].dlc)));
188 sprintf(_buf, "MB%02i_TIMESTAMP", i);
189 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
190 (u16 *)(base + CAN_OFF(chl[i].tsv)));
191 sprintf(_buf, "MB%02i_ID0", i);
192 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
193 (u16 *)(base + CAN_OFF(chl[i].id0)));
194 sprintf(_buf, "MB%02i_ID1", i);
195 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
196 (u16 *)(base + CAN_OFF(chl[i].id1)));
197 }
198}
199#define CAN(num) bfin_debug_mmrs_can(parent, CAN##num##_MC1, num)
200
201/*
202 * DMA
203 */
204#define __DMA(uname, lname) __REGS(dma, #uname, lname)
205static void __init __maybe_unused
206bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdma, const char *pfx)
207{
208 char buf[32], *_buf;
209
210 if (mdma)
211 _buf = buf + sprintf(buf, "%s_%c%i_", pfx, mdma, num);
212 else
213 _buf = buf + sprintf(buf, "%s%i_", pfx, num);
214
215 __DMA(NEXT_DESC_PTR, next_desc_ptr);
216 __DMA(START_ADDR, start_addr);
217 __DMA(CONFIG, config);
218 __DMA(X_COUNT, x_count);
219 __DMA(X_MODIFY, x_modify);
220 __DMA(Y_COUNT, y_count);
221 __DMA(Y_MODIFY, y_modify);
222 __DMA(CURR_DESC_PTR, curr_desc_ptr);
223 __DMA(CURR_ADDR, curr_addr);
224 __DMA(IRQ_STATUS, irq_status);
225 __DMA(PERIPHERAL_MAP, peripheral_map);
226 __DMA(CURR_X_COUNT, curr_x_count);
227 __DMA(CURR_Y_COUNT, curr_y_count);
228}
229#define _DMA(num, base, mdma, pfx) bfin_debug_mmrs_dma(parent, base, num, mdma, pfx "DMA")
230#define DMA(num) _DMA(num, DMA##num##_NEXT_DESC_PTR, 0, "")
231#define _MDMA(num, x) \
232 do { \
233 _DMA(num, x##DMA_D##num##_CONFIG, 'D', #x); \
234 _DMA(num, x##DMA_S##num##_CONFIG, 'S', #x); \
235 } while (0)
236#define MDMA(num) _MDMA(num, M)
237#define IMDMA(num) _MDMA(num, IM)
238
239/*
240 * EPPI
241 */
242#define __EPPI(uname, lname) __REGS(eppi, #uname, lname)
243static void __init __maybe_unused
244bfin_debug_mmrs_eppi(struct dentry *parent, unsigned long base, int num)
245{
246 char buf[32], *_buf = REGS_STR_PFX(buf, EPPI, num);
247 __EPPI(STATUS, status);
248 __EPPI(HCOUNT, hcount);
249 __EPPI(HDELAY, hdelay);
250 __EPPI(VCOUNT, vcount);
251 __EPPI(VDELAY, vdelay);
252 __EPPI(FRAME, frame);
253 __EPPI(LINE, line);
254 __EPPI(CLKDIV, clkdiv);
255 __EPPI(CONTROL, control);
256 __EPPI(FS1W_HBL, fs1w_hbl);
257 __EPPI(FS1P_AVPL, fs1p_avpl);
258 __EPPI(FS2W_LVB, fs2w_lvb);
259 __EPPI(FS2P_LAVF, fs2p_lavf);
260 __EPPI(CLIP, clip);
261}
262#define EPPI(num) bfin_debug_mmrs_eppi(parent, EPPI##num##_STATUS, num)
263
264/*
265 * General Purpose Timers
266 */
267#define GPTIMER_OFF(mmr) (TIMER0_##mmr - TIMER0_CONFIG)
268#define __GPTIMER(name) \
269 do { \
270 strcpy(_buf, #name); \
271 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, (u16 *)(base + GPTIMER_OFF(name))); \
272 } while (0)
273static void __init __maybe_unused
274bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
275{
276 char buf[32], *_buf = REGS_STR_PFX(buf, TIMER, num);
277 __GPTIMER(CONFIG);
278 __GPTIMER(COUNTER);
279 __GPTIMER(PERIOD);
280 __GPTIMER(WIDTH);
281}
282#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
283
284/*
285 * Handshake MDMA
286 */
287#define __HMDMA(uname, lname) __REGS(hmdma, #uname, lname)
288static void __init __maybe_unused
289bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num)
290{
291 char buf[32], *_buf = REGS_STR_PFX(buf, HMDMA, num);
292 __HMDMA(CONTROL, control);
293 __HMDMA(ECINIT, ecinit);
294 __HMDMA(BCINIT, bcinit);
295 __HMDMA(ECURGENT, ecurgent);
296 __HMDMA(ECOVERFLOW, ecoverflow);
297 __HMDMA(ECOUNT, ecount);
298 __HMDMA(BCOUNT, bcount);
299}
300#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num)
301
302/*
303 * Port/GPIO
304 */
305#define bfin_gpio_regs gpio_port_t
306#define __PORT(uname, lname) __REGS(gpio, #uname, lname)
307static void __init __maybe_unused
308bfin_debug_mmrs_port(struct dentry *parent, unsigned long base, int num)
309{
310 char buf[32], *_buf;
311#ifdef __ADSPBF54x__
312 _buf = REGS_STR_PFX_C(buf, PORT, num);
313 __PORT(FER, port_fer);
314 __PORT(SET, data_set);
315 __PORT(CLEAR, data_clear);
316 __PORT(DIR_SET, dir_set);
317 __PORT(DIR_CLEAR, dir_clear);
318 __PORT(INEN, inen);
319 __PORT(MUX, port_mux);
320#else
321 _buf = buf + sprintf(buf, "PORT%cIO_", num);
322 __PORT(CLEAR, data_clear);
323 __PORT(SET, data_set);
324 __PORT(TOGGLE, toggle);
325 __PORT(MASKA, maska);
326 __PORT(MASKA_CLEAR, maska_clear);
327 __PORT(MASKA_SET, maska_set);
328 __PORT(MASKA_TOGGLE, maska_toggle);
329 __PORT(MASKB, maskb);
330 __PORT(MASKB_CLEAR, maskb_clear);
331 __PORT(MASKB_SET, maskb_set);
332 __PORT(MASKB_TOGGLE, maskb_toggle);
333 __PORT(DIR, dir);
334 __PORT(POLAR, polar);
335 __PORT(EDGE, edge);
336 __PORT(BOTH, both);
337 __PORT(INEN, inen);
338#endif
339 _buf[-1] = '\0';
340 d(buf, 16, base + REGS_OFF(gpio, data));
341}
342#define PORT(base, num) bfin_debug_mmrs_port(parent, base, num)
343
344/*
345 * PPI
346 */
347#define __PPI(uname, lname) __REGS(ppi, #uname, lname)
348static void __init __maybe_unused
349bfin_debug_mmrs_ppi(struct dentry *parent, unsigned long base, int num)
350{
351 char buf[32], *_buf = REGS_STR_PFX(buf, PPI, num);
352 __PPI(CONTROL, control);
353 __PPI(STATUS, status);
354 __PPI(COUNT, count);
355 __PPI(DELAY, delay);
356 __PPI(FRAME, frame);
357}
358#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_STATUS, num)
359
360/*
361 * SPI
362 */
363#define __SPI(uname, lname) __REGS(spi, #uname, lname)
364static void __init __maybe_unused
365bfin_debug_mmrs_spi(struct dentry *parent, unsigned long base, int num)
366{
367 char buf[32], *_buf = REGS_STR_PFX(buf, SPI, num);
368 __SPI(CTL, ctl);
369 __SPI(FLG, flg);
370 __SPI(STAT, stat);
371 __SPI(TDBR, tdbr);
372 __SPI(RDBR, rdbr);
373 __SPI(BAUD, baud);
374 __SPI(SHADOW, shadow);
375}
376#define SPI(num) bfin_debug_mmrs_spi(parent, SPI##num##_REGBASE, num)
377
378/*
379 * SPORT
380 */
381static inline int sport_width(void *mmr)
382{
383 unsigned long lmmr = (unsigned long)mmr;
384 if ((lmmr & 0xff) == 0x10)
385 /* SPORT#_TX has 0x10 offset -> SPORT#_TCR2 has 0x04 offset */
386 lmmr -= 0xc;
387 else
388 /* SPORT#_RX has 0x18 offset -> SPORT#_RCR2 has 0x24 offset */
389 lmmr += 0xc;
390 /* extract SLEN field from control register 2 and add 1 */
391 return (bfin_read16(lmmr) & 0x1f) + 1;
392}
393static int sport_set(void *mmr, u64 val)
394{
395 unsigned long flags;
396 local_irq_save(flags);
397 if (sport_width(mmr) <= 16)
398 bfin_write16(mmr, val);
399 else
400 bfin_write32(mmr, val);
401 local_irq_restore(flags);
402 return 0;
403}
404static int sport_get(void *mmr, u64 *val)
405{
406 unsigned long flags;
407 local_irq_save(flags);
408 if (sport_width(mmr) <= 16)
409 *val = bfin_read16(mmr);
410 else
411 *val = bfin_read32(mmr);
412 local_irq_restore(flags);
413 return 0;
414}
415DEFINE_SIMPLE_ATTRIBUTE(fops_sport, sport_get, sport_set, "0x%08llx\n");
416/*DEFINE_SIMPLE_ATTRIBUTE(fops_sport_ro, sport_get, NULL, "0x%08llx\n");*/
417DEFINE_SIMPLE_ATTRIBUTE(fops_sport_wo, NULL, sport_set, "0x%08llx\n");
418#define SPORT_OFF(mmr) (SPORT0_##mmr - SPORT0_TCR1)
419#define _D_SPORT(name, perms, fops) \
420 do { \
421 strcpy(_buf, #name); \
422 debugfs_create_file(buf, perms, parent, (void *)(base + SPORT_OFF(name)), fops); \
423 } while (0)
424#define __SPORT_RW(name) _D_SPORT(name, S_IRUSR|S_IWUSR, &fops_sport)
425#define __SPORT_RO(name) _D_SPORT(name, S_IRUSR, &fops_sport_ro)
426#define __SPORT_WO(name) _D_SPORT(name, S_IWUSR, &fops_sport_wo)
427#define __SPORT(name, bits) \
428 do { \
429 strcpy(_buf, #name); \
430 debugfs_create_x##bits(buf, S_IRUSR|S_IWUSR, parent, (u##bits *)(base + SPORT_OFF(name))); \
431 } while (0)
432static void __init __maybe_unused
433bfin_debug_mmrs_sport(struct dentry *parent, unsigned long base, int num)
434{
435 char buf[32], *_buf = REGS_STR_PFX(buf, SPORT, num);
436 __SPORT(CHNL, 16);
437 __SPORT(MCMC1, 16);
438 __SPORT(MCMC2, 16);
439 __SPORT(MRCS0, 32);
440 __SPORT(MRCS1, 32);
441 __SPORT(MRCS2, 32);
442 __SPORT(MRCS3, 32);
443 __SPORT(MTCS0, 32);
444 __SPORT(MTCS1, 32);
445 __SPORT(MTCS2, 32);
446 __SPORT(MTCS3, 32);
447 __SPORT(RCLKDIV, 16);
448 __SPORT(RCR1, 16);
449 __SPORT(RCR2, 16);
450 __SPORT(RFSDIV, 16);
451 __SPORT_RW(RX);
452 __SPORT(STAT, 16);
453 __SPORT(TCLKDIV, 16);
454 __SPORT(TCR1, 16);
455 __SPORT(TCR2, 16);
456 __SPORT(TFSDIV, 16);
457 __SPORT_WO(TX);
458}
459#define SPORT(num) bfin_debug_mmrs_sport(parent, SPORT##num##_TCR1, num)
460
461/*
462 * TWI
463 */
464#define __TWI(uname, lname) __REGS(twi, #uname, lname)
465static void __init __maybe_unused
466bfin_debug_mmrs_twi(struct dentry *parent, unsigned long base, int num)
467{
468 char buf[32], *_buf = REGS_STR_PFX(buf, TWI, num);
469 __TWI(CLKDIV, clkdiv);
470 __TWI(CONTROL, control);
471 __TWI(SLAVE_CTL, slave_ctl);
472 __TWI(SLAVE_STAT, slave_stat);
473 __TWI(SLAVE_ADDR, slave_addr);
474 __TWI(MASTER_CTL, master_ctl);
475 __TWI(MASTER_STAT, master_stat);
476 __TWI(MASTER_ADDR, master_addr);
477 __TWI(INT_STAT, int_stat);
478 __TWI(INT_MASK, int_mask);
479 __TWI(FIFO_CTL, fifo_ctl);
480 __TWI(FIFO_STAT, fifo_stat);
481 __TWI(XMT_DATA8, xmt_data8);
482 __TWI(XMT_DATA16, xmt_data16);
483 __TWI(RCV_DATA8, rcv_data8);
484 __TWI(RCV_DATA16, rcv_data16);
485}
486#define TWI(num) bfin_debug_mmrs_twi(parent, TWI##num##_CLKDIV, num)
487
488/*
489 * UART
490 */
491#define __UART(uname, lname) __REGS(uart, #uname, lname)
492static void __init __maybe_unused
493bfin_debug_mmrs_uart(struct dentry *parent, unsigned long base, int num)
494{
495 char buf[32], *_buf = REGS_STR_PFX(buf, UART, num);
496#ifdef BFIN_UART_BF54X_STYLE
497 __UART(DLL, dll);
498 __UART(DLH, dlh);
499 __UART(GCTL, gctl);
500 __UART(LCR, lcr);
501 __UART(MCR, mcr);
502 __UART(LSR, lsr);
503 __UART(MSR, msr);
504 __UART(SCR, scr);
505 __UART(IER_SET, ier_set);
506 __UART(IER_CLEAR, ier_clear);
507 __UART(THR, thr);
508 __UART(RBR, rbr);
509#else
510 __UART(DLL, dll);
511 __UART(THR, thr);
512 __UART(RBR, rbr);
513 __UART(DLH, dlh);
514 __UART(IER, ier);
515 __UART(IIR, iir);
516 __UART(LCR, lcr);
517 __UART(MCR, mcr);
518 __UART(LSR, lsr);
519 __UART(MSR, msr);
520 __UART(SCR, scr);
521 __UART(GCTL, gctl);
522#endif
523}
524#define UART(num) bfin_debug_mmrs_uart(parent, UART##num##_DLL, num)
525
526/*
527 * The actual debugfs generation
528 */
529static struct dentry *debug_mmrs_dentry;
530
531static int __init bfin_debug_mmrs_init(void)
532{
533 struct dentry *top, *parent;
534
535 pr_info("debug-mmrs: setting up Blackfin MMR debugfs\n");
536
537 top = debugfs_create_dir("blackfin", NULL);
538 if (top == NULL)
539 return -1;
540
541 parent = debugfs_create_dir("core_regs", top);
542 debugfs_create_file("cclk", S_IRUSR, parent, NULL, &fops_debug_cclk);
543 debugfs_create_file("sclk", S_IRUSR, parent, NULL, &fops_debug_sclk);
544 debugfs_create_x32("last_seqstat", S_IRUSR, parent, &last_seqstat);
545 D_SYSREG(cycles);
546 D_SYSREG(cycles2);
547 D_SYSREG(emudat);
548 D_SYSREG(seqstat);
549 D_SYSREG(syscfg);
550
551 /* Core MMRs */
552 parent = debugfs_create_dir("ctimer", top);
553 D32(TCNTL);
554 D32(TCOUNT);
555 D32(TPERIOD);
556 D32(TSCALE);
557
558 parent = debugfs_create_dir("cec", top);
559 D32(EVT0);
560 D32(EVT1);
561 D32(EVT2);
562 D32(EVT3);
563 D32(EVT4);
564 D32(EVT5);
565 D32(EVT6);
566 D32(EVT7);
567 D32(EVT8);
568 D32(EVT9);
569 D32(EVT10);
570 D32(EVT11);
571 D32(EVT12);
572 D32(EVT13);
573 D32(EVT14);
574 D32(EVT15);
575 D32(EVT_OVERRIDE);
576 D32(IMASK);
577 D32(IPEND);
578 D32(ILAT);
579 D32(IPRIO);
580
581 parent = debugfs_create_dir("debug", top);
582 D32(DBGSTAT);
583 D32(DSPID);
584
585 parent = debugfs_create_dir("mmu", top);
586 D32(SRAM_BASE_ADDRESS);
587 D32(DCPLB_ADDR0);
588 D32(DCPLB_ADDR10);
589 D32(DCPLB_ADDR11);
590 D32(DCPLB_ADDR12);
591 D32(DCPLB_ADDR13);
592 D32(DCPLB_ADDR14);
593 D32(DCPLB_ADDR15);
594 D32(DCPLB_ADDR1);
595 D32(DCPLB_ADDR2);
596 D32(DCPLB_ADDR3);
597 D32(DCPLB_ADDR4);
598 D32(DCPLB_ADDR5);
599 D32(DCPLB_ADDR6);
600 D32(DCPLB_ADDR7);
601 D32(DCPLB_ADDR8);
602 D32(DCPLB_ADDR9);
603 D32(DCPLB_DATA0);
604 D32(DCPLB_DATA10);
605 D32(DCPLB_DATA11);
606 D32(DCPLB_DATA12);
607 D32(DCPLB_DATA13);
608 D32(DCPLB_DATA14);
609 D32(DCPLB_DATA15);
610 D32(DCPLB_DATA1);
611 D32(DCPLB_DATA2);
612 D32(DCPLB_DATA3);
613 D32(DCPLB_DATA4);
614 D32(DCPLB_DATA5);
615 D32(DCPLB_DATA6);
616 D32(DCPLB_DATA7);
617 D32(DCPLB_DATA8);
618 D32(DCPLB_DATA9);
619 D32(DCPLB_FAULT_ADDR);
620 D32(DCPLB_STATUS);
621 D32(DMEM_CONTROL);
622 D32(DTEST_COMMAND);
623 D32(DTEST_DATA0);
624 D32(DTEST_DATA1);
625
626 D32(ICPLB_ADDR0);
627 D32(ICPLB_ADDR1);
628 D32(ICPLB_ADDR2);
629 D32(ICPLB_ADDR3);
630 D32(ICPLB_ADDR4);
631 D32(ICPLB_ADDR5);
632 D32(ICPLB_ADDR6);
633 D32(ICPLB_ADDR7);
634 D32(ICPLB_ADDR8);
635 D32(ICPLB_ADDR9);
636 D32(ICPLB_ADDR10);
637 D32(ICPLB_ADDR11);
638 D32(ICPLB_ADDR12);
639 D32(ICPLB_ADDR13);
640 D32(ICPLB_ADDR14);
641 D32(ICPLB_ADDR15);
642 D32(ICPLB_DATA0);
643 D32(ICPLB_DATA1);
644 D32(ICPLB_DATA2);
645 D32(ICPLB_DATA3);
646 D32(ICPLB_DATA4);
647 D32(ICPLB_DATA5);
648 D32(ICPLB_DATA6);
649 D32(ICPLB_DATA7);
650 D32(ICPLB_DATA8);
651 D32(ICPLB_DATA9);
652 D32(ICPLB_DATA10);
653 D32(ICPLB_DATA11);
654 D32(ICPLB_DATA12);
655 D32(ICPLB_DATA13);
656 D32(ICPLB_DATA14);
657 D32(ICPLB_DATA15);
658 D32(ICPLB_FAULT_ADDR);
659 D32(ICPLB_STATUS);
660 D32(IMEM_CONTROL);
661 if (!ANOMALY_05000481) {
662 D32(ITEST_COMMAND);
663 D32(ITEST_DATA0);
664 D32(ITEST_DATA1);
665 }
666
667 parent = debugfs_create_dir("perf", top);
668 D32(PFCNTR0);
669 D32(PFCNTR1);
670 D32(PFCTL);
671
672 parent = debugfs_create_dir("trace", top);
673 D32(TBUF);
674 D32(TBUFCTL);
675 D32(TBUFSTAT);
676
677 parent = debugfs_create_dir("watchpoint", top);
678 D32(WPIACTL);
679 D32(WPIA0);
680 D32(WPIA1);
681 D32(WPIA2);
682 D32(WPIA3);
683 D32(WPIA4);
684 D32(WPIA5);
685 D32(WPIACNT0);
686 D32(WPIACNT1);
687 D32(WPIACNT2);
688 D32(WPIACNT3);
689 D32(WPIACNT4);
690 D32(WPIACNT5);
691 D32(WPDACTL);
692 D32(WPDA0);
693 D32(WPDA1);
694 D32(WPDACNT0);
695 D32(WPDACNT1);
696 D32(WPSTAT);
697
698 /* System MMRs */
699#ifdef ATAPI_CONTROL
700 parent = debugfs_create_dir("atapi", top);
701 D16(ATAPI_CONTROL);
702 D16(ATAPI_DEV_ADDR);
703 D16(ATAPI_DEV_RXBUF);
704 D16(ATAPI_DEV_TXBUF);
705 D16(ATAPI_DMA_TFRCNT);
706 D16(ATAPI_INT_MASK);
707 D16(ATAPI_INT_STATUS);
708 D16(ATAPI_LINE_STATUS);
709 D16(ATAPI_MULTI_TIM_0);
710 D16(ATAPI_MULTI_TIM_1);
711 D16(ATAPI_MULTI_TIM_2);
712 D16(ATAPI_PIO_TFRCNT);
713 D16(ATAPI_PIO_TIM_0);
714 D16(ATAPI_PIO_TIM_1);
715 D16(ATAPI_REG_TIM_0);
716 D16(ATAPI_SM_STATE);
717 D16(ATAPI_STATUS);
718 D16(ATAPI_TERMINATE);
719 D16(ATAPI_UDMAOUT_TFRCNT);
720 D16(ATAPI_ULTRA_TIM_0);
721 D16(ATAPI_ULTRA_TIM_1);
722 D16(ATAPI_ULTRA_TIM_2);
723 D16(ATAPI_ULTRA_TIM_3);
724 D16(ATAPI_UMAIN_TFRCNT);
725 D16(ATAPI_XFER_LEN);
726#endif
727
728#if defined(CAN_MC1) || defined(CAN0_MC1) || defined(CAN1_MC1)
729 parent = debugfs_create_dir("can", top);
730# ifdef CAN_MC1
731 bfin_debug_mmrs_can(parent, CAN_MC1, -1);
732# endif
733# ifdef CAN0_MC1
734 CAN(0);
735# endif
736# ifdef CAN1_MC1
737 CAN(1);
738# endif
739#endif
740
741#ifdef CNT_COMMAND
742 parent = debugfs_create_dir("counter", top);
743 D16(CNT_COMMAND);
744 D16(CNT_CONFIG);
745 D32(CNT_COUNTER);
746 D16(CNT_DEBOUNCE);
747 D16(CNT_IMASK);
748 D32(CNT_MAX);
749 D32(CNT_MIN);
750 D16(CNT_STATUS);
751#endif
752
753 parent = debugfs_create_dir("dmac", top);
754#ifdef DMA_TC_CNT
755 D16(DMAC_TC_CNT);
756 D16(DMAC_TC_PER);
757#endif
758#ifdef DMAC0_TC_CNT
759 D16(DMAC0_TC_CNT);
760 D16(DMAC0_TC_PER);
761#endif
762#ifdef DMAC1_TC_CNT
763 D16(DMAC1_TC_CNT);
764 D16(DMAC1_TC_PER);
765#endif
766#ifdef DMAC1_PERIMUX
767 D16(DMAC1_PERIMUX);
768#endif
769
770#ifdef __ADSPBF561__
771 /* XXX: should rewrite the MMR map */
772# define DMA0_NEXT_DESC_PTR DMA2_0_NEXT_DESC_PTR
773# define DMA1_NEXT_DESC_PTR DMA2_1_NEXT_DESC_PTR
774# define DMA2_NEXT_DESC_PTR DMA2_2_NEXT_DESC_PTR
775# define DMA3_NEXT_DESC_PTR DMA2_3_NEXT_DESC_PTR
776# define DMA4_NEXT_DESC_PTR DMA2_4_NEXT_DESC_PTR
777# define DMA5_NEXT_DESC_PTR DMA2_5_NEXT_DESC_PTR
778# define DMA6_NEXT_DESC_PTR DMA2_6_NEXT_DESC_PTR
779# define DMA7_NEXT_DESC_PTR DMA2_7_NEXT_DESC_PTR
780# define DMA8_NEXT_DESC_PTR DMA2_8_NEXT_DESC_PTR
781# define DMA9_NEXT_DESC_PTR DMA2_9_NEXT_DESC_PTR
782# define DMA10_NEXT_DESC_PTR DMA2_10_NEXT_DESC_PTR
783# define DMA11_NEXT_DESC_PTR DMA2_11_NEXT_DESC_PTR
784# define DMA12_NEXT_DESC_PTR DMA1_0_NEXT_DESC_PTR
785# define DMA13_NEXT_DESC_PTR DMA1_1_NEXT_DESC_PTR
786# define DMA14_NEXT_DESC_PTR DMA1_2_NEXT_DESC_PTR
787# define DMA15_NEXT_DESC_PTR DMA1_3_NEXT_DESC_PTR
788# define DMA16_NEXT_DESC_PTR DMA1_4_NEXT_DESC_PTR
789# define DMA17_NEXT_DESC_PTR DMA1_5_NEXT_DESC_PTR
790# define DMA18_NEXT_DESC_PTR DMA1_6_NEXT_DESC_PTR
791# define DMA19_NEXT_DESC_PTR DMA1_7_NEXT_DESC_PTR
792# define DMA20_NEXT_DESC_PTR DMA1_8_NEXT_DESC_PTR
793# define DMA21_NEXT_DESC_PTR DMA1_9_NEXT_DESC_PTR
794# define DMA22_NEXT_DESC_PTR DMA1_10_NEXT_DESC_PTR
795# define DMA23_NEXT_DESC_PTR DMA1_11_NEXT_DESC_PTR
796#endif
797 parent = debugfs_create_dir("dma", top);
798 DMA(0);
799 DMA(1);
800 DMA(1);
801 DMA(2);
802 DMA(3);
803 DMA(4);
804 DMA(5);
805 DMA(6);
806 DMA(7);
807#ifdef DMA8_NEXT_DESC_PTR
808 DMA(8);
809 DMA(9);
810 DMA(10);
811 DMA(11);
812#endif
813#ifdef DMA12_NEXT_DESC_PTR
814 DMA(12);
815 DMA(13);
816 DMA(14);
817 DMA(15);
818 DMA(16);
819 DMA(17);
820 DMA(18);
821 DMA(19);
822#endif
823#ifdef DMA20_NEXT_DESC_PTR
824 DMA(20);
825 DMA(21);
826 DMA(22);
827 DMA(23);
828#endif
829
830 parent = debugfs_create_dir("ebiu_amc", top);
831 D32(EBIU_AMBCTL0);
832 D32(EBIU_AMBCTL1);
833 D16(EBIU_AMGCTL);
834#ifdef EBIU_MBSCTL
835 D16(EBIU_MBSCTL);
836 D32(EBIU_ARBSTAT);
837 D32(EBIU_MODE);
838 D16(EBIU_FCTL);
839#endif
840
841#ifdef EBIU_SDGCTL
842 parent = debugfs_create_dir("ebiu_sdram", top);
843# ifdef __ADSPBF561__
844 D32(EBIU_SDBCTL);
845# else
846 D16(EBIU_SDBCTL);
847# endif
848 D32(EBIU_SDGCTL);
849 D16(EBIU_SDRRC);
850 D16(EBIU_SDSTAT);
851#endif
852
853#ifdef EBIU_DDRACCT
854 parent = debugfs_create_dir("ebiu_ddr", top);
855 D32(EBIU_DDRACCT);
856 D32(EBIU_DDRARCT);
857 D32(EBIU_DDRBRC0);
858 D32(EBIU_DDRBRC1);
859 D32(EBIU_DDRBRC2);
860 D32(EBIU_DDRBRC3);
861 D32(EBIU_DDRBRC4);
862 D32(EBIU_DDRBRC5);
863 D32(EBIU_DDRBRC6);
864 D32(EBIU_DDRBRC7);
865 D32(EBIU_DDRBWC0);
866 D32(EBIU_DDRBWC1);
867 D32(EBIU_DDRBWC2);
868 D32(EBIU_DDRBWC3);
869 D32(EBIU_DDRBWC4);
870 D32(EBIU_DDRBWC5);
871 D32(EBIU_DDRBWC6);
872 D32(EBIU_DDRBWC7);
873 D32(EBIU_DDRCTL0);
874 D32(EBIU_DDRCTL1);
875 D32(EBIU_DDRCTL2);
876 D32(EBIU_DDRCTL3);
877 D32(EBIU_DDRGC0);
878 D32(EBIU_DDRGC1);
879 D32(EBIU_DDRGC2);
880 D32(EBIU_DDRGC3);
881 D32(EBIU_DDRMCCL);
882 D32(EBIU_DDRMCEN);
883 D32(EBIU_DDRQUE);
884 D32(EBIU_DDRTACT);
885 D32(EBIU_ERRADD);
886 D16(EBIU_ERRMST);
887 D16(EBIU_RSTCTL);
888#endif
889
890#ifdef EMAC_ADDRHI
891 parent = debugfs_create_dir("emac", top);
892 D32(EMAC_ADDRHI);
893 D32(EMAC_ADDRLO);
894 D32(EMAC_FLC);
895 D32(EMAC_HASHHI);
896 D32(EMAC_HASHLO);
897 D32(EMAC_MMC_CTL);
898 D32(EMAC_MMC_RIRQE);
899 D32(EMAC_MMC_RIRQS);
900 D32(EMAC_MMC_TIRQE);
901 D32(EMAC_MMC_TIRQS);
902 D32(EMAC_OPMODE);
903 D32(EMAC_RXC_ALIGN);
904 D32(EMAC_RXC_ALLFRM);
905 D32(EMAC_RXC_ALLOCT);
906 D32(EMAC_RXC_BROAD);
907 D32(EMAC_RXC_DMAOVF);
908 D32(EMAC_RXC_EQ64);
909 D32(EMAC_RXC_FCS);
910 D32(EMAC_RXC_GE1024);
911 D32(EMAC_RXC_LNERRI);
912 D32(EMAC_RXC_LNERRO);
913 D32(EMAC_RXC_LONG);
914 D32(EMAC_RXC_LT1024);
915 D32(EMAC_RXC_LT128);
916 D32(EMAC_RXC_LT256);
917 D32(EMAC_RXC_LT512);
918 D32(EMAC_RXC_MACCTL);
919 D32(EMAC_RXC_MULTI);
920 D32(EMAC_RXC_OCTET);
921 D32(EMAC_RXC_OK);
922 D32(EMAC_RXC_OPCODE);
923 D32(EMAC_RXC_PAUSE);
924 D32(EMAC_RXC_SHORT);
925 D32(EMAC_RXC_TYPED);
926 D32(EMAC_RXC_UNICST);
927 D32(EMAC_RX_IRQE);
928 D32(EMAC_RX_STAT);
929 D32(EMAC_RX_STKY);
930 D32(EMAC_STAADD);
931 D32(EMAC_STADAT);
932 D32(EMAC_SYSCTL);
933 D32(EMAC_SYSTAT);
934 D32(EMAC_TXC_1COL);
935 D32(EMAC_TXC_ABORT);
936 D32(EMAC_TXC_ALLFRM);
937 D32(EMAC_TXC_ALLOCT);
938 D32(EMAC_TXC_BROAD);
939 D32(EMAC_TXC_CRSERR);
940 D32(EMAC_TXC_DEFER);
941 D32(EMAC_TXC_DMAUND);
942 D32(EMAC_TXC_EQ64);
943 D32(EMAC_TXC_GE1024);
944 D32(EMAC_TXC_GT1COL);
945 D32(EMAC_TXC_LATECL);
946 D32(EMAC_TXC_LT1024);
947 D32(EMAC_TXC_LT128);
948 D32(EMAC_TXC_LT256);
949 D32(EMAC_TXC_LT512);
950 D32(EMAC_TXC_MACCTL);
951 D32(EMAC_TXC_MULTI);
952 D32(EMAC_TXC_OCTET);
953 D32(EMAC_TXC_OK);
954 D32(EMAC_TXC_UNICST);
955 D32(EMAC_TXC_XS_COL);
956 D32(EMAC_TXC_XS_DFR);
957 D32(EMAC_TX_IRQE);
958 D32(EMAC_TX_STAT);
959 D32(EMAC_TX_STKY);
960 D32(EMAC_VLAN1);
961 D32(EMAC_VLAN2);
962 D32(EMAC_WKUP_CTL);
963 D32(EMAC_WKUP_FFCMD);
964 D32(EMAC_WKUP_FFCRC0);
965 D32(EMAC_WKUP_FFCRC1);
966 D32(EMAC_WKUP_FFMSK0);
967 D32(EMAC_WKUP_FFMSK1);
968 D32(EMAC_WKUP_FFMSK2);
969 D32(EMAC_WKUP_FFMSK3);
970 D32(EMAC_WKUP_FFOFF);
971# ifdef EMAC_PTP_ACCR
972 D32(EMAC_PTP_ACCR);
973 D32(EMAC_PTP_ADDEND);
974 D32(EMAC_PTP_ALARMHI);
975 D32(EMAC_PTP_ALARMLO);
976 D16(EMAC_PTP_CTL);
977 D32(EMAC_PTP_FOFF);
978 D32(EMAC_PTP_FV1);
979 D32(EMAC_PTP_FV2);
980 D32(EMAC_PTP_FV3);
981 D16(EMAC_PTP_ID_OFF);
982 D32(EMAC_PTP_ID_SNAP);
983 D16(EMAC_PTP_IE);
984 D16(EMAC_PTP_ISTAT);
985 D32(EMAC_PTP_OFFSET);
986 D32(EMAC_PTP_PPS_PERIOD);
987 D32(EMAC_PTP_PPS_STARTHI);
988 D32(EMAC_PTP_PPS_STARTLO);
989 D32(EMAC_PTP_RXSNAPHI);
990 D32(EMAC_PTP_RXSNAPLO);
991 D32(EMAC_PTP_TIMEHI);
992 D32(EMAC_PTP_TIMELO);
993 D32(EMAC_PTP_TXSNAPHI);
994 D32(EMAC_PTP_TXSNAPLO);
995# endif
996#endif
997
998#if defined(EPPI0_STATUS) || defined(EPPI1_STATUS) || defined(EPPI2_STATUS)
999 parent = debugfs_create_dir("eppi", top);
1000# ifdef EPPI0_STATUS
1001 EPPI(0);
1002# endif
1003# ifdef EPPI1_STATUS
1004 EPPI(1);
1005# endif
1006# ifdef EPPI2_STATUS
1007 EPPI(2);
1008# endif
1009#endif
1010
1011 parent = debugfs_create_dir("gptimer", top);
1012#ifdef TIMER_DISABLE
1013 D16(TIMER_DISABLE);
1014 D16(TIMER_ENABLE);
1015 D32(TIMER_STATUS);
1016#endif
1017#ifdef TIMER_DISABLE0
1018 D16(TIMER_DISABLE0);
1019 D16(TIMER_ENABLE0);
1020 D32(TIMER_STATUS0);
1021#endif
1022#ifdef TIMER_DISABLE1
1023 D16(TIMER_DISABLE1);
1024 D16(TIMER_ENABLE1);
1025 D32(TIMER_STATUS1);
1026#endif
1027 /* XXX: Should convert BF561 MMR names */
1028#ifdef TMRS4_DISABLE
1029 D16(TMRS4_DISABLE);
1030 D16(TMRS4_ENABLE);
1031 D32(TMRS4_STATUS);
1032 D16(TMRS8_DISABLE);
1033 D16(TMRS8_ENABLE);
1034 D32(TMRS8_STATUS);
1035#endif
1036 GPTIMER(0);
1037 GPTIMER(1);
1038 GPTIMER(2);
1039#ifdef TIMER3_CONFIG
1040 GPTIMER(3);
1041 GPTIMER(4);
1042 GPTIMER(5);
1043 GPTIMER(6);
1044 GPTIMER(7);
1045#endif
1046#ifdef TIMER8_CONFIG
1047 GPTIMER(8);
1048 GPTIMER(9);
1049 GPTIMER(10);
1050#endif
1051#ifdef TIMER11_CONFIG
1052 GPTIMER(11);
1053#endif
1054
1055#ifdef HMDMA0_CONTROL
1056 parent = debugfs_create_dir("hmdma", top);
1057 HMDMA(0);
1058 HMDMA(1);
1059#endif
1060
1061#ifdef HOST_CONTROL
1062 parent = debugfs_create_dir("hostdp", top);
1063 D16(HOST_CONTROL);
1064 D16(HOST_STATUS);
1065 D16(HOST_TIMEOUT);
1066#endif
1067
1068#ifdef IMDMA_S0_CONFIG
1069 parent = debugfs_create_dir("imdma", top);
1070 IMDMA(0);
1071 IMDMA(1);
1072#endif
1073
1074#ifdef KPAD_CTL
1075 parent = debugfs_create_dir("keypad", top);
1076 D16(KPAD_CTL);
1077 D16(KPAD_PRESCALE);
1078 D16(KPAD_MSEL);
1079 D16(KPAD_ROWCOL);
1080 D16(KPAD_STAT);
1081 D16(KPAD_SOFTEVAL);
1082#endif
1083
1084 parent = debugfs_create_dir("mdma", top);
1085 MDMA(0);
1086 MDMA(1);
1087#ifdef MDMA_D2_CONFIG
1088 MDMA(2);
1089 MDMA(3);
1090#endif
1091
1092#ifdef MXVR_CONFIG
1093 parent = debugfs_create_dir("mxvr", top);
1094 D16(MXVR_CONFIG);
1095# ifdef MXVR_PLL_CTL_0
1096 D32(MXVR_PLL_CTL_0);
1097# endif
1098 D32(MXVR_STATE_0);
1099 D32(MXVR_STATE_1);
1100 D32(MXVR_INT_STAT_0);
1101 D32(MXVR_INT_STAT_1);
1102 D32(MXVR_INT_EN_0);
1103 D32(MXVR_INT_EN_1);
1104 D16(MXVR_POSITION);
1105 D16(MXVR_MAX_POSITION);
1106 D16(MXVR_DELAY);
1107 D16(MXVR_MAX_DELAY);
1108 D32(MXVR_LADDR);
1109 D16(MXVR_GADDR);
1110 D32(MXVR_AADDR);
1111 D32(MXVR_ALLOC_0);
1112 D32(MXVR_ALLOC_1);
1113 D32(MXVR_ALLOC_2);
1114 D32(MXVR_ALLOC_3);
1115 D32(MXVR_ALLOC_4);
1116 D32(MXVR_ALLOC_5);
1117 D32(MXVR_ALLOC_6);
1118 D32(MXVR_ALLOC_7);
1119 D32(MXVR_ALLOC_8);
1120 D32(MXVR_ALLOC_9);
1121 D32(MXVR_ALLOC_10);
1122 D32(MXVR_ALLOC_11);
1123 D32(MXVR_ALLOC_12);
1124 D32(MXVR_ALLOC_13);
1125 D32(MXVR_ALLOC_14);
1126 D32(MXVR_SYNC_LCHAN_0);
1127 D32(MXVR_SYNC_LCHAN_1);
1128 D32(MXVR_SYNC_LCHAN_2);
1129 D32(MXVR_SYNC_LCHAN_3);
1130 D32(MXVR_SYNC_LCHAN_4);
1131 D32(MXVR_SYNC_LCHAN_5);
1132 D32(MXVR_SYNC_LCHAN_6);
1133 D32(MXVR_SYNC_LCHAN_7);
1134 D32(MXVR_DMA0_CONFIG);
1135 D32(MXVR_DMA0_START_ADDR);
1136 D16(MXVR_DMA0_COUNT);
1137 D32(MXVR_DMA0_CURR_ADDR);
1138 D16(MXVR_DMA0_CURR_COUNT);
1139 D32(MXVR_DMA1_CONFIG);
1140 D32(MXVR_DMA1_START_ADDR);
1141 D16(MXVR_DMA1_COUNT);
1142 D32(MXVR_DMA1_CURR_ADDR);
1143 D16(MXVR_DMA1_CURR_COUNT);
1144 D32(MXVR_DMA2_CONFIG);
1145 D32(MXVR_DMA2_START_ADDR);
1146 D16(MXVR_DMA2_COUNT);
1147 D32(MXVR_DMA2_CURR_ADDR);
1148 D16(MXVR_DMA2_CURR_COUNT);
1149 D32(MXVR_DMA3_CONFIG);
1150 D32(MXVR_DMA3_START_ADDR);
1151 D16(MXVR_DMA3_COUNT);
1152 D32(MXVR_DMA3_CURR_ADDR);
1153 D16(MXVR_DMA3_CURR_COUNT);
1154 D32(MXVR_DMA4_CONFIG);
1155 D32(MXVR_DMA4_START_ADDR);
1156 D16(MXVR_DMA4_COUNT);
1157 D32(MXVR_DMA4_CURR_ADDR);
1158 D16(MXVR_DMA4_CURR_COUNT);
1159 D32(MXVR_DMA5_CONFIG);
1160 D32(MXVR_DMA5_START_ADDR);
1161 D16(MXVR_DMA5_COUNT);
1162 D32(MXVR_DMA5_CURR_ADDR);
1163 D16(MXVR_DMA5_CURR_COUNT);
1164 D32(MXVR_DMA6_CONFIG);
1165 D32(MXVR_DMA6_START_ADDR);
1166 D16(MXVR_DMA6_COUNT);
1167 D32(MXVR_DMA6_CURR_ADDR);
1168 D16(MXVR_DMA6_CURR_COUNT);
1169 D32(MXVR_DMA7_CONFIG);
1170 D32(MXVR_DMA7_START_ADDR);
1171 D16(MXVR_DMA7_COUNT);
1172 D32(MXVR_DMA7_CURR_ADDR);
1173 D16(MXVR_DMA7_CURR_COUNT);
1174 D16(MXVR_AP_CTL);
1175 D32(MXVR_APRB_START_ADDR);
1176 D32(MXVR_APRB_CURR_ADDR);
1177 D32(MXVR_APTB_START_ADDR);
1178 D32(MXVR_APTB_CURR_ADDR);
1179 D32(MXVR_CM_CTL);
1180 D32(MXVR_CMRB_START_ADDR);
1181 D32(MXVR_CMRB_CURR_ADDR);
1182 D32(MXVR_CMTB_START_ADDR);
1183 D32(MXVR_CMTB_CURR_ADDR);
1184 D32(MXVR_RRDB_START_ADDR);
1185 D32(MXVR_RRDB_CURR_ADDR);
1186 D32(MXVR_PAT_DATA_0);
1187 D32(MXVR_PAT_EN_0);
1188 D32(MXVR_PAT_DATA_1);
1189 D32(MXVR_PAT_EN_1);
1190 D16(MXVR_FRAME_CNT_0);
1191 D16(MXVR_FRAME_CNT_1);
1192 D32(MXVR_ROUTING_0);
1193 D32(MXVR_ROUTING_1);
1194 D32(MXVR_ROUTING_2);
1195 D32(MXVR_ROUTING_3);
1196 D32(MXVR_ROUTING_4);
1197 D32(MXVR_ROUTING_5);
1198 D32(MXVR_ROUTING_6);
1199 D32(MXVR_ROUTING_7);
1200 D32(MXVR_ROUTING_8);
1201 D32(MXVR_ROUTING_9);
1202 D32(MXVR_ROUTING_10);
1203 D32(MXVR_ROUTING_11);
1204 D32(MXVR_ROUTING_12);
1205 D32(MXVR_ROUTING_13);
1206 D32(MXVR_ROUTING_14);
1207# ifdef MXVR_PLL_CTL_1
1208 D32(MXVR_PLL_CTL_1);
1209# endif
1210 D16(MXVR_BLOCK_CNT);
1211# ifdef MXVR_CLK_CTL
1212 D32(MXVR_CLK_CTL);
1213# endif
1214# ifdef MXVR_CDRPLL_CTL
1215 D32(MXVR_CDRPLL_CTL);
1216# endif
1217# ifdef MXVR_FMPLL_CTL
1218 D32(MXVR_FMPLL_CTL);
1219# endif
1220# ifdef MXVR_PIN_CTL
1221 D16(MXVR_PIN_CTL);
1222# endif
1223# ifdef MXVR_SCLK_CNT
1224 D16(MXVR_SCLK_CNT);
1225# endif
1226#endif
1227
1228#ifdef NFC_ADDR
1229 parent = debugfs_create_dir("nfc", top);
1230 D_WO(NFC_ADDR, 16);
1231 D_WO(NFC_CMD, 16);
1232 D_RO(NFC_COUNT, 16);
1233 D16(NFC_CTL);
1234 D_WO(NFC_DATA_RD, 16);
1235 D_WO(NFC_DATA_WR, 16);
1236 D_RO(NFC_ECC0, 16);
1237 D_RO(NFC_ECC1, 16);
1238 D_RO(NFC_ECC2, 16);
1239 D_RO(NFC_ECC3, 16);
1240 D16(NFC_IRQMASK);
1241 D16(NFC_IRQSTAT);
1242 D_WO(NFC_PGCTL, 16);
1243 D_RO(NFC_READ, 16);
1244 D16(NFC_RST);
1245 D_RO(NFC_STAT, 16);
1246#endif
1247
1248#ifdef OTP_CONTROL
1249 parent = debugfs_create_dir("otp", top);
1250 D16(OTP_CONTROL);
1251 D16(OTP_BEN);
1252 D16(OTP_STATUS);
1253 D32(OTP_TIMING);
1254 D32(OTP_DATA0);
1255 D32(OTP_DATA1);
1256 D32(OTP_DATA2);
1257 D32(OTP_DATA3);
1258#endif
1259
1260#ifdef PIXC_CTL
1261 parent = debugfs_create_dir("pixc", top);
1262 D16(PIXC_CTL);
1263 D16(PIXC_PPL);
1264 D16(PIXC_LPF);
1265 D16(PIXC_AHSTART);
1266 D16(PIXC_AHEND);
1267 D16(PIXC_AVSTART);
1268 D16(PIXC_AVEND);
1269 D16(PIXC_ATRANSP);
1270 D16(PIXC_BHSTART);
1271 D16(PIXC_BHEND);
1272 D16(PIXC_BVSTART);
1273 D16(PIXC_BVEND);
1274 D16(PIXC_BTRANSP);
1275 D16(PIXC_INTRSTAT);
1276 D32(PIXC_RYCON);
1277 D32(PIXC_GUCON);
1278 D32(PIXC_BVCON);
1279 D32(PIXC_CCBIAS);
1280 D32(PIXC_TC);
1281#endif
1282
1283 parent = debugfs_create_dir("pll", top);
1284 D16(PLL_CTL);
1285 D16(PLL_DIV);
1286 D16(PLL_LOCKCNT);
1287 D16(PLL_STAT);
1288 D16(VR_CTL);
1289 D32(CHIPID); /* it's part of this hardware block */
1290
1291#if defined(PPI_STATUS) || defined(PPI0_STATUS) || defined(PPI1_STATUS)
1292 parent = debugfs_create_dir("ppi", top);
1293# ifdef PPI_STATUS
1294 bfin_debug_mmrs_ppi(parent, PPI_STATUS, -1);
1295# endif
1296# ifdef PPI0_STATUS
1297 PPI(0);
1298# endif
1299# ifdef PPI1_STATUS
1300 PPI(1);
1301# endif
1302#endif
1303
1304#ifdef PWM_CTRL
1305 parent = debugfs_create_dir("pwm", top);
1306 D16(PWM_CTRL);
1307 D16(PWM_STAT);
1308 D16(PWM_TM);
1309 D16(PWM_DT);
1310 D16(PWM_GATE);
1311 D16(PWM_CHA);
1312 D16(PWM_CHB);
1313 D16(PWM_CHC);
1314 D16(PWM_SEG);
1315 D16(PWM_SYNCWT);
1316 D16(PWM_CHAL);
1317 D16(PWM_CHBL);
1318 D16(PWM_CHCL);
1319 D16(PWM_LSI);
1320 D16(PWM_STAT2);
1321#endif
1322
1323#ifdef RSI_CONFIG
1324 parent = debugfs_create_dir("rsi", top);
1325 D32(RSI_ARGUMENT);
1326 D16(RSI_CEATA_CONTROL);
1327 D16(RSI_CLK_CONTROL);
1328 D16(RSI_COMMAND);
1329 D16(RSI_CONFIG);
1330 D16(RSI_DATA_CNT);
1331 D16(RSI_DATA_CONTROL);
1332 D16(RSI_DATA_LGTH);
1333 D32(RSI_DATA_TIMER);
1334 D16(RSI_EMASK);
1335 D16(RSI_ESTAT);
1336 D32(RSI_FIFO);
1337 D16(RSI_FIFO_CNT);
1338 D32(RSI_MASK0);
1339 D32(RSI_MASK1);
1340 D16(RSI_PID0);
1341 D16(RSI_PID1);
1342 D16(RSI_PID2);
1343 D16(RSI_PID3);
1344 D16(RSI_PWR_CONTROL);
1345 D16(RSI_RD_WAIT_EN);
1346 D32(RSI_RESPONSE0);
1347 D32(RSI_RESPONSE1);
1348 D32(RSI_RESPONSE2);
1349 D32(RSI_RESPONSE3);
1350 D16(RSI_RESP_CMD);
1351 D32(RSI_STATUS);
1352 D_WO(RSI_STATUSCL, 16);
1353#endif
1354
1355#ifdef RTC_ALARM
1356 parent = debugfs_create_dir("rtc", top);
1357 D32(RTC_ALARM);
1358 D16(RTC_ICTL);
1359 D16(RTC_ISTAT);
1360 D16(RTC_PREN);
1361 D32(RTC_STAT);
1362 D16(RTC_SWCNT);
1363#endif
1364
1365#ifdef SDH_CFG
1366 parent = debugfs_create_dir("sdh", top);
1367 D32(SDH_ARGUMENT);
1368 D16(SDH_CFG);
1369 D16(SDH_CLK_CTL);
1370 D16(SDH_COMMAND);
1371 D_RO(SDH_DATA_CNT, 16);
1372 D16(SDH_DATA_CTL);
1373 D16(SDH_DATA_LGTH);
1374 D32(SDH_DATA_TIMER);
1375 D16(SDH_E_MASK);
1376 D16(SDH_E_STATUS);
1377 D32(SDH_FIFO);
1378 D_RO(SDH_FIFO_CNT, 16);
1379 D32(SDH_MASK0);
1380 D32(SDH_MASK1);
1381 D_RO(SDH_PID0, 16);
1382 D_RO(SDH_PID1, 16);
1383 D_RO(SDH_PID2, 16);
1384 D_RO(SDH_PID3, 16);
1385 D_RO(SDH_PID4, 16);
1386 D_RO(SDH_PID5, 16);
1387 D_RO(SDH_PID6, 16);
1388 D_RO(SDH_PID7, 16);
1389 D16(SDH_PWR_CTL);
1390 D16(SDH_RD_WAIT_EN);
1391 D_RO(SDH_RESPONSE0, 32);
1392 D_RO(SDH_RESPONSE1, 32);
1393 D_RO(SDH_RESPONSE2, 32);
1394 D_RO(SDH_RESPONSE3, 32);
1395 D_RO(SDH_RESP_CMD, 16);
1396 D_RO(SDH_STATUS, 32);
1397 D_WO(SDH_STATUS_CLR, 16);
1398#endif
1399
1400#ifdef SECURE_CONTROL
1401 parent = debugfs_create_dir("security", top);
1402 D16(SECURE_CONTROL);
1403 D16(SECURE_STATUS);
1404 D32(SECURE_SYSSWT);
1405#endif
1406
1407 parent = debugfs_create_dir("sic", top);
1408 D16(SWRST);
1409 D16(SYSCR);
1410 D16(SIC_RVECT);
1411 D32(SIC_IAR0);
1412 D32(SIC_IAR1);
1413 D32(SIC_IAR2);
1414#ifdef SIC_IAR3
1415 D32(SIC_IAR3);
1416#endif
1417#ifdef SIC_IAR4
1418 D32(SIC_IAR4);
1419 D32(SIC_IAR5);
1420 D32(SIC_IAR6);
1421#endif
1422#ifdef SIC_IAR7
1423 D32(SIC_IAR7);
1424#endif
1425#ifdef SIC_IAR8
1426 D32(SIC_IAR8);
1427 D32(SIC_IAR9);
1428 D32(SIC_IAR10);
1429 D32(SIC_IAR11);
1430#endif
1431#ifdef SIC_IMASK
1432 D32(SIC_IMASK);
1433 D32(SIC_ISR);
1434 D32(SIC_IWR);
1435#endif
1436#ifdef SIC_IMASK0
1437 D32(SIC_IMASK0);
1438 D32(SIC_IMASK1);
1439 D32(SIC_ISR0);
1440 D32(SIC_ISR1);
1441 D32(SIC_IWR0);
1442 D32(SIC_IWR1);
1443#endif
1444#ifdef SIC_IMASK2
1445 D32(SIC_IMASK2);
1446 D32(SIC_ISR2);
1447 D32(SIC_IWR2);
1448#endif
1449#ifdef SICB_RVECT
1450 D16(SICB_SWRST);
1451 D16(SICB_SYSCR);
1452 D16(SICB_RVECT);
1453 D32(SICB_IAR0);
1454 D32(SICB_IAR1);
1455 D32(SICB_IAR2);
1456 D32(SICB_IAR3);
1457 D32(SICB_IAR4);
1458 D32(SICB_IAR5);
1459 D32(SICB_IAR6);
1460 D32(SICB_IAR7);
1461 D32(SICB_IMASK0);
1462 D32(SICB_IMASK1);
1463 D32(SICB_ISR0);
1464 D32(SICB_ISR1);
1465 D32(SICB_IWR0);
1466 D32(SICB_IWR1);
1467#endif
1468
1469 parent = debugfs_create_dir("spi", top);
1470#ifdef SPI0_REGBASE
1471 SPI(0);
1472#endif
1473#ifdef SPI1_REGBASE
1474 SPI(1);
1475#endif
1476#ifdef SPI2_REGBASE
1477 SPI(2);
1478#endif
1479
1480 parent = debugfs_create_dir("sport", top);
1481#ifdef SPORT0_STAT
1482 SPORT(0);
1483#endif
1484#ifdef SPORT1_STAT
1485 SPORT(1);
1486#endif
1487#ifdef SPORT2_STAT
1488 SPORT(2);
1489#endif
1490#ifdef SPORT3_STAT
1491 SPORT(3);
1492#endif
1493
1494#if defined(TWI_CLKDIV) || defined(TWI0_CLKDIV) || defined(TWI1_CLKDIV)
1495 parent = debugfs_create_dir("twi", top);
1496# ifdef TWI_CLKDIV
1497 bfin_debug_mmrs_twi(parent, TWI_CLKDIV, -1);
1498# endif
1499# ifdef TWI0_CLKDIV
1500 TWI(0);
1501# endif
1502# ifdef TWI1_CLKDIV
1503 TWI(1);
1504# endif
1505#endif
1506
1507 parent = debugfs_create_dir("uart", top);
1508#ifdef BFIN_UART_DLL
1509 bfin_debug_mmrs_uart(parent, BFIN_UART_DLL, -1);
1510#endif
1511#ifdef UART0_DLL
1512 UART(0);
1513#endif
1514#ifdef UART1_DLL
1515 UART(1);
1516#endif
1517#ifdef UART2_DLL
1518 UART(2);
1519#endif
1520#ifdef UART3_DLL
1521 UART(3);
1522#endif
1523
1524#ifdef USB_FADDR
1525 parent = debugfs_create_dir("usb", top);
1526 D16(USB_FADDR);
1527 D16(USB_POWER);
1528 D16(USB_INTRTX);
1529 D16(USB_INTRRX);
1530 D16(USB_INTRTXE);
1531 D16(USB_INTRRXE);
1532 D16(USB_INTRUSB);
1533 D16(USB_INTRUSBE);
1534 D16(USB_FRAME);
1535 D16(USB_INDEX);
1536 D16(USB_TESTMODE);
1537 D16(USB_GLOBINTR);
1538 D16(USB_GLOBAL_CTL);
1539 D16(USB_TX_MAX_PACKET);
1540 D16(USB_CSR0);
1541 D16(USB_TXCSR);
1542 D16(USB_RX_MAX_PACKET);
1543 D16(USB_RXCSR);
1544 D16(USB_COUNT0);
1545 D16(USB_RXCOUNT);
1546 D16(USB_TXTYPE);
1547 D16(USB_NAKLIMIT0);
1548 D16(USB_TXINTERVAL);
1549 D16(USB_RXTYPE);
1550 D16(USB_RXINTERVAL);
1551 D16(USB_TXCOUNT);
1552 D16(USB_EP0_FIFO);
1553 D16(USB_EP1_FIFO);
1554 D16(USB_EP2_FIFO);
1555 D16(USB_EP3_FIFO);
1556 D16(USB_EP4_FIFO);
1557 D16(USB_EP5_FIFO);
1558 D16(USB_EP6_FIFO);
1559 D16(USB_EP7_FIFO);
1560 D16(USB_OTG_DEV_CTL);
1561 D16(USB_OTG_VBUS_IRQ);
1562 D16(USB_OTG_VBUS_MASK);
1563 D16(USB_LINKINFO);
1564 D16(USB_VPLEN);
1565 D16(USB_HS_EOF1);
1566 D16(USB_FS_EOF1);
1567 D16(USB_LS_EOF1);
1568 D16(USB_APHY_CNTRL);
1569 D16(USB_APHY_CALIB);
1570 D16(USB_APHY_CNTRL2);
1571 D16(USB_PHY_TEST);
1572 D16(USB_PLLOSC_CTRL);
1573 D16(USB_SRP_CLKDIV);
1574 D16(USB_EP_NI0_TXMAXP);
1575 D16(USB_EP_NI0_TXCSR);
1576 D16(USB_EP_NI0_RXMAXP);
1577 D16(USB_EP_NI0_RXCSR);
1578 D16(USB_EP_NI0_RXCOUNT);
1579 D16(USB_EP_NI0_TXTYPE);
1580 D16(USB_EP_NI0_TXINTERVAL);
1581 D16(USB_EP_NI0_RXTYPE);
1582 D16(USB_EP_NI0_RXINTERVAL);
1583 D16(USB_EP_NI0_TXCOUNT);
1584 D16(USB_EP_NI1_TXMAXP);
1585 D16(USB_EP_NI1_TXCSR);
1586 D16(USB_EP_NI1_RXMAXP);
1587 D16(USB_EP_NI1_RXCSR);
1588 D16(USB_EP_NI1_RXCOUNT);
1589 D16(USB_EP_NI1_TXTYPE);
1590 D16(USB_EP_NI1_TXINTERVAL);
1591 D16(USB_EP_NI1_RXTYPE);
1592 D16(USB_EP_NI1_RXINTERVAL);
1593 D16(USB_EP_NI1_TXCOUNT);
1594 D16(USB_EP_NI2_TXMAXP);
1595 D16(USB_EP_NI2_TXCSR);
1596 D16(USB_EP_NI2_RXMAXP);
1597 D16(USB_EP_NI2_RXCSR);
1598 D16(USB_EP_NI2_RXCOUNT);
1599 D16(USB_EP_NI2_TXTYPE);
1600 D16(USB_EP_NI2_TXINTERVAL);
1601 D16(USB_EP_NI2_RXTYPE);
1602 D16(USB_EP_NI2_RXINTERVAL);
1603 D16(USB_EP_NI2_TXCOUNT);
1604 D16(USB_EP_NI3_TXMAXP);
1605 D16(USB_EP_NI3_TXCSR);
1606 D16(USB_EP_NI3_RXMAXP);
1607 D16(USB_EP_NI3_RXCSR);
1608 D16(USB_EP_NI3_RXCOUNT);
1609 D16(USB_EP_NI3_TXTYPE);
1610 D16(USB_EP_NI3_TXINTERVAL);
1611 D16(USB_EP_NI3_RXTYPE);
1612 D16(USB_EP_NI3_RXINTERVAL);
1613 D16(USB_EP_NI3_TXCOUNT);
1614 D16(USB_EP_NI4_TXMAXP);
1615 D16(USB_EP_NI4_TXCSR);
1616 D16(USB_EP_NI4_RXMAXP);
1617 D16(USB_EP_NI4_RXCSR);
1618 D16(USB_EP_NI4_RXCOUNT);
1619 D16(USB_EP_NI4_TXTYPE);
1620 D16(USB_EP_NI4_TXINTERVAL);
1621 D16(USB_EP_NI4_RXTYPE);
1622 D16(USB_EP_NI4_RXINTERVAL);
1623 D16(USB_EP_NI4_TXCOUNT);
1624 D16(USB_EP_NI5_TXMAXP);
1625 D16(USB_EP_NI5_TXCSR);
1626 D16(USB_EP_NI5_RXMAXP);
1627 D16(USB_EP_NI5_RXCSR);
1628 D16(USB_EP_NI5_RXCOUNT);
1629 D16(USB_EP_NI5_TXTYPE);
1630 D16(USB_EP_NI5_TXINTERVAL);
1631 D16(USB_EP_NI5_RXTYPE);
1632 D16(USB_EP_NI5_RXINTERVAL);
1633 D16(USB_EP_NI5_TXCOUNT);
1634 D16(USB_EP_NI6_TXMAXP);
1635 D16(USB_EP_NI6_TXCSR);
1636 D16(USB_EP_NI6_RXMAXP);
1637 D16(USB_EP_NI6_RXCSR);
1638 D16(USB_EP_NI6_RXCOUNT);
1639 D16(USB_EP_NI6_TXTYPE);
1640 D16(USB_EP_NI6_TXINTERVAL);
1641 D16(USB_EP_NI6_RXTYPE);
1642 D16(USB_EP_NI6_RXINTERVAL);
1643 D16(USB_EP_NI6_TXCOUNT);
1644 D16(USB_EP_NI7_TXMAXP);
1645 D16(USB_EP_NI7_TXCSR);
1646 D16(USB_EP_NI7_RXMAXP);
1647 D16(USB_EP_NI7_RXCSR);
1648 D16(USB_EP_NI7_RXCOUNT);
1649 D16(USB_EP_NI7_TXTYPE);
1650 D16(USB_EP_NI7_TXINTERVAL);
1651 D16(USB_EP_NI7_RXTYPE);
1652 D16(USB_EP_NI7_RXINTERVAL);
1653 D16(USB_EP_NI7_TXCOUNT);
1654 D16(USB_DMA_INTERRUPT);
1655 D16(USB_DMA0CONTROL);
1656 D16(USB_DMA0ADDRLOW);
1657 D16(USB_DMA0ADDRHIGH);
1658 D16(USB_DMA0COUNTLOW);
1659 D16(USB_DMA0COUNTHIGH);
1660 D16(USB_DMA1CONTROL);
1661 D16(USB_DMA1ADDRLOW);
1662 D16(USB_DMA1ADDRHIGH);
1663 D16(USB_DMA1COUNTLOW);
1664 D16(USB_DMA1COUNTHIGH);
1665 D16(USB_DMA2CONTROL);
1666 D16(USB_DMA2ADDRLOW);
1667 D16(USB_DMA2ADDRHIGH);
1668 D16(USB_DMA2COUNTLOW);
1669 D16(USB_DMA2COUNTHIGH);
1670 D16(USB_DMA3CONTROL);
1671 D16(USB_DMA3ADDRLOW);
1672 D16(USB_DMA3ADDRHIGH);
1673 D16(USB_DMA3COUNTLOW);
1674 D16(USB_DMA3COUNTHIGH);
1675 D16(USB_DMA4CONTROL);
1676 D16(USB_DMA4ADDRLOW);
1677 D16(USB_DMA4ADDRHIGH);
1678 D16(USB_DMA4COUNTLOW);
1679 D16(USB_DMA4COUNTHIGH);
1680 D16(USB_DMA5CONTROL);
1681 D16(USB_DMA5ADDRLOW);
1682 D16(USB_DMA5ADDRHIGH);
1683 D16(USB_DMA5COUNTLOW);
1684 D16(USB_DMA5COUNTHIGH);
1685 D16(USB_DMA6CONTROL);
1686 D16(USB_DMA6ADDRLOW);
1687 D16(USB_DMA6ADDRHIGH);
1688 D16(USB_DMA6COUNTLOW);
1689 D16(USB_DMA6COUNTHIGH);
1690 D16(USB_DMA7CONTROL);
1691 D16(USB_DMA7ADDRLOW);
1692 D16(USB_DMA7ADDRHIGH);
1693 D16(USB_DMA7COUNTLOW);
1694 D16(USB_DMA7COUNTHIGH);
1695#endif
1696
1697#ifdef WDOG_CNT
1698 parent = debugfs_create_dir("watchdog", top);
1699 D32(WDOG_CNT);
1700 D16(WDOG_CTL);
1701 D32(WDOG_STAT);
1702#endif
1703#ifdef WDOGA_CNT
1704 parent = debugfs_create_dir("watchdog", top);
1705 D32(WDOGA_CNT);
1706 D16(WDOGA_CTL);
1707 D32(WDOGA_STAT);
1708 D32(WDOGB_CNT);
1709 D16(WDOGB_CTL);
1710 D32(WDOGB_STAT);
1711#endif
1712
1713 /* BF533 glue */
1714#ifdef FIO_FLAG_D
1715#define PORTFIO FIO_FLAG_D
1716#endif
1717 /* BF561 glue */
1718#ifdef FIO0_FLAG_D
1719#define PORTFIO FIO0_FLAG_D
1720#endif
1721#ifdef FIO1_FLAG_D
1722#define PORTGIO FIO1_FLAG_D
1723#endif
1724#ifdef FIO2_FLAG_D
1725#define PORTHIO FIO2_FLAG_D
1726#endif
1727 parent = debugfs_create_dir("port", top);
1728#ifdef PORTFIO
1729 PORT(PORTFIO, 'F');
1730#endif
1731#ifdef PORTGIO
1732 PORT(PORTGIO, 'G');
1733#endif
1734#ifdef PORTHIO
1735 PORT(PORTHIO, 'H');
1736#endif
1737
1738#ifdef __ADSPBF51x__
1739 D16(PORTF_FER);
1740 D16(PORTF_DRIVE);
1741 D16(PORTF_HYSTERESIS);
1742 D16(PORTF_MUX);
1743
1744 D16(PORTG_FER);
1745 D16(PORTG_DRIVE);
1746 D16(PORTG_HYSTERESIS);
1747 D16(PORTG_MUX);
1748
1749 D16(PORTH_FER);
1750 D16(PORTH_DRIVE);
1751 D16(PORTH_HYSTERESIS);
1752 D16(PORTH_MUX);
1753
1754 D16(MISCPORT_DRIVE);
1755 D16(MISCPORT_HYSTERESIS);
1756#endif /* BF51x */
1757
1758#ifdef __ADSPBF52x__
1759 D16(PORTF_FER);
1760 D16(PORTF_DRIVE);
1761 D16(PORTF_HYSTERESIS);
1762 D16(PORTF_MUX);
1763 D16(PORTF_SLEW);
1764
1765 D16(PORTG_FER);
1766 D16(PORTG_DRIVE);
1767 D16(PORTG_HYSTERESIS);
1768 D16(PORTG_MUX);
1769 D16(PORTG_SLEW);
1770
1771 D16(PORTH_FER);
1772 D16(PORTH_DRIVE);
1773 D16(PORTH_HYSTERESIS);
1774 D16(PORTH_MUX);
1775 D16(PORTH_SLEW);
1776
1777 D16(MISCPORT_DRIVE);
1778 D16(MISCPORT_HYSTERESIS);
1779 D16(MISCPORT_SLEW);
1780#endif /* BF52x */
1781
1782#ifdef BF537_FAMILY
1783 D16(PORTF_FER);
1784 D16(PORTG_FER);
1785 D16(PORTH_FER);
1786 D16(PORT_MUX);
1787#endif /* BF534 BF536 BF537 */
1788
1789#ifdef BF538_FAMILY
1790 D16(PORTCIO_FER);
1791 D16(PORTCIO);
1792 D16(PORTCIO_CLEAR);
1793 D16(PORTCIO_SET);
1794 D16(PORTCIO_TOGGLE);
1795 D16(PORTCIO_DIR);
1796 D16(PORTCIO_INEN);
1797
1798 D16(PORTDIO);
1799 D16(PORTDIO_CLEAR);
1800 D16(PORTDIO_DIR);
1801 D16(PORTDIO_FER);
1802 D16(PORTDIO_INEN);
1803 D16(PORTDIO_SET);
1804 D16(PORTDIO_TOGGLE);
1805
1806 D16(PORTEIO);
1807 D16(PORTEIO_CLEAR);
1808 D16(PORTEIO_DIR);
1809 D16(PORTEIO_FER);
1810 D16(PORTEIO_INEN);
1811 D16(PORTEIO_SET);
1812 D16(PORTEIO_TOGGLE);
1813#endif /* BF538 BF539 */
1814
1815#ifdef __ADSPBF54x__
1816 {
1817 int num;
1818 unsigned long base;
1819 char *_buf, buf[32];
1820
1821 base = PORTA_FER;
1822 for (num = 0; num < 10; ++num) {
1823 PORT(base, num);
1824 base += sizeof(struct bfin_gpio_regs);
1825 }
1826
1827#define __PINT(uname, lname) __REGS(pint, #uname, lname)
1828 parent = debugfs_create_dir("pint", top);
1829 base = PINT0_MASK_SET;
1830 for (num = 0; num < 4; ++num) {
1831 _buf = REGS_STR_PFX(buf, PINT, num);
1832 __PINT(MASK_SET, mask_set);
1833 __PINT(MASK_CLEAR, mask_clear);
1834 __PINT(IRQ, irq);
1835 __PINT(ASSIGN, assign);
1836 __PINT(EDGE_SET, edge_set);
1837 __PINT(EDGE_CLEAR, edge_clear);
1838 __PINT(INVERT_SET, invert_set);
1839 __PINT(INVERT_CLEAR, invert_clear);
1840 __PINT(PINSTATE, pinstate);
1841 __PINT(LATCH, latch);
1842 base += sizeof(struct bfin_pint_regs);
1843 }
1844
1845 }
1846#endif /* BF54x */
1847
1848 debug_mmrs_dentry = top;
1849
1850 return 0;
1851}
1852module_init(bfin_debug_mmrs_init);
1853
1854static void __exit bfin_debug_mmrs_exit(void)
1855{
1856 debugfs_remove_recursive(debug_mmrs_dentry);
1857}
1858module_exit(bfin_debug_mmrs_exit);
1859
1860MODULE_LICENSE("GPL");
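
With CONFIG_DEBUG_MMRS enabled, the registers registered above show up under debugfs as hex attribute files. A hedged userspace sketch for reading one of them (the /sys/kernel/debug mount point and the core_regs/cclk path are assumptions based on the directory and file names created in the file above):

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* cclk is created by debugfs_create_file("cclk", ...) above and
	 * prints the core clock as "0x%08llx\n". */
	FILE *f = fopen("/sys/kernel/debug/blackfin/core_regs/cclk", "r");

	if (!f) {
		perror("open cclk");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("CCLK: %s", buf);
	fclose(f);
	return 0;
}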
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index f37019c847c9..486426f8a0d7 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -33,6 +33,7 @@
 #include <linux/io.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
+#include <asm/irq_handler.h>
 
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 1696d34f51c2..ff3d747154ac 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -11,6 +11,7 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <asm/irq_handler.h>
 #include <asm/trace.h>
 #include <asm/pda.h>
 
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
index 401eb1d8e3b4..679d0db35256 100644
--- a/arch/blackfin/kernel/nmi.c
+++ b/arch/blackfin/kernel/nmi.c
@@ -145,16 +145,16 @@ int check_nmi_wdt_touched(void)
 {
 	unsigned int this_cpu = smp_processor_id();
 	unsigned int cpu;
+	cpumask_t mask;
 
-	cpumask_t mask = cpu_online_map;
-
+	cpumask_copy(&mask, cpu_online_mask);
 	if (!atomic_read(&nmi_touched[this_cpu]))
 		return 0;
 
 	atomic_set(&nmi_touched[this_cpu], 0);
 
-	cpu_clear(this_cpu, mask);
-	for_each_cpu_mask(cpu, mask) {
+	cpumask_clear_cpu(this_cpu, &mask);
+	for_each_cpu(cpu, &mask) {
 		invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
 					(unsigned long)(&nmi_touched[cpu]));
 		if (!atomic_read(&nmi_touched[cpu]))
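
The nmi.c hunk above follows the pointer-based cpumask API: the on-stack cpumask_t is now filled with cpumask_copy() and walked with for_each_cpu() instead of the old cpu_online_map/cpu_clear/for_each_cpu_mask helpers. A minimal sketch of the same pattern (the function name is illustrative, not from this patch):

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Visit every online CPU except the current one, using only the
 * pointer-based cpumask helpers shown in the hunk above. */
static void visit_other_cpus(void)
{
	unsigned int cpu;
	cpumask_t mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	for_each_cpu(cpu, &mask)
		pr_info("cpu %u is online\n", cpu);
}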
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
new file mode 100644
index 000000000000..04300f29c0e7
--- /dev/null
+++ b/arch/blackfin/kernel/perf_event.c
@@ -0,0 +1,498 @@
1/*
2 * Blackfin performance counters
3 *
4 * Copyright 2011 Analog Devices Inc.
5 *
6 * Ripped from SuperH version:
7 *
8 * Copyright (C) 2009 Paul Mundt
9 *
10 * Heavily based on the x86 and PowerPC implementations.
11 *
12 * x86:
13 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
15 * Copyright (C) 2009 Jaswinder Singh Rajput
16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
19 *
20 * ppc:
21 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
22 *
23 * Licensed under the GPL-2 or later.
24 */
25
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/perf_event.h>
29#include <asm/bfin_pfmon.h>
30
31/*
32 * We have two counters, and each counter can support an event type.
33 * The 'o' is PFCNTx=1 and 's' is PFCNTx=0
34 *
35 * 0x04 o pc invariant branches
36 * 0x06 o mispredicted branches
37 * 0x09 o predicted branches taken
38 * 0x0B o EXCPT insn
39 * 0x0C o CSYNC/SSYNC insn
40 * 0x0D o Insns committed
41 * 0x0E o Interrupts taken
42 * 0x0F o Misaligned address exceptions
43 * 0x80 o Code memory fetches stalled due to DMA
44 * 0x83 o 64bit insn fetches delivered
45 * 0x9A o data cache fills (bank a)
46 * 0x9B o data cache fills (bank b)
47 * 0x9C o data cache lines evicted (bank a)
48 * 0x9D o data cache lines evicted (bank b)
49 * 0x9E o data cache high priority fills
50 * 0x9F o data cache low priority fills
51 * 0x00 s loop 0 iterations
52 * 0x01 s loop 1 iterations
53 * 0x0A s CSYNC/SSYNC stalls
54 * 0x10 s DAG read/after write hazards
55 * 0x13 s RAW data hazards
56 * 0x81 s code TAG stalls
57 * 0x82 s code fill stalls
58 * 0x90 s processor to memory stalls
59 * 0x91 s data memory stalls not hidden by 0x90
60 * 0x92 s data store buffer full stalls
61 * 0x93 s data memory write buffer full stalls due to high->low priority
62 * 0x95 s data memory fill buffer stalls
63 * 0x96 s data TAG collision stalls
64 * 0x97 s data collision stalls
65 * 0x98 s data stalls
66 * 0x99 s data stalls sent to processor
67 */
68
69static const int event_map[] = {
70 /* use CYCLES cpu register */
71 [PERF_COUNT_HW_CPU_CYCLES] = -1,
72 [PERF_COUNT_HW_INSTRUCTIONS] = 0x0D,
73 [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
74 [PERF_COUNT_HW_CACHE_MISSES] = 0x83,
75 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x09,
76 [PERF_COUNT_HW_BRANCH_MISSES] = 0x06,
77 [PERF_COUNT_HW_BUS_CYCLES] = -1,
78};
79
80#define C(x) PERF_COUNT_HW_CACHE_##x
81
82static const int cache_events[PERF_COUNT_HW_CACHE_MAX]
83 [PERF_COUNT_HW_CACHE_OP_MAX]
84 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
85{
86 [C(L1D)] = { /* Data bank A */
87 [C(OP_READ)] = {
88 [C(RESULT_ACCESS)] = 0,
89 [C(RESULT_MISS) ] = 0x9A,
90 },
91 [C(OP_WRITE)] = {
92 [C(RESULT_ACCESS)] = 0,
93 [C(RESULT_MISS) ] = 0,
94 },
95 [C(OP_PREFETCH)] = {
96 [C(RESULT_ACCESS)] = 0,
97 [C(RESULT_MISS) ] = 0,
98 },
99 },
100
101 [C(L1I)] = {
102 [C(OP_READ)] = {
103 [C(RESULT_ACCESS)] = 0,
104 [C(RESULT_MISS) ] = 0x83,
105 },
106 [C(OP_WRITE)] = {
107 [C(RESULT_ACCESS)] = -1,
108 [C(RESULT_MISS) ] = -1,
109 },
110 [C(OP_PREFETCH)] = {
111 [C(RESULT_ACCESS)] = 0,
112 [C(RESULT_MISS) ] = 0,
113 },
114 },
115
116 [C(LL)] = {
117 [C(OP_READ)] = {
118 [C(RESULT_ACCESS)] = -1,
119 [C(RESULT_MISS) ] = -1,
120 },
121 [C(OP_WRITE)] = {
122 [C(RESULT_ACCESS)] = -1,
123 [C(RESULT_MISS) ] = -1,
124 },
125 [C(OP_PREFETCH)] = {
126 [C(RESULT_ACCESS)] = -1,
127 [C(RESULT_MISS) ] = -1,
128 },
129 },
130
131 [C(DTLB)] = {
132 [C(OP_READ)] = {
133 [C(RESULT_ACCESS)] = -1,
134 [C(RESULT_MISS) ] = -1,
135 },
136 [C(OP_WRITE)] = {
137 [C(RESULT_ACCESS)] = -1,
138 [C(RESULT_MISS) ] = -1,
139 },
140 [C(OP_PREFETCH)] = {
141 [C(RESULT_ACCESS)] = -1,
142 [C(RESULT_MISS) ] = -1,
143 },
144 },
145
146 [C(ITLB)] = {
147 [C(OP_READ)] = {
148 [C(RESULT_ACCESS)] = -1,
149 [C(RESULT_MISS) ] = -1,
150 },
151 [C(OP_WRITE)] = {
152 [C(RESULT_ACCESS)] = -1,
153 [C(RESULT_MISS) ] = -1,
154 },
155 [C(OP_PREFETCH)] = {
156 [C(RESULT_ACCESS)] = -1,
157 [C(RESULT_MISS) ] = -1,
158 },
159 },
160
161 [C(BPU)] = {
162 [C(OP_READ)] = {
163 [C(RESULT_ACCESS)] = -1,
164 [C(RESULT_MISS) ] = -1,
165 },
166 [C(OP_WRITE)] = {
167 [C(RESULT_ACCESS)] = -1,
168 [C(RESULT_MISS) ] = -1,
169 },
170 [C(OP_PREFETCH)] = {
171 [C(RESULT_ACCESS)] = -1,
172 [C(RESULT_MISS) ] = -1,
173 },
174 },
175};
176
177const char *perf_pmu_name(void)
178{
179 return "bfin";
180}
181EXPORT_SYMBOL(perf_pmu_name);
182
183int perf_num_counters(void)
184{
185 return ARRAY_SIZE(event_map);
186}
187EXPORT_SYMBOL(perf_num_counters);
188
189static u64 bfin_pfmon_read(int idx)
190{
191 return bfin_read32(PFCNTR0 + (idx * 4));
192}
193
194static void bfin_pfmon_disable(struct hw_perf_event *hwc, int idx)
195{
196 bfin_write_PFCTL(bfin_read_PFCTL() & ~PFCEN(idx, PFCEN_MASK));
197}
198
199static void bfin_pfmon_enable(struct hw_perf_event *hwc, int idx)
200{
201 u32 val, mask;
202
203 val = PFPWR;
204 if (idx) {
205 mask = ~(PFCNT1 | PFMON1 | PFCEN1 | PEMUSW1);
206 /* The packed config is for event0, so shift it to event1 slots */
207 val |= (hwc->config << (PFMON1_P - PFMON0_P));
208 val |= (hwc->config & PFCNT0) << (PFCNT1_P - PFCNT0_P);
209 bfin_write_PFCNTR1(0);
210 } else {
211 mask = ~(PFCNT0 | PFMON0 | PFCEN0 | PEMUSW0);
212 val |= hwc->config;
213 bfin_write_PFCNTR0(0);
214 }
215
216 bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
217}
218
219static void bfin_pfmon_disable_all(void)
220{
221 bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
222}
223
224static void bfin_pfmon_enable_all(void)
225{
226 bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
227}
228
229struct cpu_hw_events {
230 struct perf_event *events[MAX_HWEVENTS];
231 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
232};
233DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
234
235static int hw_perf_cache_event(int config, int *evp)
236{
237 unsigned long type, op, result;
238 int ev;
239
240 /* unpack config */
241 type = config & 0xff;
242 op = (config >> 8) & 0xff;
243 result = (config >> 16) & 0xff;
244
245 if (type >= PERF_COUNT_HW_CACHE_MAX ||
246 op >= PERF_COUNT_HW_CACHE_OP_MAX ||
247 result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
248 return -EINVAL;
249
250 ev = cache_events[type][op][result];
251 if (ev == 0)
252 return -EOPNOTSUPP;
253 if (ev == -1)
254 return -EINVAL;
255 *evp = ev;
256 return 0;
257}
258
259static void bfin_perf_event_update(struct perf_event *event,
260 struct hw_perf_event *hwc, int idx)
261{
262 u64 prev_raw_count, new_raw_count;
263 s64 delta;
264 int shift = 0;
265
266 /*
267 * Depending on the counter configuration, they may or may not
268 * be chained, in which case the previous counter value can be
269 * updated underneath us if the lower-half overflows.
270 *
271 * Our tactic to handle this is to first atomically read and
272 * exchange a new raw count - then add that new-prev delta
273 * count to the generic counter atomically.
274 *
275 * As there is no interrupt associated with the overflow events,
276 * this is the simplest approach for maintaining consistency.
277 */
278again:
279 prev_raw_count = local64_read(&hwc->prev_count);
280 new_raw_count = bfin_pfmon_read(idx);
281
282 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
283 new_raw_count) != prev_raw_count)
284 goto again;
285
286 /*
287 * Now we have the new raw value and have updated the prev
288 * timestamp already. We can now calculate the elapsed delta
289 * (counter-)time and add that to the generic counter.
290 *
291 * Careful, not all hw sign-extends above the physical width
292 * of the count.
293 */
294 delta = (new_raw_count << shift) - (prev_raw_count << shift);
295 delta >>= shift;
296
297 local64_add(delta, &event->count);
298}
299
300static void bfin_pmu_stop(struct perf_event *event, int flags)
301{
302 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
303 struct hw_perf_event *hwc = &event->hw;
304 int idx = hwc->idx;
305
306 if (!(event->hw.state & PERF_HES_STOPPED)) {
307 bfin_pfmon_disable(hwc, idx);
308 cpuc->events[idx] = NULL;
309 event->hw.state |= PERF_HES_STOPPED;
310 }
311
312 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
313 bfin_perf_event_update(event, &event->hw, idx);
314 event->hw.state |= PERF_HES_UPTODATE;
315 }
316}
317
318static void bfin_pmu_start(struct perf_event *event, int flags)
319{
320 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
321 struct hw_perf_event *hwc = &event->hw;
322 int idx = hwc->idx;
323
324 if (WARN_ON_ONCE(idx == -1))
325 return;
326
327 if (flags & PERF_EF_RELOAD)
328 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
329
330 cpuc->events[idx] = event;
331 event->hw.state = 0;
332 bfin_pfmon_enable(hwc, idx);
333}
334
335static void bfin_pmu_del(struct perf_event *event, int flags)
336{
337 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
338
339 bfin_pmu_stop(event, PERF_EF_UPDATE);
340 __clear_bit(event->hw.idx, cpuc->used_mask);
341
342 perf_event_update_userpage(event);
343}
344
345static int bfin_pmu_add(struct perf_event *event, int flags)
346{
347 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
348 struct hw_perf_event *hwc = &event->hw;
349 int idx = hwc->idx;
350 int ret = -EAGAIN;
351
352 perf_pmu_disable(event->pmu);
353
354 if (__test_and_set_bit(idx, cpuc->used_mask)) {
355 idx = find_first_zero_bit(cpuc->used_mask, MAX_HWEVENTS);
356 if (idx == MAX_HWEVENTS)
357 goto out;
358
359 __set_bit(idx, cpuc->used_mask);
360 hwc->idx = idx;
361 }
362
363 bfin_pfmon_disable(hwc, idx);
364
365 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
366 if (flags & PERF_EF_START)
367 bfin_pmu_start(event, PERF_EF_RELOAD);
368
369 perf_event_update_userpage(event);
370 ret = 0;
371out:
372 perf_pmu_enable(event->pmu);
373 return ret;
374}
375
376static void bfin_pmu_read(struct perf_event *event)
377{
378 bfin_perf_event_update(event, &event->hw, event->hw.idx);
379}
380
381static int bfin_pmu_event_init(struct perf_event *event)
382{
383 struct perf_event_attr *attr = &event->attr;
384 struct hw_perf_event *hwc = &event->hw;
385 int config = -1;
386 int ret;
387
388 if (attr->exclude_hv || attr->exclude_idle)
389 return -EPERM;
390
391 /*
392 * All of the on-chip counters are "limited", in that they have
393 * no interrupts, and are therefore unable to do sampling without
394 * further work and timer assistance.
395 */
396 if (hwc->sample_period)
397 return -EINVAL;
398
399 ret = 0;
400 switch (attr->type) {
401 case PERF_TYPE_RAW:
402 config = PFMON(0, attr->config & PFMON_MASK) |
403 PFCNT(0, !(attr->config & 0x100));
404 break;
405 case PERF_TYPE_HW_CACHE:
406 ret = hw_perf_cache_event(attr->config, &config);
407 break;
408 case PERF_TYPE_HARDWARE:
409 if (attr->config >= ARRAY_SIZE(event_map))
410 return -EINVAL;
411
412 config = event_map[attr->config];
413 break;
414 }
415
416 if (config == -1)
417 return -EINVAL;
418
419 if (!attr->exclude_kernel)
420 config |= PFCEN(0, PFCEN_ENABLE_SUPV);
421 if (!attr->exclude_user)
422 config |= PFCEN(0, PFCEN_ENABLE_USER);
423
424 hwc->config |= config;
425
426 return ret;
427}
428
429static void bfin_pmu_enable(struct pmu *pmu)
430{
431 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
432 struct perf_event *event;
433 struct hw_perf_event *hwc;
434 int i;
435
436 for (i = 0; i < MAX_HWEVENTS; ++i) {
437 event = cpuc->events[i];
438 if (!event)
439 continue;
440 hwc = &event->hw;
441 bfin_pfmon_enable(hwc, hwc->idx);
442 }
443
444 bfin_pfmon_enable_all();
445}
446
447static void bfin_pmu_disable(struct pmu *pmu)
448{
449 bfin_pfmon_disable_all();
450}
451
452static struct pmu pmu = {
453 .pmu_enable = bfin_pmu_enable,
454 .pmu_disable = bfin_pmu_disable,
455 .event_init = bfin_pmu_event_init,
456 .add = bfin_pmu_add,
457 .del = bfin_pmu_del,
458 .start = bfin_pmu_start,
459 .stop = bfin_pmu_stop,
460 .read = bfin_pmu_read,
461};
462
463static void bfin_pmu_setup(int cpu)
464{
465 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
466
467 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
468}
469
470static int __cpuinit
471bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
472{
473 unsigned int cpu = (long)hcpu;
474
475 switch (action & ~CPU_TASKS_FROZEN) {
476 case CPU_UP_PREPARE:
477 bfin_write_PFCTL(0);
478 bfin_pmu_setup(cpu);
479 break;
480
481 default:
482 break;
483 }
484
485 return NOTIFY_OK;
486}
487
488static int __init bfin_pmu_init(void)
489{
490 int ret;
491
492 ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
493 if (!ret)
494 perf_cpu_notifier(bfin_pmu_notifier);
495
496 return ret;
497}
498early_initcall(bfin_pmu_init);
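[Editor's note] hw_perf_cache_event() in the new driver above unpacks attr->config using the generic perf cache encoding: type in bits 0-7, op in bits 8-15, result in bits 16-23. As a hedged illustration of what a userspace consumer looks like, the sketch below requests an L1D read-miss count (which the cache_events table maps to event 0x9A) via perf_event_open(); the program itself is illustrative, but the config packing and syscall ABI are the standard ones the driver matches against. Note the counter is read, not sampled, since bfin_pmu_event_init() rejects any sample_period:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HW_CACHE;
	/* type | (op << 8) | (result << 16), mirroring hw_perf_cache_event() */
	attr.config = PERF_COUNT_HW_CACHE_L1D |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	/* No sample_period: the Blackfin PMU above has no overflow interrupt. */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* pid: self */,
		     -1 /* cpu: any */, -1 /* group */, 0 /* flags */);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the workload to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("L1D read misses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}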
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index b407bc8ad918..6a660fa921b5 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -171,10 +171,8 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
171 unsigned long newsp; 171 unsigned long newsp;
172 172
173#ifdef __ARCH_SYNC_CORE_DCACHE 173#ifdef __ARCH_SYNC_CORE_DCACHE
174 if (current->rt.nr_cpus_allowed == num_possible_cpus()) { 174 if (current->rt.nr_cpus_allowed == num_possible_cpus())
175 current->cpus_allowed = cpumask_of_cpu(smp_processor_id()); 175 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
176 current->rt.nr_cpus_allowed = 1;
177 }
178#endif 176#endif
179 177
180 /* syscall2 puts clone_flags in r0 and usp in r1 */ 178 /* syscall2 puts clone_flags in r0 and usp in r1 */
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 53d08dee8531..488bdc51aaa5 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -23,6 +23,9 @@
23__attribute__ ((__l1_text__, __noreturn__)) 23__attribute__ ((__l1_text__, __noreturn__))
24static void bfin_reset(void) 24static void bfin_reset(void)
25{ 25{
26 if (!ANOMALY_05000353 && !ANOMALY_05000386)
27 bfrom_SoftReset((void *)(L1_SCRATCH_START + L1_SCRATCH_LENGTH - 20));
28
26 /* Wait for completion of "system" events such as cache line 29 /* Wait for completion of "system" events such as cache line
27 * line fills so that we avoid infinite stalls later on as 30 * line fills so that we avoid infinite stalls later on as
28 * much as possible. This code is in L1, so it won't trigger 31 * much as possible. This code is in L1, so it won't trigger
@@ -30,46 +33,40 @@ static void bfin_reset(void)
30 */ 33 */
31 __builtin_bfin_ssync(); 34 __builtin_bfin_ssync();
32 35
33 /* The bootrom checks to see how it was reset and will 36 /* Initiate System software reset. */
34 * automatically perform a software reset for us when 37 bfin_write_SWRST(0x7);
35 * it starts executing after the core reset.
36 */
37 if (ANOMALY_05000353 || ANOMALY_05000386) {
38 /* Initiate System software reset. */
39 bfin_write_SWRST(0x7);
40 38
41 /* Due to the way reset is handled in the hardware, we need 39 /* Due to the way reset is handled in the hardware, we need
42 * to delay for 10 SCLKS. The only reliable way to do this is 40 * to delay for 10 SCLKS. The only reliable way to do this is
43 * to calculate the CCLK/SCLK ratio and multiply 10. For now, 41 * to calculate the CCLK/SCLK ratio and multiply 10. For now,
44 * we'll assume worse case which is a 1:15 ratio. 42 * we'll assume worse case which is a 1:15 ratio.
45 */ 43 */
46 asm( 44 asm(
47 "LSETUP (1f, 1f) LC0 = %0\n" 45 "LSETUP (1f, 1f) LC0 = %0\n"
48 "1: nop;" 46 "1: nop;"
49 : 47 :
50 : "a" (15 * 10) 48 : "a" (15 * 10)
51 : "LC0", "LB0", "LT0" 49 : "LC0", "LB0", "LT0"
52 ); 50 );
53 51
54 /* Clear System software reset */ 52 /* Clear System software reset */
55 bfin_write_SWRST(0); 53 bfin_write_SWRST(0);
56 54
57 /* The BF526 ROM will crash during reset */ 55 /* The BF526 ROM will crash during reset */
58#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__) 56#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
59 bfin_read_SWRST(); 57 bfin_read_SWRST();
60#endif 58#endif
61 59
62 /* Wait for the SWRST write to complete. Cannot rely on SSYNC 60 /* Wait for the SWRST write to complete. Cannot rely on SSYNC
63 * though as the System state is all reset now. 61 * though as the System state is all reset now.
64 */ 62 */
65 asm( 63 asm(
66 "LSETUP (1f, 1f) LC1 = %0\n" 64 "LSETUP (1f, 1f) LC1 = %0\n"
67 "1: nop;" 65 "1: nop;"
68 : 66 :
69 : "a" (15 * 1) 67 : "a" (15 * 1)
70 : "LC1", "LB1", "LT1" 68 : "LC1", "LB1", "LT1"
71 ); 69 );
72 }
73 70
74 while (1) 71 while (1)
75 /* Issue core reset */ 72 /* Issue core reset */
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 805c6132c779..536bd9d7e0cf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <asm/cpu.h> 29#include <asm/cpu.h>
30#include <asm/fixed_code.h> 30#include <asm/fixed_code.h>
31#include <asm/early_printk.h> 31#include <asm/early_printk.h>
32#include <asm/irq_handler.h>
32 33
33u16 _bfin_swrst; 34u16 _bfin_swrst;
34EXPORT_SYMBOL(_bfin_swrst); 35EXPORT_SYMBOL(_bfin_swrst);
@@ -105,6 +106,8 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
105 bfin_dcache_init(dcplb_tbl[cpu]); 106 bfin_dcache_init(dcplb_tbl[cpu]);
106#endif 107#endif
107 108
109 bfin_setup_cpudata(cpu);
110
108 /* 111 /*
109 * In cache coherence emulation mode, we need to have the 112 * In cache coherence emulation mode, we need to have the
110 * D-cache enabled before running any atomic operation which 113 * D-cache enabled before running any atomic operation which
@@ -163,7 +166,6 @@ void __cpuinit bfin_setup_cpudata(unsigned int cpu)
163{ 166{
164 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu); 167 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
165 168
166 cpudata->idle = current;
167 cpudata->imemctl = bfin_read_IMEM_CONTROL(); 169 cpudata->imemctl = bfin_read_IMEM_CONTROL();
168 cpudata->dmemctl = bfin_read_DMEM_CONTROL(); 170 cpudata->dmemctl = bfin_read_DMEM_CONTROL();
169} 171}
@@ -851,6 +853,7 @@ void __init native_machine_early_platform_add_devices(void)
851 853
852void __init setup_arch(char **cmdline_p) 854void __init setup_arch(char **cmdline_p)
853{ 855{
856 u32 mmr;
854 unsigned long sclk, cclk; 857 unsigned long sclk, cclk;
855 858
856 native_machine_early_platform_add_devices(); 859 native_machine_early_platform_add_devices();
@@ -902,10 +905,10 @@ void __init setup_arch(char **cmdline_p)
902 bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL); 905 bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
903#endif 906#endif
904#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL 907#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
905 bfin_write_PORTF_HYSTERISIS(HYST_PORTF_0_15); 908 bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
906 bfin_write_PORTG_HYSTERISIS(HYST_PORTG_0_15); 909 bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
907 bfin_write_PORTH_HYSTERISIS(HYST_PORTH_0_15); 910 bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
908 bfin_write_MISCPORT_HYSTERISIS((bfin_read_MISCPORT_HYSTERISIS() & 911 bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
909 ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO); 912 ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
910#endif 913#endif
911 914
@@ -921,17 +924,14 @@ void __init setup_arch(char **cmdline_p)
921 bfin_read_IMDMA_D1_IRQ_STATUS(); 924 bfin_read_IMDMA_D1_IRQ_STATUS();
922 } 925 }
923#endif 926#endif
924 printk(KERN_INFO "Hardware Trace ");
925 if (bfin_read_TBUFCTL() & 0x1)
926 printk(KERN_CONT "Active ");
927 else
928 printk(KERN_CONT "Off ");
929 if (bfin_read_TBUFCTL() & 0x2)
930 printk(KERN_CONT "and Enabled\n");
931 else
932 printk(KERN_CONT "and Disabled\n");
933 927
934 printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF); 928 mmr = bfin_read_TBUFCTL();
929 printk(KERN_INFO "Hardware Trace %s and %sabled\n",
930 (mmr & 0x1) ? "active" : "off",
931 (mmr & 0x2) ? "en" : "dis");
932
933 mmr = bfin_read_SYSCR();
934 printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);
935 935
936 /* Newer parts mirror SWRST bits in SYSCR */ 936 /* Newer parts mirror SWRST bits in SYSCR */
937#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \ 937#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
@@ -939,7 +939,7 @@ void __init setup_arch(char **cmdline_p)
939 _bfin_swrst = bfin_read_SWRST(); 939 _bfin_swrst = bfin_read_SWRST();
940#else 940#else
941 /* Clear boot mode field */ 941 /* Clear boot mode field */
942 _bfin_swrst = bfin_read_SYSCR() & ~0xf; 942 _bfin_swrst = mmr & ~0xf;
943#endif 943#endif
944 944
945#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT 945#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -1036,8 +1036,6 @@ void __init setup_arch(char **cmdline_p)
1036static int __init topology_init(void) 1036static int __init topology_init(void)
1037{ 1037{
1038 unsigned int cpu; 1038 unsigned int cpu;
1039 /* Record CPU-private information for the boot processor. */
1040 bfin_setup_cpudata(0);
1041 1039
1042 for_each_possible_cpu(cpu) { 1040 for_each_possible_cpu(cpu) {
1043 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu); 1041 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
@@ -1283,12 +1281,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1283 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, 1281 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
1284 BFIN_DLINES); 1282 BFIN_DLINES);
1285#ifdef __ARCH_SYNC_CORE_DCACHE 1283#ifdef __ARCH_SYNC_CORE_DCACHE
1286 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]); 1284 seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
1287#endif 1285#endif
1288#ifdef __ARCH_SYNC_CORE_ICACHE 1286#ifdef __ARCH_SYNC_CORE_ICACHE
1289 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]); 1287 seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
1290#endif 1288#endif
1291 1289
1290 seq_printf(m, "\n");
1291
1292 if (cpu_num != num_possible_cpus() - 1) 1292 if (cpu_num != num_possible_cpus() - 1)
1293 return 0; 1293 return 0;
1294 1294
@@ -1312,13 +1312,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1312 " in data cache\n"); 1312 " in data cache\n");
1313 } 1313 }
1314 seq_printf(m, "board name\t: %s\n", bfin_board_name); 1314 seq_printf(m, "board name\t: %s\n", bfin_board_name);
1315 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1315 seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
1316 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1316 physical_mem_end >> 10, 0ul, physical_mem_end);
1317 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", 1317 seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
1318 ((int)memory_end - (int)_rambase) >> 10, 1318 ((int)memory_end - (int)_rambase) >> 10,
1319 (void *)_rambase, 1319 _rambase, memory_end);
1320 (void *)memory_end);
1321 seq_printf(m, "\n");
1322 1320
1323 return 0; 1321 return 0;
1324} 1322}
@@ -1326,7 +1324,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1326static void *c_start(struct seq_file *m, loff_t *pos) 1324static void *c_start(struct seq_file *m, loff_t *pos)
1327{ 1325{
1328 if (*pos == 0) 1326 if (*pos == 0)
1329 *pos = first_cpu(cpu_online_map); 1327 *pos = cpumask_first(cpu_online_mask);
1330 if (*pos >= num_online_cpus()) 1328 if (*pos >= num_online_cpus())
1331 return NULL; 1329 return NULL;
1332 1330
@@ -1335,7 +1333,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
1335 1333
1336static void *c_next(struct seq_file *m, void *v, loff_t *pos) 1334static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1337{ 1335{
1338 *pos = next_cpu(*pos, cpu_online_map); 1336 *pos = cpumask_next(*pos, cpu_online_mask);
1339 1337
1340 return c_start(m, pos); 1338 return c_start(m, pos);
1341} 1339}
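[Editor's note] The c_start()/c_next() hunks above switch to cpumask_first()/cpumask_next() for walking online CPUs. For context, a minimal sketch of how such iterators are usually wired into a struct seq_operations; c_show() and the operations name below are stand-ins, not the actual Blackfin show_cpuinfo() plumbing:

#include <linux/seq_file.h>
#include <linux/cpumask.h>

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = cpumask_first(cpu_online_mask);	/* first online CPU */
	if (*pos >= num_online_cpus())
		return NULL;				/* iteration done */
	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = cpumask_next(*pos, cpu_online_mask);	/* next online CPU */
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

static int c_show(struct seq_file *m, void *v)		/* stand-in for show_cpuinfo() */
{
	seq_printf(m, "cpu\t\t: %lu\n", (unsigned long)*(loff_t *)v);
	return 0;
}

static const struct seq_operations sketch_cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};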
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 8d85c8c6f857..3ac5b66d14aa 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -155,14 +155,8 @@ SECTIONS
155 SECURITY_INITCALL 155 SECURITY_INITCALL
156 INIT_RAM_FS 156 INIT_RAM_FS
157 157
158 . = ALIGN(4);
159 ___per_cpu_load = .; 158 ___per_cpu_load = .;
160 ___per_cpu_start = .; 159 PERCPU_INPUT(32)
161 *(.data.percpu.first)
162 *(.data.percpu.page_aligned)
163 *(.data.percpu)
164 *(.data.percpu.shared_aligned)
165 ___per_cpu_end = .;
166 160
167 EXIT_DATA 161 EXIT_DATA
168 __einitdata = .; 162 __einitdata = .;
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index 24918c5f7ea1..d2f076fbbc9e 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -5,7 +5,7 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
@@ -141,6 +141,7 @@
141#define ANOMALY_05000364 (0) 141#define ANOMALY_05000364 (0)
142#define ANOMALY_05000371 (0) 142#define ANOMALY_05000371 (0)
143#define ANOMALY_05000380 (0) 143#define ANOMALY_05000380 (0)
144#define ANOMALY_05000383 (0)
144#define ANOMALY_05000386 (0) 145#define ANOMALY_05000386 (0)
145#define ANOMALY_05000389 (0) 146#define ANOMALY_05000389 (0)
146#define ANOMALY_05000400 (0) 147#define ANOMALY_05000400 (0)
@@ -155,6 +156,7 @@
155#define ANOMALY_05000467 (0) 156#define ANOMALY_05000467 (0)
156#define ANOMALY_05000474 (0) 157#define ANOMALY_05000474 (0)
157#define ANOMALY_05000475 (0) 158#define ANOMALY_05000475 (0)
159#define ANOMALY_05000480 (0)
158#define ANOMALY_05000485 (0) 160#define ANOMALY_05000485 (0)
159 161
160#endif 162#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
index b657d37a3402..bb79627f0929 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -990,18 +990,18 @@
990#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val) 990#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
991#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW) 991#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
992#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val) 992#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
993#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS) 993#define bfin_read_PORTF_HYSTERESIS() bfin_read16(PORTF_HYSTERESIS)
994#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val) 994#define bfin_write_PORTF_HYSTERESIS(val) bfin_write16(PORTF_HYSTERESIS, val)
995#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS) 995#define bfin_read_PORTG_HYSTERESIS() bfin_read16(PORTG_HYSTERESIS)
996#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val) 996#define bfin_write_PORTG_HYSTERESIS(val) bfin_write16(PORTG_HYSTERESIS, val)
997#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS) 997#define bfin_read_PORTH_HYSTERESIS() bfin_read16(PORTH_HYSTERESIS)
998#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val) 998#define bfin_write_PORTH_HYSTERESIS(val) bfin_write16(PORTH_HYSTERESIS, val)
999#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE) 999#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1000#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val) 1000#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1001#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW) 1001#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1002#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val) 1002#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1003#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS) 1003#define bfin_read_MISCPORT_HYSTERESIS() bfin_read16(MISCPORT_HYSTERESIS)
1004#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val) 1004#define bfin_write_MISCPORT_HYSTERESIS(val) bfin_write16(MISCPORT_HYSTERESIS, val)
1005 1005
1006/* HOST Port Registers */ 1006/* HOST Port Registers */
1007 1007
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
index cb1172f50757..729704078cd7 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -561,12 +561,12 @@
561#define PORTF_SLEW 0xFFC03230 /* Port F slew control */ 561#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
562#define PORTG_SLEW 0xFFC03234 /* Port G slew control */ 562#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
563#define PORTH_SLEW 0xFFC03238 /* Port H slew control */ 563#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
564#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */ 564#define PORTF_HYSTERESIS 0xFFC03240 /* Port F Schmitt trigger control */
565#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */ 565#define PORTG_HYSTERESIS 0xFFC03244 /* Port G Schmitt trigger control */
566#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */ 566#define PORTH_HYSTERESIS 0xFFC03248 /* Port H Schmitt trigger control */
567#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */ 567#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
568#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */ 568#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
569#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */ 569#define MISCPORT_HYSTERESIS 0xFFC03288 /* Misc Port Schmitt trigger control */
570 570
571 571
572/*********************************************************************************** 572/***********************************************************************************
diff --git a/arch/blackfin/mach-bf518/include/mach/irq.h b/arch/blackfin/mach-bf518/include/mach/irq.h
index 435e76e31aaa..edf8efd457dc 100644
--- a/arch/blackfin/mach-bf518/include/mach/irq.h
+++ b/arch/blackfin/mach-bf518/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF518_IRQ_H_ 7#ifndef _BF518_IRQ_H_
8#define _BF518_IRQ_H_ 8#define _BF518_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 Event Source Core Event Name 12#define NR_PERI_INTS (2 * 32)
13 Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22 .....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27*/
28
29#define NR_PERI_INTS (2 * 32)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt */
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40
41#define BFIN_IRQ(x) ((x) + 7)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */ 15#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */
@@ -54,23 +25,23 @@
54#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */ 25#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */
55#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */ 26#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */
56#define IRQ_RTC BFIN_IRQ(14) /* RTC */ 27#define IRQ_RTC BFIN_IRQ(14) /* RTC */
57#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI) */ 28#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI) */
58#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */ 29#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */
59#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */ 30#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */
60#define IRQ_RSI BFIN_IRQ(17) /* DMA 4 Channel (RSI) */ 31#define IRQ_RSI BFIN_IRQ(17) /* DMA 4 Channel (RSI) */
61#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX/SPI) */ 32#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX/SPI) */
62#define IRQ_SPI1 BFIN_IRQ(18) /* DMA 5 Channel (SPI1) */ 33#define IRQ_SPI1 BFIN_IRQ(18) /* DMA 5 Channel (SPI1) */
63#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */ 34#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */
64#define IRQ_TWI BFIN_IRQ(20) /* TWI */ 35#define IRQ_TWI BFIN_IRQ(20) /* TWI */
65#define IRQ_SPI0 BFIN_IRQ(21) /* DMA 7 Channel (SPI0) */ 36#define IRQ_SPI0 BFIN_IRQ(21) /* DMA 7 Channel (SPI0) */
66#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */ 37#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */
67#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */ 38#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */
68#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */ 39#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */
69#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */ 40#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */
70#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */ 41#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */
71#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */ 42#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */
72#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX) */ 43#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX) */
73#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */ 44#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */
74#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX) */ 45#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX) */
75#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */ 46#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */
76#define IRQ_TIMER0 BFIN_IRQ(32) /* Timer 0 */ 47#define IRQ_TIMER0 BFIN_IRQ(32) /* Timer 0 */
@@ -96,101 +67,90 @@
96#define IRQ_PWM_SYNC BFIN_IRQ(54) /* PWM Sync Interrupt */ 67#define IRQ_PWM_SYNC BFIN_IRQ(54) /* PWM Sync Interrupt */
97#define IRQ_PTP_STAT BFIN_IRQ(55) /* PTP Stat Interrupt */ 68#define IRQ_PTP_STAT BFIN_IRQ(55) /* PTP Stat Interrupt */
98 69
99#define SYS_IRQS BFIN_IRQ(63) /* 70 */ 70#define SYS_IRQS BFIN_IRQ(63) /* 70 */
100 71
101#define IRQ_PF0 71 72#define IRQ_PF0 71
102#define IRQ_PF1 72 73#define IRQ_PF1 72
103#define IRQ_PF2 73 74#define IRQ_PF2 73
104#define IRQ_PF3 74 75#define IRQ_PF3 74
105#define IRQ_PF4 75 76#define IRQ_PF4 75
106#define IRQ_PF5 76 77#define IRQ_PF5 76
107#define IRQ_PF6 77 78#define IRQ_PF6 77
108#define IRQ_PF7 78 79#define IRQ_PF7 78
109#define IRQ_PF8 79 80#define IRQ_PF8 79
110#define IRQ_PF9 80 81#define IRQ_PF9 80
111#define IRQ_PF10 81 82#define IRQ_PF10 81
112#define IRQ_PF11 82 83#define IRQ_PF11 82
113#define IRQ_PF12 83 84#define IRQ_PF12 83
114#define IRQ_PF13 84 85#define IRQ_PF13 84
115#define IRQ_PF14 85 86#define IRQ_PF14 85
116#define IRQ_PF15 86 87#define IRQ_PF15 86
117 88
118#define IRQ_PG0 87 89#define IRQ_PG0 87
119#define IRQ_PG1 88 90#define IRQ_PG1 88
120#define IRQ_PG2 89 91#define IRQ_PG2 89
121#define IRQ_PG3 90 92#define IRQ_PG3 90
122#define IRQ_PG4 91 93#define IRQ_PG4 91
123#define IRQ_PG5 92 94#define IRQ_PG5 92
124#define IRQ_PG6 93 95#define IRQ_PG6 93
125#define IRQ_PG7 94 96#define IRQ_PG7 94
126#define IRQ_PG8 95 97#define IRQ_PG8 95
127#define IRQ_PG9 96 98#define IRQ_PG9 96
128#define IRQ_PG10 97 99#define IRQ_PG10 97
129#define IRQ_PG11 98 100#define IRQ_PG11 98
130#define IRQ_PG12 99 101#define IRQ_PG12 99
131#define IRQ_PG13 100 102#define IRQ_PG13 100
132#define IRQ_PG14 101 103#define IRQ_PG14 101
133#define IRQ_PG15 102 104#define IRQ_PG15 102
134 105
135#define IRQ_PH0 103 106#define IRQ_PH0 103
136#define IRQ_PH1 104 107#define IRQ_PH1 104
137#define IRQ_PH2 105 108#define IRQ_PH2 105
138#define IRQ_PH3 106 109#define IRQ_PH3 106
139#define IRQ_PH4 107 110#define IRQ_PH4 107
140#define IRQ_PH5 108 111#define IRQ_PH5 108
141#define IRQ_PH6 109 112#define IRQ_PH6 109
142#define IRQ_PH7 110 113#define IRQ_PH7 110
143#define IRQ_PH8 111 114#define IRQ_PH8 111
144#define IRQ_PH9 112 115#define IRQ_PH9 112
145#define IRQ_PH10 113 116#define IRQ_PH10 113
146#define IRQ_PH11 114 117#define IRQ_PH11 114
147#define IRQ_PH12 115 118#define IRQ_PH12 115
148#define IRQ_PH13 116 119#define IRQ_PH13 116
149#define IRQ_PH14 117 120#define IRQ_PH14 117
150#define IRQ_PH15 118 121#define IRQ_PH15 118
151 122
152#define GPIO_IRQ_BASE IRQ_PF0 123#define GPIO_IRQ_BASE IRQ_PF0
153 124
154#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */ 125#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */
155#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */ 126#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */
156#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */ 127#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */
157#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */ 128#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */
158#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */ 129#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */
159#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */ 130#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */
160#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */ 131#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */
161#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */ 132#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */
162 133
163#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) 134#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1)
164#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
165
166#define IVG7 7
167#define IVG8 8
168#define IVG9 9
169#define IVG10 10
170#define IVG11 11
171#define IVG12 12
172#define IVG13 13
173#define IVG14 14
174#define IVG15 15
175 135
176/* IAR0 BIT FIELDS */ 136/* IAR0 BIT FIELDS */
177#define IRQ_PLL_WAKEUP_POS 0 137#define IRQ_PLL_WAKEUP_POS 0
178#define IRQ_DMA0_ERROR_POS 4 138#define IRQ_DMA0_ERROR_POS 4
179#define IRQ_DMAR0_BLK_POS 8 139#define IRQ_DMAR0_BLK_POS 8
180#define IRQ_DMAR1_BLK_POS 12 140#define IRQ_DMAR1_BLK_POS 12
181#define IRQ_DMAR0_OVR_POS 16 141#define IRQ_DMAR0_OVR_POS 16
182#define IRQ_DMAR1_OVR_POS 20 142#define IRQ_DMAR1_OVR_POS 20
183#define IRQ_PPI_ERROR_POS 24 143#define IRQ_PPI_ERROR_POS 24
184#define IRQ_MAC_ERROR_POS 28 144#define IRQ_MAC_ERROR_POS 28
185 145
186/* IAR1 BIT FIELDS */ 146/* IAR1 BIT FIELDS */
187#define IRQ_SPORT0_ERROR_POS 0 147#define IRQ_SPORT0_ERROR_POS 0
188#define IRQ_SPORT1_ERROR_POS 4 148#define IRQ_SPORT1_ERROR_POS 4
189#define IRQ_PTP_ERROR_POS 8 149#define IRQ_PTP_ERROR_POS 8
190#define IRQ_UART0_ERROR_POS 16 150#define IRQ_UART0_ERROR_POS 16
191#define IRQ_UART1_ERROR_POS 20 151#define IRQ_UART1_ERROR_POS 20
192#define IRQ_RTC_POS 24 152#define IRQ_RTC_POS 24
193#define IRQ_PPI_POS 28 153#define IRQ_PPI_POS 28
194 154
195/* IAR2 BIT FIELDS */ 155/* IAR2 BIT FIELDS */
196#define IRQ_SPORT0_RX_POS 0 156#define IRQ_SPORT0_RX_POS 0
@@ -199,19 +159,19 @@
199#define IRQ_SPORT1_RX_POS 8 159#define IRQ_SPORT1_RX_POS 8
200#define IRQ_SPI1_POS 8 160#define IRQ_SPI1_POS 8
201#define IRQ_SPORT1_TX_POS 12 161#define IRQ_SPORT1_TX_POS 12
202#define IRQ_TWI_POS 16 162#define IRQ_TWI_POS 16
203#define IRQ_SPI0_POS 20 163#define IRQ_SPI0_POS 20
204#define IRQ_UART0_RX_POS 24 164#define IRQ_UART0_RX_POS 24
205#define IRQ_UART0_TX_POS 28 165#define IRQ_UART0_TX_POS 28
206 166
207/* IAR3 BIT FIELDS */ 167/* IAR3 BIT FIELDS */
208#define IRQ_UART1_RX_POS 0 168#define IRQ_UART1_RX_POS 0
209#define IRQ_UART1_TX_POS 4 169#define IRQ_UART1_TX_POS 4
210#define IRQ_OPTSEC_POS 8 170#define IRQ_OPTSEC_POS 8
211#define IRQ_CNT_POS 12 171#define IRQ_CNT_POS 12
212#define IRQ_MAC_RX_POS 16 172#define IRQ_MAC_RX_POS 16
213#define IRQ_PORTH_INTA_POS 20 173#define IRQ_PORTH_INTA_POS 20
214#define IRQ_MAC_TX_POS 24 174#define IRQ_MAC_TX_POS 24
215#define IRQ_PORTH_INTB_POS 28 175#define IRQ_PORTH_INTB_POS 28
216 176
217/* IAR4 BIT FIELDS */ 177/* IAR4 BIT FIELDS */
@@ -227,19 +187,19 @@
227/* IAR5 BIT FIELDS */ 187/* IAR5 BIT FIELDS */
228#define IRQ_PORTG_INTA_POS 0 188#define IRQ_PORTG_INTA_POS 0
229#define IRQ_PORTG_INTB_POS 4 189#define IRQ_PORTG_INTB_POS 4
230#define IRQ_MEM_DMA0_POS 8 190#define IRQ_MEM_DMA0_POS 8
231#define IRQ_MEM_DMA1_POS 12 191#define IRQ_MEM_DMA1_POS 12
232#define IRQ_WATCH_POS 16 192#define IRQ_WATCH_POS 16
233#define IRQ_PORTF_INTA_POS 20 193#define IRQ_PORTF_INTA_POS 20
234#define IRQ_PORTF_INTB_POS 24 194#define IRQ_PORTF_INTB_POS 24
235#define IRQ_SPI0_ERROR_POS 28 195#define IRQ_SPI0_ERROR_POS 28
236 196
237/* IAR6 BIT FIELDS */ 197/* IAR6 BIT FIELDS */
238#define IRQ_SPI1_ERROR_POS 0 198#define IRQ_SPI1_ERROR_POS 0
239#define IRQ_RSI_INT0_POS 12 199#define IRQ_RSI_INT0_POS 12
240#define IRQ_RSI_INT1_POS 16 200#define IRQ_RSI_INT1_POS 16
241#define IRQ_PWM_TRIP_POS 20 201#define IRQ_PWM_TRIP_POS 20
242#define IRQ_PWM_SYNC_POS 24 202#define IRQ_PWM_SYNC_POS 24
243#define IRQ_PTP_STAT_POS 28 203#define IRQ_PTP_STAT_POS 28
244 204
245#endif /* _BF518_IRQ_H_ */ 205#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 2cd2ff6f3043..e67ac7720668 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -26,6 +26,7 @@
26#include <asm/portmux.h> 26#include <asm/portmux.h>
27#include <asm/dpmc.h> 27#include <asm/dpmc.h>
28#include <linux/spi/ad7877.h> 28#include <linux/spi/ad7877.h>
29#include <asm/bfin_sport.h>
29 30
30/* 31/*
31 * Name the Board for the /proc/cpuinfo 32 * Name the Board for the /proc/cpuinfo
@@ -526,11 +527,69 @@ static struct bfin5xx_spi_chip spidev_chip_info = {
526}; 527};
527#endif 528#endif
528 529
530#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
531 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
532
533static const u16 bfin_snd_pin[][7] = {
534 {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
535 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0, 0},
536 {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
537 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_TFS, 0},
538};
539
540static struct bfin_snd_platform_data bfin_snd_data[] = {
541 {
542 .pin_req = &bfin_snd_pin[0][0],
543 },
544 {
545 .pin_req = &bfin_snd_pin[1][0],
546 },
547};
548
549#define BFIN_SND_RES(x) \
550 [x] = { \
551 { \
552 .start = SPORT##x##_TCR1, \
553 .end = SPORT##x##_TCR1, \
554 .flags = IORESOURCE_MEM \
555 }, \
556 { \
557 .start = CH_SPORT##x##_RX, \
558 .end = CH_SPORT##x##_RX, \
559 .flags = IORESOURCE_DMA, \
560 }, \
561 { \
562 .start = CH_SPORT##x##_TX, \
563 .end = CH_SPORT##x##_TX, \
564 .flags = IORESOURCE_DMA, \
565 }, \
566 { \
567 .start = IRQ_SPORT##x##_ERROR, \
568 .end = IRQ_SPORT##x##_ERROR, \
569 .flags = IORESOURCE_IRQ, \
570 } \
571 }
572
573static struct resource bfin_snd_resources[][4] = {
574 BFIN_SND_RES(0),
575 BFIN_SND_RES(1),
576};
577
578static struct platform_device bfin_pcm = {
579 .name = "bfin-pcm-audio",
580 .id = -1,
581};
582#endif
583
529#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 584#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
530static struct platform_device bfin_i2s = { 585static struct platform_device bfin_i2s = {
531 .name = "bfin-i2s", 586 .name = "bfin-i2s",
532 .id = CONFIG_SND_BF5XX_SPORT_NUM, 587 .id = CONFIG_SND_BF5XX_SPORT_NUM,
533 /* TODO: add platform data here */ 588 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
589 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
590 .dev = {
591 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
592 },
534}; 593};
535#endif 594#endif
536 595
@@ -538,7 +597,11 @@ static struct platform_device bfin_i2s = {
538static struct platform_device bfin_tdm = { 597static struct platform_device bfin_tdm = {
539 .name = "bfin-tdm", 598 .name = "bfin-tdm",
540 .id = CONFIG_SND_BF5XX_SPORT_NUM, 599 .id = CONFIG_SND_BF5XX_SPORT_NUM,
541 /* TODO: add platform data here */ 600 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
601 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
602 .dev = {
603 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
604 },
542}; 605};
543#endif 606#endif
544 607
@@ -583,7 +646,9 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
583 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 646 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
584 .bus_num = 0, 647 .bus_num = 0,
585 .chip_select = 4, 648 .chip_select = 4,
649 .platform_data = "ad1836",
586 .controller_data = &ad1836_spi_chip_info, 650 .controller_data = &ad1836_spi_chip_info,
651 .mode = SPI_MODE_3,
587 }, 652 },
588#endif 653#endif
589#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 654#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -1211,6 +1276,11 @@ static struct platform_device *stamp_devices[] __initdata = {
1211 &ezkit_flash_device, 1276 &ezkit_flash_device,
1212#endif 1277#endif
1213 1278
1279#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
1280 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1281 &bfin_pcm,
1282#endif
1283
1214#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1284#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1215 &bfin_i2s, 1285 &bfin_i2s,
1216#endif 1286#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index 9358afa05c90..e66a7e89cd3c 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -5,14 +5,14 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision E, 03/15/2010; ADSP-BF526 Blackfin Processor Anomaly List 14 * - Revision E, 03/15/2010; ADSP-BF526 Blackfin Processor Anomaly List
15 * - Revision G, 08/25/2009; ADSP-BF527 Blackfin Processor Anomaly List 15 * - Revision H, 04/29/2010; ADSP-BF527 Blackfin Processor Anomaly List
16 */ 16 */
17 17
18#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
@@ -220,6 +220,8 @@
220#define ANOMALY_05000483 (1) 220#define ANOMALY_05000483 (1)
221/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */ 221/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
222#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, < 3)) 222#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, < 3))
223/* The CODEC Zero-Cross Detect Feature is not Functional */
224#define ANOMALY_05000487 (1)
223/* IFLUSH sucks at life */ 225/* IFLUSH sucks at life */
224#define ANOMALY_05000491 (1) 226#define ANOMALY_05000491 (1)
225 227
@@ -268,11 +270,13 @@
268#define ANOMALY_05000323 (0) 270#define ANOMALY_05000323 (0)
269#define ANOMALY_05000362 (1) 271#define ANOMALY_05000362 (1)
270#define ANOMALY_05000363 (0) 272#define ANOMALY_05000363 (0)
273#define ANOMALY_05000383 (0)
271#define ANOMALY_05000400 (0) 274#define ANOMALY_05000400 (0)
272#define ANOMALY_05000402 (0) 275#define ANOMALY_05000402 (0)
273#define ANOMALY_05000412 (0) 276#define ANOMALY_05000412 (0)
274#define ANOMALY_05000447 (0) 277#define ANOMALY_05000447 (0)
275#define ANOMALY_05000448 (0) 278#define ANOMALY_05000448 (0)
276#define ANOMALY_05000474 (0) 279#define ANOMALY_05000474 (0)
280#define ANOMALY_05000480 (0)
277 281
278#endif 282#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
index 618dfcdfa91a..2c12e879aa4e 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
@@ -1007,18 +1007,18 @@
1007#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val) 1007#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
1008#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW) 1008#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
1009#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val) 1009#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
1010#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS) 1010#define bfin_read_PORTF_HYSTERESIS() bfin_read16(PORTF_HYSTERESIS)
1011#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val) 1011#define bfin_write_PORTF_HYSTERESIS(val) bfin_write16(PORTF_HYSTERESIS, val)
1012#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS) 1012#define bfin_read_PORTG_HYSTERESIS() bfin_read16(PORTG_HYSTERESIS)
1013#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val) 1013#define bfin_write_PORTG_HYSTERESIS(val) bfin_write16(PORTG_HYSTERESIS, val)
1014#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS) 1014#define bfin_read_PORTH_HYSTERESIS() bfin_read16(PORTH_HYSTERESIS)
1015#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val) 1015#define bfin_write_PORTH_HYSTERESIS(val) bfin_write16(PORTH_HYSTERESIS, val)
1016#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE) 1016#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1017#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val) 1017#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1018#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW) 1018#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1019#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val) 1019#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1020#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS) 1020#define bfin_read_MISCPORT_HYSTERESIS() bfin_read16(MISCPORT_HYSTERESIS)
1021#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val) 1021#define bfin_write_MISCPORT_HYSTERESIS(val) bfin_write16(MISCPORT_HYSTERESIS, val)
1022 1022
1023/* HOST Port Registers */ 1023/* HOST Port Registers */
1024 1024
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF522.h b/arch/blackfin/mach-bf527/include/mach/defBF522.h
index 84ef11e52644..37d353a19722 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF522.h
@@ -562,12 +562,12 @@
562#define PORTF_SLEW 0xFFC03230 /* Port F slew control */ 562#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
563#define PORTG_SLEW 0xFFC03234 /* Port G slew control */ 563#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
564#define PORTH_SLEW 0xFFC03238 /* Port H slew control */ 564#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
565#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */ 565#define PORTF_HYSTERESIS 0xFFC03240 /* Port F Schmitt trigger control */
566#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */ 566#define PORTG_HYSTERESIS 0xFFC03244 /* Port G Schmitt trigger control */
567#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */ 567#define PORTH_HYSTERESIS 0xFFC03248 /* Port H Schmitt trigger control */
568#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */ 568#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
569#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */ 569#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
570#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */ 570#define MISCPORT_HYSTERESIS 0xFFC03288 /* Misc Port Schmitt trigger control */
571 571
572 572
573/*********************************************************************************** 573/***********************************************************************************
diff --git a/arch/blackfin/mach-bf527/include/mach/irq.h b/arch/blackfin/mach-bf527/include/mach/irq.h
index 704d9253e41d..ed7310ff819b 100644
--- a/arch/blackfin/mach-bf527/include/mach/irq.h
+++ b/arch/blackfin/mach-bf527/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF527_IRQ_H_ 7#ifndef _BF527_IRQ_H_
8#define _BF527_IRQ_H_ 8#define _BF527_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 Event Source Core Event Name 12#define NR_PERI_INTS (2 * 32)
13 Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22 .....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27*/
28
29#define NR_PERI_INTS (2 * 32)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt */
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40
41#define BFIN_IRQ(x) ((x) + 7)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */ 15#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */
@@ -53,21 +24,21 @@
53#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */ 24#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */
54#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */ 25#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */
55#define IRQ_RTC BFIN_IRQ(14) /* RTC */ 26#define IRQ_RTC BFIN_IRQ(14) /* RTC */
56#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI/NAND) */ 27#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI/NAND) */
57#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */ 28#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */
58#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */ 29#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */
59#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX) */ 30#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX) */
60#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */ 31#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */
61#define IRQ_TWI BFIN_IRQ(20) /* TWI */ 32#define IRQ_TWI BFIN_IRQ(20) /* TWI */
62#define IRQ_SPI BFIN_IRQ(21) /* DMA 7 Channel (SPI) */ 33#define IRQ_SPI BFIN_IRQ(21) /* DMA 7 Channel (SPI) */
63#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */ 34#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */
64#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */ 35#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */
65#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */ 36#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */
66#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */ 37#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */
67#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */ 38#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */
68#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */ 39#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */
69#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX/HDMA) */ 40#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX/HDMA) */
70#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */ 41#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */
71#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */ 42#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */
72#define IRQ_NFC BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */ 43#define IRQ_NFC BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */
73#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */ 44#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */
@@ -96,119 +67,108 @@
96#define IRQ_USB_INT2 BFIN_IRQ(54) /* USB_INT2 Interrupt */ 67#define IRQ_USB_INT2 BFIN_IRQ(54) /* USB_INT2 Interrupt */
97#define IRQ_USB_DMA BFIN_IRQ(55) /* USB_DMAINT Interrupt */ 68#define IRQ_USB_DMA BFIN_IRQ(55) /* USB_DMAINT Interrupt */
98 69
99#define SYS_IRQS BFIN_IRQ(63) /* 70 */ 70#define SYS_IRQS BFIN_IRQ(63) /* 70 */
100 71
101#define IRQ_PF0 71 72#define IRQ_PF0 71
102#define IRQ_PF1 72 73#define IRQ_PF1 72
103#define IRQ_PF2 73 74#define IRQ_PF2 73
104#define IRQ_PF3 74 75#define IRQ_PF3 74
105#define IRQ_PF4 75 76#define IRQ_PF4 75
106#define IRQ_PF5 76 77#define IRQ_PF5 76
107#define IRQ_PF6 77 78#define IRQ_PF6 77
108#define IRQ_PF7 78 79#define IRQ_PF7 78
109#define IRQ_PF8 79 80#define IRQ_PF8 79
110#define IRQ_PF9 80 81#define IRQ_PF9 80
111#define IRQ_PF10 81 82#define IRQ_PF10 81
112#define IRQ_PF11 82 83#define IRQ_PF11 82
113#define IRQ_PF12 83 84#define IRQ_PF12 83
114#define IRQ_PF13 84 85#define IRQ_PF13 84
115#define IRQ_PF14 85 86#define IRQ_PF14 85
116#define IRQ_PF15 86 87#define IRQ_PF15 86
117 88
118#define IRQ_PG0 87 89#define IRQ_PG0 87
119#define IRQ_PG1 88 90#define IRQ_PG1 88
120#define IRQ_PG2 89 91#define IRQ_PG2 89
121#define IRQ_PG3 90 92#define IRQ_PG3 90
122#define IRQ_PG4 91 93#define IRQ_PG4 91
123#define IRQ_PG5 92 94#define IRQ_PG5 92
124#define IRQ_PG6 93 95#define IRQ_PG6 93
125#define IRQ_PG7 94 96#define IRQ_PG7 94
126#define IRQ_PG8 95 97#define IRQ_PG8 95
127#define IRQ_PG9 96 98#define IRQ_PG9 96
128#define IRQ_PG10 97 99#define IRQ_PG10 97
129#define IRQ_PG11 98 100#define IRQ_PG11 98
130#define IRQ_PG12 99 101#define IRQ_PG12 99
131#define IRQ_PG13 100 102#define IRQ_PG13 100
132#define IRQ_PG14 101 103#define IRQ_PG14 101
133#define IRQ_PG15 102 104#define IRQ_PG15 102
134 105
135#define IRQ_PH0 103 106#define IRQ_PH0 103
136#define IRQ_PH1 104 107#define IRQ_PH1 104
137#define IRQ_PH2 105 108#define IRQ_PH2 105
138#define IRQ_PH3 106 109#define IRQ_PH3 106
139#define IRQ_PH4 107 110#define IRQ_PH4 107
140#define IRQ_PH5 108 111#define IRQ_PH5 108
141#define IRQ_PH6 109 112#define IRQ_PH6 109
142#define IRQ_PH7 110 113#define IRQ_PH7 110
143#define IRQ_PH8 111 114#define IRQ_PH8 111
144#define IRQ_PH9 112 115#define IRQ_PH9 112
145#define IRQ_PH10 113 116#define IRQ_PH10 113
146#define IRQ_PH11 114 117#define IRQ_PH11 114
147#define IRQ_PH12 115 118#define IRQ_PH12 115
148#define IRQ_PH13 116 119#define IRQ_PH13 116
149#define IRQ_PH14 117 120#define IRQ_PH14 117
150#define IRQ_PH15 118 121#define IRQ_PH15 118
151 122
152#define GPIO_IRQ_BASE IRQ_PF0 123#define GPIO_IRQ_BASE IRQ_PF0
153 124
154#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */ 125#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */
155#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */ 126#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */
156#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */ 127#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */
157#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */ 128#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */
158#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */ 129#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */
159#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */ 130#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */
160#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */ 131#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */
161#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */ 132#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */
162 133
163#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) 134#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1)
164#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
165
166#define IVG7 7
167#define IVG8 8
168#define IVG9 9
169#define IVG10 10
170#define IVG11 11
171#define IVG12 12
172#define IVG13 13
173#define IVG14 14
174#define IVG15 15
175 135
176/* IAR0 BIT FIELDS */ 136/* IAR0 BIT FIELDS */
177#define IRQ_PLL_WAKEUP_POS 0 137#define IRQ_PLL_WAKEUP_POS 0
178#define IRQ_DMA0_ERROR_POS 4 138#define IRQ_DMA0_ERROR_POS 4
179#define IRQ_DMAR0_BLK_POS 8 139#define IRQ_DMAR0_BLK_POS 8
180#define IRQ_DMAR1_BLK_POS 12 140#define IRQ_DMAR1_BLK_POS 12
181#define IRQ_DMAR0_OVR_POS 16 141#define IRQ_DMAR0_OVR_POS 16
182#define IRQ_DMAR1_OVR_POS 20 142#define IRQ_DMAR1_OVR_POS 20
183#define IRQ_PPI_ERROR_POS 24 143#define IRQ_PPI_ERROR_POS 24
184#define IRQ_MAC_ERROR_POS 28 144#define IRQ_MAC_ERROR_POS 28
185 145
186/* IAR1 BIT FIELDS */ 146/* IAR1 BIT FIELDS */
187#define IRQ_SPORT0_ERROR_POS 0 147#define IRQ_SPORT0_ERROR_POS 0
188#define IRQ_SPORT1_ERROR_POS 4 148#define IRQ_SPORT1_ERROR_POS 4
189#define IRQ_UART0_ERROR_POS 16 149#define IRQ_UART0_ERROR_POS 16
190#define IRQ_UART1_ERROR_POS 20 150#define IRQ_UART1_ERROR_POS 20
191#define IRQ_RTC_POS 24 151#define IRQ_RTC_POS 24
192#define IRQ_PPI_POS 28 152#define IRQ_PPI_POS 28
193 153
194/* IAR2 BIT FIELDS */ 154/* IAR2 BIT FIELDS */
195#define IRQ_SPORT0_RX_POS 0 155#define IRQ_SPORT0_RX_POS 0
196#define IRQ_SPORT0_TX_POS 4 156#define IRQ_SPORT0_TX_POS 4
197#define IRQ_SPORT1_RX_POS 8 157#define IRQ_SPORT1_RX_POS 8
198#define IRQ_SPORT1_TX_POS 12 158#define IRQ_SPORT1_TX_POS 12
199#define IRQ_TWI_POS 16 159#define IRQ_TWI_POS 16
200#define IRQ_SPI_POS 20 160#define IRQ_SPI_POS 20
201#define IRQ_UART0_RX_POS 24 161#define IRQ_UART0_RX_POS 24
202#define IRQ_UART0_TX_POS 28 162#define IRQ_UART0_TX_POS 28
203 163
204/* IAR3 BIT FIELDS */ 164/* IAR3 BIT FIELDS */
205#define IRQ_UART1_RX_POS 0 165#define IRQ_UART1_RX_POS 0
206#define IRQ_UART1_TX_POS 4 166#define IRQ_UART1_TX_POS 4
207#define IRQ_OPTSEC_POS 8 167#define IRQ_OPTSEC_POS 8
208#define IRQ_CNT_POS 12 168#define IRQ_CNT_POS 12
209#define IRQ_MAC_RX_POS 16 169#define IRQ_MAC_RX_POS 16
210#define IRQ_PORTH_INTA_POS 20 170#define IRQ_PORTH_INTA_POS 20
211#define IRQ_MAC_TX_POS 24 171#define IRQ_MAC_TX_POS 24
212#define IRQ_PORTH_INTB_POS 28 172#define IRQ_PORTH_INTB_POS 28
213 173
214/* IAR4 BIT FIELDS */ 174/* IAR4 BIT FIELDS */
@@ -224,21 +184,21 @@
224/* IAR5 BIT FIELDS */ 184/* IAR5 BIT FIELDS */
225#define IRQ_PORTG_INTA_POS 0 185#define IRQ_PORTG_INTA_POS 0
226#define IRQ_PORTG_INTB_POS 4 186#define IRQ_PORTG_INTB_POS 4
227#define IRQ_MEM_DMA0_POS 8 187#define IRQ_MEM_DMA0_POS 8
228#define IRQ_MEM_DMA1_POS 12 188#define IRQ_MEM_DMA1_POS 12
229#define IRQ_WATCH_POS 16 189#define IRQ_WATCH_POS 16
230#define IRQ_PORTF_INTA_POS 20 190#define IRQ_PORTF_INTA_POS 20
231#define IRQ_PORTF_INTB_POS 24 191#define IRQ_PORTF_INTB_POS 24
232#define IRQ_SPI_ERROR_POS 28 192#define IRQ_SPI_ERROR_POS 28
233 193
234/* IAR6 BIT FIELDS */ 194/* IAR6 BIT FIELDS */
235#define IRQ_NFC_ERROR_POS 0 195#define IRQ_NFC_ERROR_POS 0
236#define IRQ_HDMA_ERROR_POS 4 196#define IRQ_HDMA_ERROR_POS 4
237#define IRQ_HDMA_POS 8 197#define IRQ_HDMA_POS 8
238#define IRQ_USB_EINT_POS 12 198#define IRQ_USB_EINT_POS 12
239#define IRQ_USB_INT0_POS 16 199#define IRQ_USB_INT0_POS 16
240#define IRQ_USB_INT1_POS 20 200#define IRQ_USB_INT1_POS 20
241#define IRQ_USB_INT2_POS 24 201#define IRQ_USB_INT2_POS 24
242#define IRQ_USB_DMA_POS 28 202#define IRQ_USB_DMA_POS 28
243 203
244#endif /* _BF527_IRQ_H_ */ 204#endif
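The per-machine headers in this series stop repeating the fixed core-event numbering and instead include <mach-common/irq.h>. That shared header is not itself part of this diff; a minimal sketch of what it presumably provides, reconstructed only from the definitions being deleted here (IRQ_EMU..IRQ_CORETMR, BFIN_IRQ(), the IVG levels and the NR_IRQS formula), would be:

/* Sketch only -- approximate contents of mach-common/irq.h, inferred from the
 * per-machine lines removed in this series; the real header is not shown. */
#define IRQ_EMU		0	/* Emulation */
#define IRQ_RST		1	/* Reset */
#define IRQ_NMI		2	/* Non Maskable */
#define IRQ_EVX		3	/* Exception */
#define IRQ_UNUSED	4	/* Reserved */
#define IRQ_HWERR	5	/* Hardware Error */
#define IRQ_CORETMR	6	/* Core Timer */

#define BFIN_IRQ(x)	((x) + 7)	/* peripheral IRQs start after the core events */

#define IVG7		7
#define IVG8		8
#define IVG9		9
#define IVG10		10
#define IVG11		11
#define IVG12		12
#define IVG13		13
#define IVG14		14
#define IVG15		15

#define NR_IRQS		(NR_MACH_IRQS + NR_SPARE_IRQS)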
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 78f872187918..72aa59440f82 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision E, 09/18/2008; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List 14 * - Revision F, 05/25/2010; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -206,6 +206,10 @@
206#define ANOMALY_05000443 (1) 206#define ANOMALY_05000443 (1)
207/* False Hardware Error when RETI Points to Invalid Memory */ 207/* False Hardware Error when RETI Points to Invalid Memory */
208#define ANOMALY_05000461 (1) 208#define ANOMALY_05000461 (1)
209/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
210#define ANOMALY_05000462 (1)
211/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
212#define ANOMALY_05000471 (1)
209/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 213/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
210#define ANOMALY_05000473 (1) 214#define ANOMALY_05000473 (1)
211/* Possible Lockup Condition when Modifying PLL from External Memory */ 215/* Possible Lockup Condition when Modifying PLL from External Memory */
@@ -351,12 +355,14 @@
351#define ANOMALY_05000362 (1) 355#define ANOMALY_05000362 (1)
352#define ANOMALY_05000364 (0) 356#define ANOMALY_05000364 (0)
353#define ANOMALY_05000380 (0) 357#define ANOMALY_05000380 (0)
358#define ANOMALY_05000383 (0)
354#define ANOMALY_05000386 (1) 359#define ANOMALY_05000386 (1)
355#define ANOMALY_05000389 (0) 360#define ANOMALY_05000389 (0)
356#define ANOMALY_05000412 (0) 361#define ANOMALY_05000412 (0)
357#define ANOMALY_05000430 (0) 362#define ANOMALY_05000430 (0)
358#define ANOMALY_05000432 (0) 363#define ANOMALY_05000432 (0)
359#define ANOMALY_05000435 (0) 364#define ANOMALY_05000435 (0)
365#define ANOMALY_05000440 (0)
360#define ANOMALY_05000447 (0) 366#define ANOMALY_05000447 (0)
361#define ANOMALY_05000448 (0) 367#define ANOMALY_05000448 (0)
362#define ANOMALY_05000456 (0) 368#define ANOMALY_05000456 (0)
@@ -364,6 +370,7 @@
364#define ANOMALY_05000465 (0) 370#define ANOMALY_05000465 (0)
365#define ANOMALY_05000467 (0) 371#define ANOMALY_05000467 (0)
366#define ANOMALY_05000474 (0) 372#define ANOMALY_05000474 (0)
373#define ANOMALY_05000480 (0)
367#define ANOMALY_05000485 (0) 374#define ANOMALY_05000485 (0)
368 375
369#endif 376#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/irq.h b/arch/blackfin/mach-bf533/include/mach/irq.h
index 1f7e9765d954..709733754142 100644
--- a/arch/blackfin/mach-bf533/include/mach/irq.h
+++ b/arch/blackfin/mach-bf533/include/mach/irq.h
@@ -7,83 +7,36 @@
7#ifndef _BF533_IRQ_H_ 7#ifndef _BF533_IRQ_H_
8#define _BF533_IRQ_H_ 8#define _BF533_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions
12 Event Source Core Event Name
13Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21 PLL Wakeup Interrupt IVG7 7
22 DMA Error (generic) IVG7 8
23 PPI Error Interrupt IVG7 9
24 SPORT0 Error Interrupt IVG7 10
25 SPORT1 Error Interrupt IVG7 11
26 SPI Error Interrupt IVG7 12
27 UART Error Interrupt IVG7 13
28 RTC Interrupt IVG8 14
29 DMA0 Interrupt (PPI) IVG8 15
30 DMA1 (SPORT0 RX) IVG9 16
31 DMA2 (SPORT0 TX) IVG9 17
32 DMA3 (SPORT1 RX) IVG9 18
33 DMA4 (SPORT1 TX) IVG9 19
34 DMA5 (PPI) IVG10 20
35 DMA6 (UART RX) IVG10 21
36 DMA7 (UART TX) IVG10 22
37 Timer0 IVG11 23
38 Timer1 IVG11 24
39 Timer2 IVG11 25
40 PF Interrupt A IVG12 26
41 PF Interrupt B IVG12 27
42 DMA8/9 Interrupt IVG13 28
43 DMA10/11 Interrupt IVG13 29
44 Watchdog Timer IVG13 30
45 11
46 Softirq IVG14 31 12#define NR_PERI_INTS 24
47 System Call --
48 (lowest priority) IVG15 32 *
49 */
50#define SYS_IRQS 31
51#define NR_PERI_INTS 24
52 13
53/* The ABSTRACT IRQ definitions */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
54/** the first seven of the following are fixed, the rest you change if you need to **/ 15#define IRQ_DMA_ERROR BFIN_IRQ(1) /* DMA Error (general) */
55#define IRQ_EMU 0 /*Emulation */ 16#define IRQ_PPI_ERROR BFIN_IRQ(2) /* PPI Error Interrupt */
56#define IRQ_RST 1 /*reset */ 17#define IRQ_SPORT0_ERROR BFIN_IRQ(3) /* SPORT0 Error Interrupt */
57#define IRQ_NMI 2 /*Non Maskable */ 18#define IRQ_SPORT1_ERROR BFIN_IRQ(4) /* SPORT1 Error Interrupt */
58#define IRQ_EVX 3 /*Exception */ 19#define IRQ_SPI_ERROR BFIN_IRQ(5) /* SPI Error Interrupt */
59#define IRQ_UNUSED 4 /*- unused interrupt*/ 20#define IRQ_UART0_ERROR BFIN_IRQ(6) /* UART Error Interrupt */
60#define IRQ_HWERR 5 /*Hardware Error */ 21#define IRQ_RTC BFIN_IRQ(7) /* RTC Interrupt */
61#define IRQ_CORETMR 6 /*Core timer */ 22#define IRQ_PPI BFIN_IRQ(8) /* DMA0 Interrupt (PPI) */
23#define IRQ_SPORT0_RX BFIN_IRQ(9) /* DMA1 Interrupt (SPORT0 RX) */
24#define IRQ_SPORT0_TX BFIN_IRQ(10) /* DMA2 Interrupt (SPORT0 TX) */
25#define IRQ_SPORT1_RX BFIN_IRQ(11) /* DMA3 Interrupt (SPORT1 RX) */
26#define IRQ_SPORT1_TX BFIN_IRQ(12) /* DMA4 Interrupt (SPORT1 TX) */
27#define IRQ_SPI BFIN_IRQ(13) /* DMA5 Interrupt (SPI) */
28#define IRQ_UART0_RX BFIN_IRQ(14) /* DMA6 Interrupt (UART RX) */
29#define IRQ_UART0_TX BFIN_IRQ(15) /* DMA7 Interrupt (UART TX) */
30#define IRQ_TIMER0 BFIN_IRQ(16) /* Timer 0 */
31#define IRQ_TIMER1 BFIN_IRQ(17) /* Timer 1 */
32#define IRQ_TIMER2 BFIN_IRQ(18) /* Timer 2 */
33#define IRQ_PROG_INTA BFIN_IRQ(19) /* Programmable Flags A (8) */
34#define IRQ_PROG_INTB BFIN_IRQ(20) /* Programmable Flags B (8) */
35#define IRQ_MEM_DMA0 BFIN_IRQ(21) /* DMA8/9 Interrupt (Memory DMA Stream 0) */
36#define IRQ_MEM_DMA1 BFIN_IRQ(22) /* DMA10/11 Interrupt (Memory DMA Stream 1) */
37#define IRQ_WATCH BFIN_IRQ(23) /* Watch Dog Timer */
62 38
63#define IRQ_PLL_WAKEUP 7 /*PLL Wakeup Interrupt */ 39#define SYS_IRQS 31
64#define IRQ_DMA_ERROR 8 /*DMA Error (general) */
65#define IRQ_PPI_ERROR 9 /*PPI Error Interrupt */
66#define IRQ_SPORT0_ERROR 10 /*SPORT0 Error Interrupt */
67#define IRQ_SPORT1_ERROR 11 /*SPORT1 Error Interrupt */
68#define IRQ_SPI_ERROR 12 /*SPI Error Interrupt */
69#define IRQ_UART0_ERROR 13 /*UART Error Interrupt */
70#define IRQ_RTC 14 /*RTC Interrupt */
71#define IRQ_PPI 15 /*DMA0 Interrupt (PPI) */
72#define IRQ_SPORT0_RX 16 /*DMA1 Interrupt (SPORT0 RX) */
73#define IRQ_SPORT0_TX 17 /*DMA2 Interrupt (SPORT0 TX) */
74#define IRQ_SPORT1_RX 18 /*DMA3 Interrupt (SPORT1 RX) */
75#define IRQ_SPORT1_TX 19 /*DMA4 Interrupt (SPORT1 TX) */
76#define IRQ_SPI 20 /*DMA5 Interrupt (SPI) */
77#define IRQ_UART0_RX 21 /*DMA6 Interrupt (UART RX) */
78#define IRQ_UART0_TX 22 /*DMA7 Interrupt (UART TX) */
79#define IRQ_TIMER0 23 /*Timer 0 */
80#define IRQ_TIMER1 24 /*Timer 1 */
81#define IRQ_TIMER2 25 /*Timer 2 */
82#define IRQ_PROG_INTA 26 /*Programmable Flags A (8) */
83#define IRQ_PROG_INTB 27 /*Programmable Flags B (8) */
84#define IRQ_MEM_DMA0 28 /*DMA8/9 Interrupt (Memory DMA Stream 0) */
85#define IRQ_MEM_DMA1 29 /*DMA10/11 Interrupt (Memory DMA Stream 1) */
86#define IRQ_WATCH 30 /*Watch Dog Timer */
87 40
88#define IRQ_PF0 33 41#define IRQ_PF0 33
89#define IRQ_PF1 34 42#define IRQ_PF1 34
@@ -105,46 +58,35 @@ Core Emulation **
105#define GPIO_IRQ_BASE IRQ_PF0 58#define GPIO_IRQ_BASE IRQ_PF0
106 59
107#define NR_MACH_IRQS (IRQ_PF15 + 1) 60#define NR_MACH_IRQS (IRQ_PF15 + 1)
108#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
109
110#define IVG7 7
111#define IVG8 8
112#define IVG9 9
113#define IVG10 10
114#define IVG11 11
115#define IVG12 12
116#define IVG13 13
117#define IVG14 14
118#define IVG15 15
119 61
120/* IAR0 BIT FIELDS*/ 62/* IAR0 BIT FIELDS */
121#define RTC_ERROR_POS 28 63#define RTC_ERROR_POS 28
122#define UART_ERROR_POS 24 64#define UART_ERROR_POS 24
123#define SPORT1_ERROR_POS 20 65#define SPORT1_ERROR_POS 20
124#define SPI_ERROR_POS 16 66#define SPI_ERROR_POS 16
125#define SPORT0_ERROR_POS 12 67#define SPORT0_ERROR_POS 12
126#define PPI_ERROR_POS 8 68#define PPI_ERROR_POS 8
127#define DMA_ERROR_POS 4 69#define DMA_ERROR_POS 4
128#define PLLWAKE_ERROR_POS 0 70#define PLLWAKE_ERROR_POS 0
129 71
130/* IAR1 BIT FIELDS*/ 72/* IAR1 BIT FIELDS */
131#define DMA7_UARTTX_POS 28 73#define DMA7_UARTTX_POS 28
132#define DMA6_UARTRX_POS 24 74#define DMA6_UARTRX_POS 24
133#define DMA5_SPI_POS 20 75#define DMA5_SPI_POS 20
134#define DMA4_SPORT1TX_POS 16 76#define DMA4_SPORT1TX_POS 16
135#define DMA3_SPORT1RX_POS 12 77#define DMA3_SPORT1RX_POS 12
136#define DMA2_SPORT0TX_POS 8 78#define DMA2_SPORT0TX_POS 8
137#define DMA1_SPORT0RX_POS 4 79#define DMA1_SPORT0RX_POS 4
138#define DMA0_PPI_POS 0 80#define DMA0_PPI_POS 0
139 81
140/* IAR2 BIT FIELDS*/ 82/* IAR2 BIT FIELDS */
141#define WDTIMER_POS 28 83#define WDTIMER_POS 28
142#define MEMDMA1_POS 24 84#define MEMDMA1_POS 24
143#define MEMDMA0_POS 20 85#define MEMDMA0_POS 20
144#define PFB_POS 16 86#define PFB_POS 16
145#define PFA_POS 12 87#define PFA_POS 12
146#define TIMER2_POS 8 88#define TIMER2_POS 8
147#define TIMER1_POS 4 89#define TIMER1_POS 4
148#define TIMER0_POS 0 90#define TIMER0_POS 0
149 91
150#endif /* _BF533_IRQ_H_ */ 92#endif
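Because BFIN_IRQ(x) expands to ((x) + 7) (see the BF538 header later in this diff), the BF533 conversion is purely mechanical: every new macro evaluates to the same number as the literal it replaces. A quick illustrative check, assuming only that <mach-common/irq.h> provides BFIN_IRQ():

/* Illustration: BFIN_IRQ(x) is ((x) + 7), so the converted macros keep the
 * values of the literals they replace: 7 (PLL_WAKEUP), 20 (SPI), 30 (WATCH). */
#include <mach-common/irq.h>	/* provides BFIN_IRQ() */

#if BFIN_IRQ(0) != 7 || BFIN_IRQ(13) != 20 || BFIN_IRQ(23) != 30
# error "BF533 peripheral IRQ numbering changed"
#endif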
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 3fa335405b31..e16dc4560048 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -35,6 +35,7 @@
35#include <asm/reboot.h> 35#include <asm/reboot.h>
36#include <asm/portmux.h> 36#include <asm/portmux.h>
37#include <asm/dpmc.h> 37#include <asm/dpmc.h>
38#include <asm/bfin_sport.h>
38#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE 39#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
39#include <linux/regulator/fixed.h> 40#include <linux/regulator/fixed.h>
40#endif 41#endif
@@ -2585,27 +2586,103 @@ static struct platform_device bfin_dpmc = {
2585 }, 2586 },
2586}; 2587};
2587 2588
2588#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2589#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
2590 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
2591 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2592
2593#define SPORT_REQ(x) \
2594 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
2595 P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
2596
2597static const u16 bfin_snd_pin[][7] = {
2598 SPORT_REQ(0),
2599 SPORT_REQ(1),
2600};
2601
2602static struct bfin_snd_platform_data bfin_snd_data[] = {
2603 {
2604 .pin_req = &bfin_snd_pin[0][0],
2605 },
2606 {
2607 .pin_req = &bfin_snd_pin[1][0],
2608 },
2609};
2610
2611#define BFIN_SND_RES(x) \
2612 [x] = { \
2613 { \
2614 .start = SPORT##x##_TCR1, \
2615 .end = SPORT##x##_TCR1, \
2616 .flags = IORESOURCE_MEM \
2617 }, \
2618 { \
2619 .start = CH_SPORT##x##_RX, \
2620 .end = CH_SPORT##x##_RX, \
2621 .flags = IORESOURCE_DMA, \
2622 }, \
2623 { \
2624 .start = CH_SPORT##x##_TX, \
2625 .end = CH_SPORT##x##_TX, \
2626 .flags = IORESOURCE_DMA, \
2627 }, \
2628 { \
2629 .start = IRQ_SPORT##x##_ERROR, \
2630 .end = IRQ_SPORT##x##_ERROR, \
2631 .flags = IORESOURCE_IRQ, \
2632 } \
2633 }
2634
2635static struct resource bfin_snd_resources[][4] = {
2636 BFIN_SND_RES(0),
2637 BFIN_SND_RES(1),
2638};
2639
2640static struct platform_device bfin_pcm = {
2641 .name = "bfin-pcm-audio",
2642 .id = -1,
2643};
2644#endif
2645
2646#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
2647static struct platform_device bfin_ad73311_codec_device = {
2648 .name = "ad73311",
2649 .id = -1,
2650};
2651#endif
2652
2653#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
2589static struct platform_device bfin_i2s = { 2654static struct platform_device bfin_i2s = {
2590 .name = "bfin-i2s", 2655 .name = "bfin-i2s",
2591 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2656 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2592 /* TODO: add platform data here */ 2657 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
2658 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
2659 .dev = {
2660 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
2661 },
2593}; 2662};
2594#endif 2663#endif
2595 2664
2596#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 2665#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
2597static struct platform_device bfin_tdm = { 2666static struct platform_device bfin_tdm = {
2598 .name = "bfin-tdm", 2667 .name = "bfin-tdm",
2599 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2668 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2600 /* TODO: add platform data here */ 2669 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
2670 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
2671 .dev = {
2672 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
2673 },
2601}; 2674};
2602#endif 2675#endif
2603 2676
2604#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2677#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
2605static struct platform_device bfin_ac97 = { 2678static struct platform_device bfin_ac97 = {
2606 .name = "bfin-ac97", 2679 .name = "bfin-ac97",
2607 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2680 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2608 /* TODO: add platform data here */ 2681 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
2682 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
2683 .dev = {
2684 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
2685 },
2609}; 2686};
2610#endif 2687#endif
2611 2688
@@ -2796,17 +2873,28 @@ static struct platform_device *stamp_devices[] __initdata = {
2796 &stamp_flash_device, 2873 &stamp_flash_device,
2797#endif 2874#endif
2798 2875
2799#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2876#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
2877 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
2878 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2879 &bfin_pcm,
2880#endif
2881
2882#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
2883 &bfin_ad73311_codec_device,
2884#endif
2885
2886#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
2800 &bfin_i2s, 2887 &bfin_i2s,
2801#endif 2888#endif
2802 2889
2803#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 2890#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
2804 &bfin_tdm, 2891 &bfin_tdm,
2805#endif 2892#endif
2806 2893
2807#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2894#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
2808 &bfin_ac97, 2895 &bfin_ac97,
2809#endif 2896#endif
2897
2810#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) 2898#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
2811#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ 2899#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
2812 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE) 2900 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
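For readers of the stamp.c hunks above: BFIN_SND_RES(x) packs one SPORT's control register, RX/TX DMA channels and error IRQ into a four-entry resource array. Hand-expanding BFIN_SND_RES(0) (the array name below is only for illustration; SPORT0_TCR1, CH_SPORT0_RX/TX and IRQ_SPORT0_ERROR come from the existing BF537 headers) gives:

/* Hand expansion of BFIN_SND_RES(0) from the hunk above. */
static struct resource bfin_snd0_resources[] = {
	{ .start = SPORT0_TCR1,      .end = SPORT0_TCR1,      .flags = IORESOURCE_MEM },
	{ .start = CH_SPORT0_RX,     .end = CH_SPORT0_RX,     .flags = IORESOURCE_DMA },
	{ .start = CH_SPORT0_TX,     .end = CH_SPORT0_TX,     .flags = IORESOURCE_DMA },
	{ .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ },
};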
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 43df6afd22ad..7f8e5a9f5db6 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision D, 09/18/2008; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List 14 * - Revision E, 05/25/2010; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -160,12 +160,16 @@
160#define ANOMALY_05000443 (1) 160#define ANOMALY_05000443 (1)
161/* False Hardware Error when RETI Points to Invalid Memory */ 161/* False Hardware Error when RETI Points to Invalid Memory */
162#define ANOMALY_05000461 (1) 162#define ANOMALY_05000461 (1)
163/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
164#define ANOMALY_05000462 (1)
163/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 165/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
164#define ANOMALY_05000473 (1) 166#define ANOMALY_05000473 (1)
165/* Possible Lockup Condition when Modifying PLL from External Memory */ 167/* Possible Lockup Condition when Modifying PLL from External Memory */
166#define ANOMALY_05000475 (1) 168#define ANOMALY_05000475 (1)
167/* TESTSET Instruction Cannot Be Interrupted */ 169/* TESTSET Instruction Cannot Be Interrupted */
168#define ANOMALY_05000477 (1) 170#define ANOMALY_05000477 (1)
171/* Multiple Simultaneous Urgent DMA Requests May Cause DMA System Instability */
172#define ANOMALY_05000480 (__SILICON_REVISION__ < 3)
169/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 173/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
170#define ANOMALY_05000481 (1) 174#define ANOMALY_05000481 (1)
171/* IFLUSH sucks at life */ 175/* IFLUSH sucks at life */
@@ -204,6 +208,7 @@
204#define ANOMALY_05000363 (0) 208#define ANOMALY_05000363 (0)
205#define ANOMALY_05000364 (0) 209#define ANOMALY_05000364 (0)
206#define ANOMALY_05000380 (0) 210#define ANOMALY_05000380 (0)
211#define ANOMALY_05000383 (0)
207#define ANOMALY_05000386 (1) 212#define ANOMALY_05000386 (1)
208#define ANOMALY_05000389 (0) 213#define ANOMALY_05000389 (0)
209#define ANOMALY_05000400 (0) 214#define ANOMALY_05000400 (0)
@@ -211,6 +216,7 @@
211#define ANOMALY_05000430 (0) 216#define ANOMALY_05000430 (0)
212#define ANOMALY_05000432 (0) 217#define ANOMALY_05000432 (0)
213#define ANOMALY_05000435 (0) 218#define ANOMALY_05000435 (0)
219#define ANOMALY_05000440 (0)
214#define ANOMALY_05000447 (0) 220#define ANOMALY_05000447 (0)
215#define ANOMALY_05000448 (0) 221#define ANOMALY_05000448 (0)
216#define ANOMALY_05000456 (0) 222#define ANOMALY_05000456 (0)
diff --git a/arch/blackfin/mach-bf537/include/mach/irq.h b/arch/blackfin/mach-bf537/include/mach/irq.h
index 1a6d617c5fcf..b6ed8235bda4 100644
--- a/arch/blackfin/mach-bf537/include/mach/irq.h
+++ b/arch/blackfin/mach-bf537/include/mach/irq.h
@@ -7,193 +7,178 @@
7#ifndef _BF537_IRQ_H_ 7#ifndef _BF537_IRQ_H_
8#define _BF537_IRQ_H_ 8#define _BF537_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 * Event Source Core Event Name 12#define NR_PERI_INTS 32
13 * Core Emulation ** 13
14 * Events (highest priority) EMU 0 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
15 * Reset RST 1 15#define IRQ_DMA_ERROR BFIN_IRQ(1) /* DMA Error (general) */
16 * NMI NMI 2 16#define IRQ_GENERIC_ERROR BFIN_IRQ(2) /* GENERIC Error Interrupt */
17 * Exception EVX 3 17#define IRQ_RTC BFIN_IRQ(3) /* RTC Interrupt */
18 * Reserved -- 4 18#define IRQ_PPI BFIN_IRQ(4) /* DMA0 Interrupt (PPI) */
19 * Hardware Error IVHW 5 19#define IRQ_SPORT0_RX BFIN_IRQ(5) /* DMA3 Interrupt (SPORT0 RX) */
20 * Core Timer IVTMR 6 20#define IRQ_SPORT0_TX BFIN_IRQ(6) /* DMA4 Interrupt (SPORT0 TX) */
21 * ..... 21#define IRQ_SPORT1_RX BFIN_IRQ(7) /* DMA5 Interrupt (SPORT1 RX) */
22 * 22#define IRQ_SPORT1_TX BFIN_IRQ(8) /* DMA6 Interrupt (SPORT1 TX) */
23 * Softirq IVG14 23#define IRQ_TWI BFIN_IRQ(9) /* TWI Interrupt */
24 * System Call -- 24#define IRQ_SPI BFIN_IRQ(10) /* DMA7 Interrupt (SPI) */
25 * (lowest priority) IVG15 25#define IRQ_UART0_RX BFIN_IRQ(11) /* DMA8 Interrupt (UART0 RX) */
26 */ 26#define IRQ_UART0_TX BFIN_IRQ(12) /* DMA9 Interrupt (UART0 TX) */
27 27#define IRQ_UART1_RX BFIN_IRQ(13) /* DMA10 Interrupt (UART1 RX) */
28#define SYS_IRQS 39 28#define IRQ_UART1_TX BFIN_IRQ(14) /* DMA11 Interrupt (UART1 TX) */
29#define NR_PERI_INTS 32 29#define IRQ_CAN_RX BFIN_IRQ(15) /* CAN Receive Interrupt */
30 30#define IRQ_CAN_TX BFIN_IRQ(16) /* CAN Transmit Interrupt */
31/* The ABSTRACT IRQ definitions */ 31#define IRQ_PH_INTA_MAC_RX BFIN_IRQ(17) /* Port H Interrupt A & DMA1 Interrupt (Ethernet RX) */
32/** the first seven of the following are fixed, the rest you change if you need to **/ 32#define IRQ_PH_INTB_MAC_TX BFIN_IRQ(18) /* Port H Interrupt B & DMA2 Interrupt (Ethernet TX) */
33#define IRQ_EMU 0 /*Emulation */ 33#define IRQ_TIMER0 BFIN_IRQ(19) /* Timer 0 */
34#define IRQ_RST 1 /*reset */ 34#define IRQ_TIMER1 BFIN_IRQ(20) /* Timer 1 */
35#define IRQ_NMI 2 /*Non Maskable */ 35#define IRQ_TIMER2 BFIN_IRQ(21) /* Timer 2 */
36#define IRQ_EVX 3 /*Exception */ 36#define IRQ_TIMER3 BFIN_IRQ(22) /* Timer 3 */
37#define IRQ_UNUSED 4 /*- unused interrupt*/ 37#define IRQ_TIMER4 BFIN_IRQ(23) /* Timer 4 */
38#define IRQ_HWERR 5 /*Hardware Error */ 38#define IRQ_TIMER5 BFIN_IRQ(24) /* Timer 5 */
39#define IRQ_CORETMR 6 /*Core timer */ 39#define IRQ_TIMER6 BFIN_IRQ(25) /* Timer 6 */
40 40#define IRQ_TIMER7 BFIN_IRQ(26) /* Timer 7 */
41#define IRQ_PLL_WAKEUP 7 /*PLL Wakeup Interrupt */ 41#define IRQ_PF_INTA_PG_INTA BFIN_IRQ(27) /* Ports F&G Interrupt A */
42#define IRQ_DMA_ERROR 8 /*DMA Error (general) */ 42#define IRQ_PORTG_INTB BFIN_IRQ(28) /* Port G Interrupt B */
43#define IRQ_GENERIC_ERROR 9 /*GENERIC Error Interrupt */ 43#define IRQ_MEM_DMA0 BFIN_IRQ(29) /* (Memory DMA Stream 0) */
44#define IRQ_RTC 10 /*RTC Interrupt */ 44#define IRQ_MEM_DMA1 BFIN_IRQ(30) /* (Memory DMA Stream 1) */
45#define IRQ_PPI 11 /*DMA0 Interrupt (PPI) */ 45#define IRQ_PF_INTB_WATCH BFIN_IRQ(31) /* Watchdog & Port F Interrupt B */
46#define IRQ_SPORT0_RX 12 /*DMA3 Interrupt (SPORT0 RX) */ 46
47#define IRQ_SPORT0_TX 13 /*DMA4 Interrupt (SPORT0 TX) */ 47#define SYS_IRQS 39
48#define IRQ_SPORT1_RX 14 /*DMA5 Interrupt (SPORT1 RX) */ 48
49#define IRQ_SPORT1_TX 15 /*DMA6 Interrupt (SPORT1 TX) */ 49#define IRQ_PPI_ERROR 42 /* PPI Error Interrupt */
50#define IRQ_TWI 16 /*TWI Interrupt */ 50#define IRQ_CAN_ERROR 43 /* CAN Error Interrupt */
51#define IRQ_SPI 17 /*DMA7 Interrupt (SPI) */ 51#define IRQ_MAC_ERROR 44 /* MAC Status/Error Interrupt */
52#define IRQ_UART0_RX 18 /*DMA8 Interrupt (UART0 RX) */ 52#define IRQ_SPORT0_ERROR 45 /* SPORT0 Error Interrupt */
53#define IRQ_UART0_TX 19 /*DMA9 Interrupt (UART0 TX) */ 53#define IRQ_SPORT1_ERROR 46 /* SPORT1 Error Interrupt */
54#define IRQ_UART1_RX 20 /*DMA10 Interrupt (UART1 RX) */ 54#define IRQ_SPI_ERROR 47 /* SPI Error Interrupt */
55#define IRQ_UART1_TX 21 /*DMA11 Interrupt (UART1 TX) */ 55#define IRQ_UART0_ERROR 48 /* UART Error Interrupt */
56#define IRQ_CAN_RX 22 /*CAN Receive Interrupt */ 56#define IRQ_UART1_ERROR 49 /* UART Error Interrupt */
57#define IRQ_CAN_TX 23 /*CAN Transmit Interrupt */ 57
58#define IRQ_MAC_RX 24 /*DMA1 (Ethernet RX) Interrupt */ 58#define IRQ_PF0 50
59#define IRQ_MAC_TX 25 /*DMA2 (Ethernet TX) Interrupt */ 59#define IRQ_PF1 51
60#define IRQ_TIMER0 26 /*Timer 0 */ 60#define IRQ_PF2 52
61#define IRQ_TIMER1 27 /*Timer 1 */ 61#define IRQ_PF3 53
62#define IRQ_TIMER2 28 /*Timer 2 */ 62#define IRQ_PF4 54
63#define IRQ_TIMER3 29 /*Timer 3 */ 63#define IRQ_PF5 55
64#define IRQ_TIMER4 30 /*Timer 4 */ 64#define IRQ_PF6 56
65#define IRQ_TIMER5 31 /*Timer 5 */ 65#define IRQ_PF7 57
66#define IRQ_TIMER6 32 /*Timer 6 */ 66#define IRQ_PF8 58
67#define IRQ_TIMER7 33 /*Timer 7 */ 67#define IRQ_PF9 59
68#define IRQ_PROG_INTA 34 /* PF Ports F&G (PF15:0) Interrupt A */ 68#define IRQ_PF10 60
69#define IRQ_PORTG_INTB 35 /* PF Port G (PF15:0) Interrupt B */ 69#define IRQ_PF11 61
70#define IRQ_MEM_DMA0 36 /*(Memory DMA Stream 0) */ 70#define IRQ_PF12 62
71#define IRQ_MEM_DMA1 37 /*(Memory DMA Stream 1) */ 71#define IRQ_PF13 63
72#define IRQ_PROG_INTB 38 /* PF Ports F (PF15:0) Interrupt B */ 72#define IRQ_PF14 64
73#define IRQ_WATCH 38 /*Watch Dog Timer */ 73#define IRQ_PF15 65
74 74
75#define IRQ_PPI_ERROR 42 /*PPI Error Interrupt */ 75#define IRQ_PG0 66
76#define IRQ_CAN_ERROR 43 /*CAN Error Interrupt */ 76#define IRQ_PG1 67
77#define IRQ_MAC_ERROR 44 /*MAC Status/Error Interrupt */ 77#define IRQ_PG2 68
78#define IRQ_SPORT0_ERROR 45 /*SPORT0 Error Interrupt */ 78#define IRQ_PG3 69
79#define IRQ_SPORT1_ERROR 46 /*SPORT1 Error Interrupt */ 79#define IRQ_PG4 70
80#define IRQ_SPI_ERROR 47 /*SPI Error Interrupt */ 80#define IRQ_PG5 71
81#define IRQ_UART0_ERROR 48 /*UART Error Interrupt */ 81#define IRQ_PG6 72
82#define IRQ_UART1_ERROR 49 /*UART Error Interrupt */ 82#define IRQ_PG7 73
83 83#define IRQ_PG8 74
84#define IRQ_PF0 50 84#define IRQ_PG9 75
85#define IRQ_PF1 51 85#define IRQ_PG10 76
86#define IRQ_PF2 52 86#define IRQ_PG11 77
87#define IRQ_PF3 53 87#define IRQ_PG12 78
88#define IRQ_PF4 54 88#define IRQ_PG13 79
89#define IRQ_PF5 55 89#define IRQ_PG14 80
90#define IRQ_PF6 56 90#define IRQ_PG15 81
91#define IRQ_PF7 57 91
92#define IRQ_PF8 58 92#define IRQ_PH0 82
93#define IRQ_PF9 59 93#define IRQ_PH1 83
94#define IRQ_PF10 60 94#define IRQ_PH2 84
95#define IRQ_PF11 61 95#define IRQ_PH3 85
96#define IRQ_PF12 62 96#define IRQ_PH4 86
97#define IRQ_PF13 63 97#define IRQ_PH5 87
98#define IRQ_PF14 64 98#define IRQ_PH6 88
99#define IRQ_PF15 65 99#define IRQ_PH7 89
100 100#define IRQ_PH8 90
101#define IRQ_PG0 66 101#define IRQ_PH9 91
102#define IRQ_PG1 67 102#define IRQ_PH10 92
103#define IRQ_PG2 68 103#define IRQ_PH11 93
104#define IRQ_PG3 69 104#define IRQ_PH12 94
105#define IRQ_PG4 70 105#define IRQ_PH13 95
106#define IRQ_PG5 71 106#define IRQ_PH14 96
107#define IRQ_PG6 72 107#define IRQ_PH15 97
108#define IRQ_PG7 73 108
109#define IRQ_PG8 74 109#define GPIO_IRQ_BASE IRQ_PF0
110#define IRQ_PG9 75 110
111#define IRQ_PG10 76 111#define IRQ_MAC_PHYINT 98 /* PHY_INT Interrupt */
112#define IRQ_PG11 77 112#define IRQ_MAC_MMCINT 99 /* MMC Counter Interrupt */
113#define IRQ_PG12 78 113#define IRQ_MAC_RXFSINT 100 /* RX Frame-Status Interrupt */
114#define IRQ_PG13 79 114#define IRQ_MAC_TXFSINT 101 /* TX Frame-Status Interrupt */
115#define IRQ_PG14 80 115#define IRQ_MAC_WAKEDET 102 /* Wake-Up Interrupt */
116#define IRQ_PG15 81 116#define IRQ_MAC_RXDMAERR 103 /* RX DMA Direction Error Interrupt */
117 117#define IRQ_MAC_TXDMAERR 104 /* TX DMA Direction Error Interrupt */
118#define IRQ_PH0 82 118#define IRQ_MAC_STMDONE 105 /* Station Mgt. Transfer Done Interrupt */
119#define IRQ_PH1 83 119
120#define IRQ_PH2 84 120#define IRQ_MAC_RX 106 /* DMA1 Interrupt (Ethernet RX) */
121#define IRQ_PH3 85 121#define IRQ_PORTH_INTA 107 /* Port H Interrupt A */
122#define IRQ_PH4 86 122
123#define IRQ_PH5 87 123#if 0 /* No Interrupt B support (yet) */
124#define IRQ_PH6 88 124#define IRQ_MAC_TX 108 /* DMA2 Interrupt (Ethernet TX) */
125#define IRQ_PH7 89 125#define IRQ_PORTH_INTB 109 /* Port H Interrupt B */
126#define IRQ_PH8 90 126#else
127#define IRQ_PH9 91 127#define IRQ_MAC_TX IRQ_PH_INTB_MAC_TX
128#define IRQ_PH10 92 128#endif
129#define IRQ_PH11 93 129
130#define IRQ_PH12 94 130#define IRQ_PORTF_INTA 110 /* Port F Interrupt A */
131#define IRQ_PH13 95 131#define IRQ_PORTG_INTA 111 /* Port G Interrupt A */
132#define IRQ_PH14 96 132
133#define IRQ_PH15 97 133#if 0 /* No Interrupt B support (yet) */
134 134#define IRQ_WATCH 112 /* Watchdog Timer */
135#define GPIO_IRQ_BASE IRQ_PF0 135#define IRQ_PORTF_INTB 113 /* Port F Interrupt B */
136 136#else
137#define IRQ_MAC_PHYINT 98 /* PHY_INT Interrupt */ 137#define IRQ_WATCH IRQ_PF_INTB_WATCH
138#define IRQ_MAC_MMCINT 99 /* MMC Counter Interrupt */ 138#endif
139#define IRQ_MAC_RXFSINT 100 /* RX Frame-Status Interrupt */ 139
140#define IRQ_MAC_TXFSINT 101 /* TX Frame-Status Interrupt */ 140#define NR_MACH_IRQS (113 + 1)
141#define IRQ_MAC_WAKEDET 102 /* Wake-Up Interrupt */ 141
142#define IRQ_MAC_RXDMAERR 103 /* RX DMA Direction Error Interrupt */ 142/* IAR0 BIT FIELDS */
143#define IRQ_MAC_TXDMAERR 104 /* TX DMA Direction Error Interrupt */ 143#define IRQ_PLL_WAKEUP_POS 0
144#define IRQ_MAC_STMDONE 105 /* Station Mgt. Transfer Done Interrupt */ 144#define IRQ_DMA_ERROR_POS 4
145 145#define IRQ_ERROR_POS 8
146#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) 146#define IRQ_RTC_POS 12
147#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) 147#define IRQ_PPI_POS 16
148 148#define IRQ_SPORT0_RX_POS 20
149#define IVG7 7 149#define IRQ_SPORT0_TX_POS 24
150#define IVG8 8 150#define IRQ_SPORT1_RX_POS 28
151#define IVG9 9 151
152#define IVG10 10 152/* IAR1 BIT FIELDS */
153#define IVG11 11 153#define IRQ_SPORT1_TX_POS 0
154#define IVG12 12 154#define IRQ_TWI_POS 4
155#define IVG13 13 155#define IRQ_SPI_POS 8
156#define IVG14 14 156#define IRQ_UART0_RX_POS 12
157#define IVG15 15 157#define IRQ_UART0_TX_POS 16
158 158#define IRQ_UART1_RX_POS 20
159/* IAR0 BIT FIELDS*/ 159#define IRQ_UART1_TX_POS 24
160#define IRQ_PLL_WAKEUP_POS 0 160#define IRQ_CAN_RX_POS 28
161#define IRQ_DMA_ERROR_POS 4 161
162#define IRQ_ERROR_POS 8 162/* IAR2 BIT FIELDS */
163#define IRQ_RTC_POS 12 163#define IRQ_CAN_TX_POS 0
164#define IRQ_PPI_POS 16 164#define IRQ_MAC_RX_POS 4
165#define IRQ_SPORT0_RX_POS 20 165#define IRQ_MAC_TX_POS 8
166#define IRQ_SPORT0_TX_POS 24 166#define IRQ_TIMER0_POS 12
167#define IRQ_SPORT1_RX_POS 28 167#define IRQ_TIMER1_POS 16
168 168#define IRQ_TIMER2_POS 20
169/* IAR1 BIT FIELDS*/ 169#define IRQ_TIMER3_POS 24
170#define IRQ_SPORT1_TX_POS 0 170#define IRQ_TIMER4_POS 28
171#define IRQ_TWI_POS 4 171
172#define IRQ_SPI_POS 8 172/* IAR3 BIT FIELDS */
173#define IRQ_UART0_RX_POS 12 173#define IRQ_TIMER5_POS 0
174#define IRQ_UART0_TX_POS 16 174#define IRQ_TIMER6_POS 4
175#define IRQ_UART1_RX_POS 20 175#define IRQ_TIMER7_POS 8
176#define IRQ_UART1_TX_POS 24 176#define IRQ_PROG_INTA_POS 12
177#define IRQ_CAN_RX_POS 28 177#define IRQ_PORTG_INTB_POS 16
178 178#define IRQ_MEM_DMA0_POS 20
179/* IAR2 BIT FIELDS*/ 179#define IRQ_MEM_DMA1_POS 24
180#define IRQ_CAN_TX_POS 0 180#define IRQ_WATCH_POS 28
181#define IRQ_MAC_RX_POS 4 181
182#define IRQ_MAC_TX_POS 8 182#define init_mach_irq init_mach_irq
183#define IRQ_TIMER0_POS 12 183
184#define IRQ_TIMER1_POS 16 184#endif
185#define IRQ_TIMER2_POS 20
186#define IRQ_TIMER3_POS 24
187#define IRQ_TIMER4_POS 28
188
189/* IAR3 BIT FIELDS*/
190#define IRQ_TIMER5_POS 0
191#define IRQ_TIMER6_POS 4
192#define IRQ_TIMER7_POS 8
193#define IRQ_PROG_INTA_POS 12
194#define IRQ_PORTG_INTB_POS 16
195#define IRQ_MEM_DMA0_POS 20
196#define IRQ_MEM_DMA1_POS 24
197#define IRQ_WATCH_POS 28
198
199#endif /* _BF537_IRQ_H_ */
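Note the aliasing at the end of the new BF537 header: until Port F/H "Interrupt B" demuxing exists, IRQ_MAC_TX and IRQ_WATCH are simply #defined to the shared SIC inputs IRQ_PH_INTB_MAC_TX and IRQ_PF_INTB_WATCH, so existing callers keep compiling unchanged. A hypothetical consumer (not part of this patch) would still request the old name:

/* Hypothetical driver snippet: IRQ_MAC_TX now resolves to the shared
 * Port H Interrupt B / Ethernet TX source, transparently to the caller. */
#include <linux/interrupt.h>
#include <mach/irq.h>

static irqreturn_t mac_tx_isr(int irq, void *dev_id)
{
	/* irq == IRQ_PH_INTB_MAC_TX here, because IRQ_MAC_TX is an alias */
	return IRQ_HANDLED;
}

static int mac_tx_setup(void *dev)
{
	return request_irq(IRQ_MAC_TX, mac_tx_isr, 0, "bfin-mac-tx", dev);
}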
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
index f6500622b35d..2137a209a22b 100644
--- a/arch/blackfin/mach-bf537/ints-priority.c
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -10,6 +10,13 @@
10#include <linux/irq.h> 10#include <linux/irq.h>
11#include <asm/blackfin.h> 11#include <asm/blackfin.h>
12 12
13#include <asm/irq_handler.h>
14#include <asm/bfin5xx_spi.h>
15#include <asm/bfin_sport.h>
16#include <asm/bfin_can.h>
17#include <asm/bfin_dma.h>
18#include <asm/dpmc.h>
19
13void __init program_IAR(void) 20void __init program_IAR(void)
14{ 21{
15 /* Program the IAR0 Register with the configured priority */ 22 /* Program the IAR0 Register with the configured priority */
@@ -51,3 +58,159 @@ void __init program_IAR(void)
51 58
52 SSYNC(); 59 SSYNC();
53} 60}
61
62#define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */
63#define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */
64#define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */
65#define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */
66#define UART_ERR_MASK (0x6) /* UART_IIR */
67#define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */
68
69static int error_int_mask;
70
71static void bf537_generic_error_mask_irq(struct irq_data *d)
72{
73 error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
74 if (!error_int_mask)
75 bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
76}
77
78static void bf537_generic_error_unmask_irq(struct irq_data *d)
79{
80 bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
81 error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
82}
83
84static struct irq_chip bf537_generic_error_irqchip = {
85 .name = "ERROR",
86 .irq_ack = bfin_ack_noop,
87 .irq_mask_ack = bf537_generic_error_mask_irq,
88 .irq_mask = bf537_generic_error_mask_irq,
89 .irq_unmask = bf537_generic_error_unmask_irq,
90};
91
92static void bf537_demux_error_irq(unsigned int int_err_irq,
93 struct irq_desc *inta_desc)
94{
95 int irq = 0;
96
97#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
98 if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
99 irq = IRQ_MAC_ERROR;
100 else
101#endif
102 if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
103 irq = IRQ_SPORT0_ERROR;
104 else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
105 irq = IRQ_SPORT1_ERROR;
106 else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
107 irq = IRQ_PPI_ERROR;
108 else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
109 irq = IRQ_CAN_ERROR;
110 else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
111 irq = IRQ_SPI_ERROR;
112 else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
113 irq = IRQ_UART0_ERROR;
114 else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
115 irq = IRQ_UART1_ERROR;
116
117 if (irq) {
118 if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
119 bfin_handle_irq(irq);
120 else {
121
122 switch (irq) {
123 case IRQ_PPI_ERROR:
124 bfin_write_PPI_STATUS(PPI_ERR_MASK);
125 break;
126#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
127 case IRQ_MAC_ERROR:
128 bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
129 break;
130#endif
131 case IRQ_SPORT0_ERROR:
132 bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
133 break;
134
135 case IRQ_SPORT1_ERROR:
136 bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
137 break;
138
139 case IRQ_CAN_ERROR:
140 bfin_write_CAN_GIS(CAN_ERR_MASK);
141 break;
142
143 case IRQ_SPI_ERROR:
144 bfin_write_SPI_STAT(SPI_ERR_MASK);
145 break;
146
147 default:
148 break;
149 }
150
151 pr_debug("IRQ %d:"
152 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
153 irq);
154 }
155 } else
156 pr_err("%s: IRQ ?: PERIPHERAL ERROR INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
157 __func__);
158
159}
160
161#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
162static int mac_rx_int_mask;
163
164static void bf537_mac_rx_mask_irq(struct irq_data *d)
165{
166 mac_rx_int_mask &= ~(1L << (d->irq - IRQ_MAC_RX));
167 if (!mac_rx_int_mask)
168 bfin_internal_mask_irq(IRQ_PH_INTA_MAC_RX);
169}
170
171static void bf537_mac_rx_unmask_irq(struct irq_data *d)
172{
173 bfin_internal_unmask_irq(IRQ_PH_INTA_MAC_RX);
174 mac_rx_int_mask |= 1L << (d->irq - IRQ_MAC_RX);
175}
176
177static struct irq_chip bf537_mac_rx_irqchip = {
178 .name = "ERROR",
179 .irq_ack = bfin_ack_noop,
180 .irq_mask_ack = bf537_mac_rx_mask_irq,
181 .irq_mask = bf537_mac_rx_mask_irq,
182 .irq_unmask = bf537_mac_rx_unmask_irq,
183};
184
185static void bf537_demux_mac_rx_irq(unsigned int int_irq,
186 struct irq_desc *desc)
187{
188 if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
189 bfin_handle_irq(IRQ_MAC_RX);
190 else
191 bfin_demux_gpio_irq(int_irq, desc);
192}
193#endif
194
195void __init init_mach_irq(void)
196{
197 int irq;
198
199#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
200 /* Clear EMAC Interrupt Status bits so we can demux it later */
201 bfin_write_EMAC_SYSTAT(-1);
202#endif
203
204 irq_set_chained_handler(IRQ_GENERIC_ERROR, bf537_demux_error_irq);
205 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
206 irq_set_chip_and_handler(irq, &bf537_generic_error_irqchip,
207 handle_level_irq);
208
209#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
210 irq_set_chained_handler(IRQ_PH_INTA_MAC_RX, bf537_demux_mac_rx_irq);
211 irq_set_chip_and_handler(IRQ_MAC_RX, &bf537_mac_rx_irqchip, handle_level_irq);
212 irq_set_chip_and_handler(IRQ_PORTH_INTA, &bf537_mac_rx_irqchip, handle_level_irq);
213
214 irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
215#endif
216}
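The net effect of init_mach_irq() above is that the per-peripheral error sources (IRQ_PPI_ERROR through IRQ_UART1_ERROR) behave like ordinary interrupt lines even though they all arrive on the single IRQ_GENERIC_ERROR input: bf537_demux_error_irq() inspects the peripheral status registers and forwards the real source through bfin_handle_irq(). A hedged sketch of a consumer (not part of this patch):

/* Hypothetical consumer: a SPORT0 driver claims its demuxed error line.
 * The chained handler on IRQ_GENERIC_ERROR decides, via SPORT0_STAT, when
 * this handler actually runs. */
#include <linux/interrupt.h>
#include <mach/irq.h>

static irqreturn_t sport0_err_isr(int irq, void *dev_id)
{
	/* clear and report the SPORT0 error condition here */
	return IRQ_HANDLED;
}

static int sport0_request_error_irq(void *dev)
{
	return request_irq(IRQ_SPORT0_ERROR, sport0_err_isr, 0,
			   "sport0-error", dev);
}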
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 8774b481c78e..55e7d0712a94 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -5,14 +5,14 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision H, 07/10/2009; ADSP-BF538/BF538F Blackfin Processor Anomaly List 14 * - Revision I, 05/25/2010; ADSP-BF538/BF538F Blackfin Processor Anomaly List
15 * - Revision M, 07/10/2009; ADSP-BF539/BF539F Blackfin Processor Anomaly List 15 * - Revision N, 05/25/2010; ADSP-BF539/BF539F Blackfin Processor Anomaly List
16 */ 16 */
17 17
18#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
@@ -179,6 +179,7 @@
179#define ANOMALY_05000363 (0) 179#define ANOMALY_05000363 (0)
180#define ANOMALY_05000364 (0) 180#define ANOMALY_05000364 (0)
181#define ANOMALY_05000380 (0) 181#define ANOMALY_05000380 (0)
182#define ANOMALY_05000383 (0)
182#define ANOMALY_05000386 (1) 183#define ANOMALY_05000386 (1)
183#define ANOMALY_05000389 (0) 184#define ANOMALY_05000389 (0)
184#define ANOMALY_05000400 (0) 185#define ANOMALY_05000400 (0)
@@ -186,6 +187,7 @@
186#define ANOMALY_05000430 (0) 187#define ANOMALY_05000430 (0)
187#define ANOMALY_05000432 (0) 188#define ANOMALY_05000432 (0)
188#define ANOMALY_05000435 (0) 189#define ANOMALY_05000435 (0)
190#define ANOMALY_05000440 (0)
189#define ANOMALY_05000447 (0) 191#define ANOMALY_05000447 (0)
190#define ANOMALY_05000448 (0) 192#define ANOMALY_05000448 (0)
191#define ANOMALY_05000456 (0) 193#define ANOMALY_05000456 (0)
@@ -193,6 +195,7 @@
193#define ANOMALY_05000465 (0) 195#define ANOMALY_05000465 (0)
194#define ANOMALY_05000467 (0) 196#define ANOMALY_05000467 (0)
195#define ANOMALY_05000474 (0) 197#define ANOMALY_05000474 (0)
198#define ANOMALY_05000480 (0)
196#define ANOMALY_05000485 (0) 199#define ANOMALY_05000485 (0)
197 200
198#endif 201#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/irq.h b/arch/blackfin/mach-bf538/include/mach/irq.h
index 7a479d224dc7..07ca069d37cd 100644
--- a/arch/blackfin/mach-bf538/include/mach/irq.h
+++ b/arch/blackfin/mach-bf538/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF538_IRQ_H_ 7#ifndef _BF538_IRQ_H_
8#define _BF538_IRQ_H_ 8#define _BF538_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 Event Source Core Event Name 12#define NR_PERI_INTS (2 * 32)
13 Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22 .....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27*/
28
29#define NR_PERI_INTS (2 * 32)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt */
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40
41#define BFIN_IRQ(x) ((x) + 7)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */ 15#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */
@@ -91,37 +62,26 @@
91 62
92#define SYS_IRQS BFIN_IRQ(63) /* 70 */ 63#define SYS_IRQS BFIN_IRQ(63) /* 70 */
93 64
94#define IRQ_PF0 71 65#define IRQ_PF0 71
95#define IRQ_PF1 72 66#define IRQ_PF1 72
96#define IRQ_PF2 73 67#define IRQ_PF2 73
97#define IRQ_PF3 74 68#define IRQ_PF3 74
98#define IRQ_PF4 75 69#define IRQ_PF4 75
99#define IRQ_PF5 76 70#define IRQ_PF5 76
100#define IRQ_PF6 77 71#define IRQ_PF6 77
101#define IRQ_PF7 78 72#define IRQ_PF7 78
102#define IRQ_PF8 79 73#define IRQ_PF8 79
103#define IRQ_PF9 80 74#define IRQ_PF9 80
104#define IRQ_PF10 81 75#define IRQ_PF10 81
105#define IRQ_PF11 82 76#define IRQ_PF11 82
106#define IRQ_PF12 83 77#define IRQ_PF12 83
107#define IRQ_PF13 84 78#define IRQ_PF13 84
108#define IRQ_PF14 85 79#define IRQ_PF14 85
109#define IRQ_PF15 86 80#define IRQ_PF15 86
110 81
111#define GPIO_IRQ_BASE IRQ_PF0 82#define GPIO_IRQ_BASE IRQ_PF0
112 83
113#define NR_MACH_IRQS (IRQ_PF15 + 1) 84#define NR_MACH_IRQS (IRQ_PF15 + 1)
114#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
115
116#define IVG7 7
117#define IVG8 8
118#define IVG9 9
119#define IVG10 10
120#define IVG11 11
121#define IVG12 12
122#define IVG13 13
123#define IVG14 14
124#define IVG15 15
125 85
126/* IAR0 BIT FIELDS */ 86/* IAR0 BIT FIELDS */
127#define IRQ_PLL_WAKEUP_POS 0 87#define IRQ_PLL_WAKEUP_POS 0
@@ -184,4 +144,5 @@
184#define IRQ_CAN_TX_POS 0 144#define IRQ_CAN_TX_POS 0
185#define IRQ_MEM1_DMA0_POS 4 145#define IRQ_MEM1_DMA0_POS 4
186#define IRQ_MEM1_DMA1_POS 8 146#define IRQ_MEM1_DMA1_POS 8
187#endif /* _BF538_IRQ_H_ */ 147
148#endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 93e19a54a880..311bf9970fe7 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -22,6 +22,7 @@
22#include <asm/gpio.h> 22#include <asm/gpio.h>
23#include <asm/nand.h> 23#include <asm/nand.h>
24#include <asm/dpmc.h> 24#include <asm/dpmc.h>
25#include <asm/bfin_sport.h>
25#include <asm/portmux.h> 26#include <asm/portmux.h>
26#include <asm/bfin_sdh.h> 27#include <asm/bfin_sdh.h>
27#include <mach/bf54x_keys.h> 28#include <mach/bf54x_keys.h>
@@ -956,7 +957,15 @@ static struct mtd_partition ezkit_partitions[] = {
956 .offset = MTDPART_OFS_APPEND, 957 .offset = MTDPART_OFS_APPEND,
957 }, { 958 }, {
958 .name = "file system(nor)", 959 .name = "file system(nor)",
959 .size = MTDPART_SIZ_FULL, 960 .size = 0x1000000 - 0x80000 - 0x400000 - 0x8000 * 4,
961 .offset = MTDPART_OFS_APPEND,
962 }, {
963 .name = "config(nor)",
964 .size = 0x8000 * 3,
965 .offset = MTDPART_OFS_APPEND,
966 }, {
967 .name = "u-boot env(nor)",
968 .size = 0x8000,
960 .offset = MTDPART_OFS_APPEND, 969 .offset = MTDPART_OFS_APPEND,
961 } 970 }
962}; 971};
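The resized "file system(nor)" partition and the two new partitions tile the flash exactly. Assuming the 0x1000000 in the size expression is the full 16 MiB NOR part and 0x80000 + 0x400000 are the two partitions defined earlier in ezkit_partitions[] (outside this hunk), the arithmetic works out as below:

/* Size bookkeeping for the hunk above (all figures taken from the size
 * expressions; a 16 MiB total is an assumption implied by 0x1000000):
 *
 *   file system(nor) : 0x1000000 - 0x80000 - 0x400000 - 0x8000 * 4 = 0xB60000
 *   config(nor)      : 0x8000 * 3                                  =  0x18000
 *   u-boot env(nor)  : 0x8000                                      =   0x8000
 *
 * 0xB60000 + 0x18000 + 0x8000 = 0xB80000, i.e. the 0x20000 carved out of the
 * file system partition is consumed exactly by the two new partitions.
 */
#if (0x1000000 - 0x80000 - 0x400000 - 0x8000 * 4) + 0x8000 * 3 + 0x8000 != \
	(0x1000000 - 0x80000 - 0x400000)
# error "ezkit NOR partitions no longer tile the flash"
#endif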
@@ -1312,27 +1321,110 @@ static struct platform_device bfin_dpmc = {
1312 }, 1321 },
1313}; 1322};
1314 1323
1315#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1324#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
1325 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
1326 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
1327
1328#define SPORT_REQ(x) \
1329 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
1330 P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
1331
1332static const u16 bfin_snd_pin[][7] = {
1333 SPORT_REQ(0),
1334 SPORT_REQ(1),
1335};
1336
1337static struct bfin_snd_platform_data bfin_snd_data[] = {
1338 {
1339 .pin_req = &bfin_snd_pin[0][0],
1340 },
1341 {
1342 .pin_req = &bfin_snd_pin[1][0],
1343 },
1344};
1345
1346#define BFIN_SND_RES(x) \
1347 [x] = { \
1348 { \
1349 .start = SPORT##x##_TCR1, \
1350 .end = SPORT##x##_TCR1, \
1351 .flags = IORESOURCE_MEM \
1352 }, \
1353 { \
1354 .start = CH_SPORT##x##_RX, \
1355 .end = CH_SPORT##x##_RX, \
1356 .flags = IORESOURCE_DMA, \
1357 }, \
1358 { \
1359 .start = CH_SPORT##x##_TX, \
1360 .end = CH_SPORT##x##_TX, \
1361 .flags = IORESOURCE_DMA, \
1362 }, \
1363 { \
1364 .start = IRQ_SPORT##x##_ERROR, \
1365 .end = IRQ_SPORT##x##_ERROR, \
1366 .flags = IORESOURCE_IRQ, \
1367 } \
1368 }
1369
1370static struct resource bfin_snd_resources[][4] = {
1371 BFIN_SND_RES(0),
1372 BFIN_SND_RES(1),
1373};
1374
1375static struct platform_device bfin_pcm = {
1376 .name = "bfin-pcm-audio",
1377 .id = -1,
1378};
1379#endif
1380
1381#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
1382static struct platform_device bfin_ad73311_codec_device = {
1383 .name = "ad73311",
1384 .id = -1,
1385};
1386#endif
1387
1388#if defined(CONFIG_SND_BF5XX_SOC_AD1980) || defined(CONFIG_SND_BF5XX_SOC_AD1980_MODULE)
1389static struct platform_device bfin_ad1980_codec_device = {
1390 .name = "ad1980",
1391 .id = -1,
1392};
1393#endif
1394
1395#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
1316static struct platform_device bfin_i2s = { 1396static struct platform_device bfin_i2s = {
1317 .name = "bfin-i2s", 1397 .name = "bfin-i2s",
1318 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1398 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1319 /* TODO: add platform data here */ 1399 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
1400 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
1401 .dev = {
1402 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
1403 },
1320}; 1404};
1321#endif 1405#endif
1322 1406
1323#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 1407#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
1324static struct platform_device bfin_tdm = { 1408static struct platform_device bfin_tdm = {
1325 .name = "bfin-tdm", 1409 .name = "bfin-tdm",
1326 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1410 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1327 /* TODO: add platform data here */ 1411 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
1412 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
1413 .dev = {
1414 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
1415 },
1328}; 1416};
1329#endif 1417#endif
1330 1418
1331#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 1419#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
1332static struct platform_device bfin_ac97 = { 1420static struct platform_device bfin_ac97 = {
1333 .name = "bfin-ac97", 1421 .name = "bfin-ac97",
1334 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1422 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1335 /* TODO: add platform data here */ 1423 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
1424 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
1425 .dev = {
1426 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
1427 },
1336}; 1428};
1337#endif 1429#endif
1338 1430
@@ -1450,6 +1542,16 @@ static struct platform_device *ezkit_devices[] __initdata = {
1450 &ezkit_flash_device, 1542 &ezkit_flash_device,
1451#endif 1543#endif
1452 1544
1545#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
1546 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
1547 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
1548 &bfin_pcm,
1549#endif
1550
1551#if defined(CONFIG_SND_BF5XX_SOC_AD1980) || defined(CONFIG_SND_BF5XX_SOC_AD1980_MODULE)
1552 &bfin_ad1980_codec_device,
1553#endif
1554
1453#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1555#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1454 &bfin_i2s, 1556 &bfin_i2s,
1455#endif 1557#endif
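Both the stamp and ezkit boards now pass the SPORT pin groups to the audio drivers via .pin_req, a zero-terminated array of P_SPORTx_* peripheral IDs built by SPORT_REQ() (struct bfin_snd_platform_data presumably comes from the newly included <asm/bfin_sport.h>). A driver would typically claim and release the whole group with the Blackfin portmux helpers; a hedged sketch, with hypothetical function names:

/* Sketch: consuming .pin_req from bfin_snd_platform_data. */
#include <linux/platform_device.h>
#include <asm/portmux.h>
#include <asm/bfin_sport.h>

static int bfin_snd_claim_pins(struct platform_device *pdev)
{
	struct bfin_snd_platform_data *pdata = pdev->dev.platform_data;

	/* pin_req is terminated by the trailing 0 added by SPORT_REQ() */
	return peripheral_request_list(pdata->pin_req, "bfin-audio");
}

static void bfin_snd_release_pins(struct platform_device *pdev)
{
	struct bfin_snd_platform_data *pdata = pdev->dev.platform_data;

	peripheral_free_list(pdata->pin_req);
}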
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index ffd0537295ac..9e70785bdde3 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision I, 07/23/2009; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List 14 * - Revision J, 06/03/2010; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -220,6 +220,8 @@
220#define ANOMALY_05000481 (1) 220#define ANOMALY_05000481 (1)
221/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */ 221/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */
222#define ANOMALY_05000483 (1) 222#define ANOMALY_05000483 (1)
223/* DDR Trim May Not Be Performed for Certain VLEV Values in OTP Page PBS00L */
224#define ANOMALY_05000484 (__SILICON_REVISION__ < 3)
223/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */ 225/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
224#define ANOMALY_05000485 (__SILICON_REVISION__ >= 2) 226#define ANOMALY_05000485 (__SILICON_REVISION__ >= 2)
225/* IFLUSH sucks at life */ 227/* IFLUSH sucks at life */
@@ -274,6 +276,8 @@
274#define ANOMALY_05000412 (0) 276#define ANOMALY_05000412 (0)
275#define ANOMALY_05000432 (0) 277#define ANOMALY_05000432 (0)
276#define ANOMALY_05000435 (0) 278#define ANOMALY_05000435 (0)
279#define ANOMALY_05000440 (0)
277#define ANOMALY_05000475 (0) 280#define ANOMALY_05000475 (0)
281#define ANOMALY_05000480 (0)
278 282
279#endif 283#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 7f87787e7738..533b8095b540 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF548_IRQ_H_ 7#ifndef _BF548_IRQ_H_
8#define _BF548_IRQ_H_ 8#define _BF548_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions
12 Event Source Core Event Name
13Core Emulation **
14Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22.....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27 */
28
29#define NR_PERI_INTS (32 * 3)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt*/
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40 11
41#define BFIN_IRQ(x) ((x) + 7) 12#define NR_PERI_INTS (3 * 32)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMAC0_ERROR BFIN_IRQ(1) /* DMAC0 Status Interrupt */ 15#define IRQ_DMAC0_ERROR BFIN_IRQ(1) /* DMAC0 Status Interrupt */
@@ -311,49 +282,37 @@ Events (highest priority) EMU 0
311#define IRQ_PJ14 BFIN_PJ_IRQ(14) /* N/A */ 282#define IRQ_PJ14 BFIN_PJ_IRQ(14) /* N/A */
312#define IRQ_PJ15 BFIN_PJ_IRQ(15) /* N/A */ 283#define IRQ_PJ15 BFIN_PJ_IRQ(15) /* N/A */
313 284
314#define GPIO_IRQ_BASE IRQ_PA0 285#define GPIO_IRQ_BASE IRQ_PA0
315 286
316#define NR_MACH_IRQS (IRQ_PJ15 + 1) 287#define NR_MACH_IRQS (IRQ_PJ15 + 1)
317#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
318 288
319/* For compatibility reasons with existing code */ 289/* For compatibility reasons with existing code */
320 290
321#define IRQ_DMAC0_ERR IRQ_DMAC0_ERROR 291#define IRQ_DMAC0_ERR IRQ_DMAC0_ERROR
322#define IRQ_EPPI0_ERR IRQ_EPPI0_ERROR 292#define IRQ_EPPI0_ERR IRQ_EPPI0_ERROR
323#define IRQ_SPORT0_ERR IRQ_SPORT0_ERROR 293#define IRQ_SPORT0_ERR IRQ_SPORT0_ERROR
324#define IRQ_SPORT1_ERR IRQ_SPORT1_ERROR 294#define IRQ_SPORT1_ERR IRQ_SPORT1_ERROR
325#define IRQ_SPI0_ERR IRQ_SPI0_ERROR 295#define IRQ_SPI0_ERR IRQ_SPI0_ERROR
326#define IRQ_UART0_ERR IRQ_UART0_ERROR 296#define IRQ_UART0_ERR IRQ_UART0_ERROR
327#define IRQ_DMAC1_ERR IRQ_DMAC1_ERROR 297#define IRQ_DMAC1_ERR IRQ_DMAC1_ERROR
328#define IRQ_SPORT2_ERR IRQ_SPORT2_ERROR 298#define IRQ_SPORT2_ERR IRQ_SPORT2_ERROR
329#define IRQ_SPORT3_ERR IRQ_SPORT3_ERROR 299#define IRQ_SPORT3_ERR IRQ_SPORT3_ERROR
330#define IRQ_SPI1_ERR IRQ_SPI1_ERROR 300#define IRQ_SPI1_ERR IRQ_SPI1_ERROR
331#define IRQ_SPI2_ERR IRQ_SPI2_ERROR 301#define IRQ_SPI2_ERR IRQ_SPI2_ERROR
332#define IRQ_UART1_ERR IRQ_UART1_ERROR 302#define IRQ_UART1_ERR IRQ_UART1_ERROR
333#define IRQ_UART2_ERR IRQ_UART2_ERROR 303#define IRQ_UART2_ERR IRQ_UART2_ERROR
334#define IRQ_CAN0_ERR IRQ_CAN0_ERROR 304#define IRQ_CAN0_ERR IRQ_CAN0_ERROR
335#define IRQ_MXVR_ERR IRQ_MXVR_ERROR 305#define IRQ_MXVR_ERR IRQ_MXVR_ERROR
336#define IRQ_EPPI1_ERR IRQ_EPPI1_ERROR 306#define IRQ_EPPI1_ERR IRQ_EPPI1_ERROR
337#define IRQ_EPPI2_ERR IRQ_EPPI2_ERROR 307#define IRQ_EPPI2_ERR IRQ_EPPI2_ERROR
338#define IRQ_UART3_ERR IRQ_UART3_ERROR 308#define IRQ_UART3_ERR IRQ_UART3_ERROR
339#define IRQ_HOST_ERR IRQ_HOST_ERROR 309#define IRQ_HOST_ERR IRQ_HOST_ERROR
340#define IRQ_PIXC_ERR IRQ_PIXC_ERROR 310#define IRQ_PIXC_ERR IRQ_PIXC_ERROR
341#define IRQ_NFC_ERR IRQ_NFC_ERROR 311#define IRQ_NFC_ERR IRQ_NFC_ERROR
342#define IRQ_ATAPI_ERR IRQ_ATAPI_ERROR 312#define IRQ_ATAPI_ERR IRQ_ATAPI_ERROR
343#define IRQ_CAN1_ERR IRQ_CAN1_ERROR 313#define IRQ_CAN1_ERR IRQ_CAN1_ERROR
344#define IRQ_HS_DMA_ERR IRQ_HS_DMA_ERROR 314#define IRQ_HS_DMA_ERR IRQ_HS_DMA_ERROR
345 315
346
347#define IVG7 7
348#define IVG8 8
349#define IVG9 9
350#define IVG10 10
351#define IVG11 11
352#define IVG12 12
353#define IVG13 13
354#define IVG14 14
355#define IVG15 15
356
357/* IAR0 BIT FIELDS */ 316/* IAR0 BIT FIELDS */
358#define IRQ_PLL_WAKEUP_POS 0 317#define IRQ_PLL_WAKEUP_POS 0
359#define IRQ_DMAC0_ERR_POS 4 318#define IRQ_DMAC0_ERR_POS 4
@@ -492,4 +451,4 @@ struct bfin_pint_regs {
492 451
493#endif 452#endif
494 453
495#endif /* _BF548_IRQ_H_ */ 454#endif
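With the boilerplate moved into mach-common/irq.h, the per-machine header keeps only the peripheral map: NR_PERI_INTS is spelled as banks-times-32 (three SIC banks here) and every source is expressed through BFIN_IRQ(). A small check, assuming the common header keeps the ((x) + 7) offset shown in the removed lines, i.e. the seven fixed core events still come first:

	/* sketch: peripheral index n maps to abstract IRQ n + 7 */
	#define EXAMPLE_FIRST_SIC_IRQ	BFIN_IRQ(0)			/* == IRQ_PLL_WAKEUP == 7 */
	#define EXAMPLE_LAST_SIC_IRQ	BFIN_IRQ(NR_PERI_INTS - 1)	/* == 102 for 3 * 32 sources */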
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index f667e7704197..5067984a62e7 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -247,7 +247,15 @@ static struct mtd_partition ezkit_partitions[] = {
247 .offset = MTDPART_OFS_APPEND, 247 .offset = MTDPART_OFS_APPEND,
248 }, { 248 }, {
249 .name = "file system(nor)", 249 .name = "file system(nor)",
250 .size = MTDPART_SIZ_FULL, 250 .size = 0x800000 - 0x40000 - 0x1C0000 - 0x2000 * 8,
251 .offset = MTDPART_OFS_APPEND,
252 }, {
253 .name = "config(nor)",
254 .size = 0x2000 * 7,
255 .offset = MTDPART_OFS_APPEND,
256 }, {
257 .name = "u-boot env(nor)",
258 .size = 0x2000,
251 .offset = MTDPART_OFS_APPEND, 259 .offset = MTDPART_OFS_APPEND,
252 } 260 }
253}; 261};
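The "file system(nor)" partition no longer claims the rest of the flash; its size is written out so that eight 8 KiB sectors remain for the two new partitions at the end. The arithmetic, with the meaning of the first two terms assumed from the partitions outside this hunk:

	/*   0x800000  total NOR flash (8 MiB)
	 * - 0x040000  first partition (presumably bootloader)
	 * - 0x1C0000  second partition (presumably kernel)
	 * - 0x2000*8  eight 8 KiB sectors: 7 for "config(nor)" + 1 for "u-boot env(nor)"
	 * = 0x5F0000  left for "file system(nor)"
	 */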
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 6a3499b02097..22b5ab773027 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision Q, 11/07/2008; ADSP-BF561 Blackfin Processor Anomaly List 14 * - Revision R, 05/25/2010; ADSP-BF561 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -290,12 +290,18 @@
290#define ANOMALY_05000428 (__SILICON_REVISION__ > 3) 290#define ANOMALY_05000428 (__SILICON_REVISION__ > 3)
291/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 291/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
292#define ANOMALY_05000443 (1) 292#define ANOMALY_05000443 (1)
293/* SCKELOW Feature Is Not Functional */
294#define ANOMALY_05000458 (1)
293/* False Hardware Error when RETI Points to Invalid Memory */ 295/* False Hardware Error when RETI Points to Invalid Memory */
294#define ANOMALY_05000461 (1) 296#define ANOMALY_05000461 (1)
297/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
298#define ANOMALY_05000462 (1)
299/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
300#define ANOMALY_05000471 (1)
295/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 301/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
296#define ANOMALY_05000473 (1) 302#define ANOMALY_05000473 (1)
297/* Possible Lockup Condition whem Modifying PLL from External Memory */ 303/* Possible Lockup Condition whem Modifying PLL from External Memory */
298#define ANOMALY_05000475 (__SILICON_REVISION__ < 4) 304#define ANOMALY_05000475 (1)
299/* TESTSET Instruction Cannot Be Interrupted */ 305/* TESTSET Instruction Cannot Be Interrupted */
300#define ANOMALY_05000477 (1) 306#define ANOMALY_05000477 (1)
301/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 307/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
@@ -314,12 +320,14 @@
314#define ANOMALY_05000353 (1) 320#define ANOMALY_05000353 (1)
315#define ANOMALY_05000364 (0) 321#define ANOMALY_05000364 (0)
316#define ANOMALY_05000380 (0) 322#define ANOMALY_05000380 (0)
323#define ANOMALY_05000383 (0)
317#define ANOMALY_05000386 (1) 324#define ANOMALY_05000386 (1)
318#define ANOMALY_05000389 (0) 325#define ANOMALY_05000389 (0)
319#define ANOMALY_05000400 (0) 326#define ANOMALY_05000400 (0)
320#define ANOMALY_05000430 (0) 327#define ANOMALY_05000430 (0)
321#define ANOMALY_05000432 (0) 328#define ANOMALY_05000432 (0)
322#define ANOMALY_05000435 (0) 329#define ANOMALY_05000435 (0)
330#define ANOMALY_05000440 (0)
323#define ANOMALY_05000447 (0) 331#define ANOMALY_05000447 (0)
324#define ANOMALY_05000448 (0) 332#define ANOMALY_05000448 (0)
325#define ANOMALY_05000456 (0) 333#define ANOMALY_05000456 (0)
@@ -327,6 +335,7 @@
327#define ANOMALY_05000465 (0) 335#define ANOMALY_05000465 (0)
328#define ANOMALY_05000467 (0) 336#define ANOMALY_05000467 (0)
329#define ANOMALY_05000474 (0) 337#define ANOMALY_05000474 (0)
338#define ANOMALY_05000480 (0)
330#define ANOMALY_05000485 (0) 339#define ANOMALY_05000485 (0)
331 340
332#endif 341#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/irq.h b/arch/blackfin/mach-bf561/include/mach/irq.h
index c95566ade51b..d6998520f70f 100644
--- a/arch/blackfin/mach-bf561/include/mach/irq.h
+++ b/arch/blackfin/mach-bf561/include/mach/irq.h
@@ -7,212 +7,98 @@
7#ifndef _BF561_IRQ_H_ 7#ifndef _BF561_IRQ_H_
8#define _BF561_IRQ_H_ 8#define _BF561_IRQ_H_
9 9
10/*********************************************************************** 10#include <mach-common/irq.h>
11 * Interrupt source definitions: 11
12 Event Source Core Event Name IRQ No 12#define NR_PERI_INTS (2 * 32)
13 (highest priority) 13
14 Emulation Events EMU 0 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
15 Reset RST 1 15#define IRQ_DMA1_ERROR BFIN_IRQ(1) /* DMA1 Error (general) */
16 NMI NMI 2 16#define IRQ_DMA_ERROR IRQ_DMA1_ERROR /* DMA1 Error (general) */
17 Exception EVX 3 17#define IRQ_DMA2_ERROR BFIN_IRQ(2) /* DMA2 Error (general) */
18 Reserved -- 4 18#define IRQ_IMDMA_ERROR BFIN_IRQ(3) /* IMDMA Error Interrupt */
19 Hardware Error IVHW 5 19#define IRQ_PPI1_ERROR BFIN_IRQ(4) /* PPI1 Error Interrupt */
20 Core Timer IVTMR 6 * 20#define IRQ_PPI_ERROR IRQ_PPI1_ERROR /* PPI1 Error Interrupt */
21 21#define IRQ_PPI2_ERROR BFIN_IRQ(5) /* PPI2 Error Interrupt */
22 PLL Wakeup Interrupt IVG7 7 22#define IRQ_SPORT0_ERROR BFIN_IRQ(6) /* SPORT0 Error Interrupt */
23 DMA1 Error (generic) IVG7 8 23#define IRQ_SPORT1_ERROR BFIN_IRQ(7) /* SPORT1 Error Interrupt */
24 DMA2 Error (generic) IVG7 9 24#define IRQ_SPI_ERROR BFIN_IRQ(8) /* SPI Error Interrupt */
25 IMDMA Error (generic) IVG7 10 25#define IRQ_UART_ERROR BFIN_IRQ(9) /* UART Error Interrupt */
26 PPI1 Error Interrupt IVG7 11 26#define IRQ_RESERVED_ERROR BFIN_IRQ(10) /* Reversed */
27 PPI2 Error Interrupt IVG7 12 27#define IRQ_DMA1_0 BFIN_IRQ(11) /* DMA1 0 Interrupt(PPI1) */
28 SPORT0 Error Interrupt IVG7 13 28#define IRQ_PPI IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
29 SPORT1 Error Interrupt IVG7 14 29#define IRQ_PPI0 IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
30 SPI Error Interrupt IVG7 15 30#define IRQ_DMA1_1 BFIN_IRQ(12) /* DMA1 1 Interrupt(PPI2) */
31 UART Error Interrupt IVG7 16 31#define IRQ_PPI1 IRQ_DMA1_1 /* DMA1 1 Interrupt(PPI2) */
32 Reserved Interrupt IVG7 17 32#define IRQ_DMA1_2 BFIN_IRQ(13) /* DMA1 2 Interrupt */
33 33#define IRQ_DMA1_3 BFIN_IRQ(14) /* DMA1 3 Interrupt */
34 DMA1 0 Interrupt(PPI1) IVG8 18 34#define IRQ_DMA1_4 BFIN_IRQ(15) /* DMA1 4 Interrupt */
35 DMA1 1 Interrupt(PPI2) IVG8 19 35#define IRQ_DMA1_5 BFIN_IRQ(16) /* DMA1 5 Interrupt */
36 DMA1 2 Interrupt IVG8 20 36#define IRQ_DMA1_6 BFIN_IRQ(17) /* DMA1 6 Interrupt */
37 DMA1 3 Interrupt IVG8 21 37#define IRQ_DMA1_7 BFIN_IRQ(18) /* DMA1 7 Interrupt */
38 DMA1 4 Interrupt IVG8 22 38#define IRQ_DMA1_8 BFIN_IRQ(19) /* DMA1 8 Interrupt */
39 DMA1 5 Interrupt IVG8 23 39#define IRQ_DMA1_9 BFIN_IRQ(20) /* DMA1 9 Interrupt */
40 DMA1 6 Interrupt IVG8 24 40#define IRQ_DMA1_10 BFIN_IRQ(21) /* DMA1 10 Interrupt */
41 DMA1 7 Interrupt IVG8 25 41#define IRQ_DMA1_11 BFIN_IRQ(22) /* DMA1 11 Interrupt */
42 DMA1 8 Interrupt IVG8 26 42#define IRQ_DMA2_0 BFIN_IRQ(23) /* DMA2 0 (SPORT0 RX) */
43 DMA1 9 Interrupt IVG8 27 43#define IRQ_SPORT0_RX IRQ_DMA2_0 /* DMA2 0 (SPORT0 RX) */
44 DMA1 10 Interrupt IVG8 28 44#define IRQ_DMA2_1 BFIN_IRQ(24) /* DMA2 1 (SPORT0 TX) */
45 DMA1 11 Interrupt IVG8 29 45#define IRQ_SPORT0_TX IRQ_DMA2_1 /* DMA2 1 (SPORT0 TX) */
46 46#define IRQ_DMA2_2 BFIN_IRQ(25) /* DMA2 2 (SPORT1 RX) */
47 DMA2 0 (SPORT0 RX) IVG9 30 47#define IRQ_SPORT1_RX IRQ_DMA2_2 /* DMA2 2 (SPORT1 RX) */
48 DMA2 1 (SPORT0 TX) IVG9 31 48#define IRQ_DMA2_3 BFIN_IRQ(26) /* DMA2 3 (SPORT2 TX) */
49 DMA2 2 (SPORT1 RX) IVG9 32 49#define IRQ_SPORT1_TX IRQ_DMA2_3 /* DMA2 3 (SPORT2 TX) */
50 DMA2 3 (SPORT2 TX) IVG9 33 50#define IRQ_DMA2_4 BFIN_IRQ(27) /* DMA2 4 (SPI) */
51 DMA2 4 (SPI) IVG9 34 51#define IRQ_SPI IRQ_DMA2_4 /* DMA2 4 (SPI) */
52 DMA2 5 (UART RX) IVG9 35 52#define IRQ_DMA2_5 BFIN_IRQ(28) /* DMA2 5 (UART RX) */
53 DMA2 6 (UART TX) IVG9 36 53#define IRQ_UART_RX IRQ_DMA2_5 /* DMA2 5 (UART RX) */
54 DMA2 7 Interrupt IVG9 37 54#define IRQ_DMA2_6 BFIN_IRQ(29) /* DMA2 6 (UART TX) */
55 DMA2 8 Interrupt IVG9 38 55#define IRQ_UART_TX IRQ_DMA2_6 /* DMA2 6 (UART TX) */
56 DMA2 9 Interrupt IVG9 39 56#define IRQ_DMA2_7 BFIN_IRQ(30) /* DMA2 7 Interrupt */
57 DMA2 10 Interrupt IVG9 40 57#define IRQ_DMA2_8 BFIN_IRQ(31) /* DMA2 8 Interrupt */
58 DMA2 11 Interrupt IVG9 41 58#define IRQ_DMA2_9 BFIN_IRQ(32) /* DMA2 9 Interrupt */
59 59#define IRQ_DMA2_10 BFIN_IRQ(33) /* DMA2 10 Interrupt */
60 TIMER 0 Interrupt IVG10 42 60#define IRQ_DMA2_11 BFIN_IRQ(34) /* DMA2 11 Interrupt */
61 TIMER 1 Interrupt IVG10 43 61#define IRQ_TIMER0 BFIN_IRQ(35) /* TIMER 0 Interrupt */
62 TIMER 2 Interrupt IVG10 44 62#define IRQ_TIMER1 BFIN_IRQ(36) /* TIMER 1 Interrupt */
63 TIMER 3 Interrupt IVG10 45 63#define IRQ_TIMER2 BFIN_IRQ(37) /* TIMER 2 Interrupt */
64 TIMER 4 Interrupt IVG10 46 64#define IRQ_TIMER3 BFIN_IRQ(38) /* TIMER 3 Interrupt */
65 TIMER 5 Interrupt IVG10 47 65#define IRQ_TIMER4 BFIN_IRQ(39) /* TIMER 4 Interrupt */
66 TIMER 6 Interrupt IVG10 48 66#define IRQ_TIMER5 BFIN_IRQ(40) /* TIMER 5 Interrupt */
67 TIMER 7 Interrupt IVG10 49 67#define IRQ_TIMER6 BFIN_IRQ(41) /* TIMER 6 Interrupt */
68 TIMER 8 Interrupt IVG10 50 68#define IRQ_TIMER7 BFIN_IRQ(42) /* TIMER 7 Interrupt */
69 TIMER 9 Interrupt IVG10 51 69#define IRQ_TIMER8 BFIN_IRQ(43) /* TIMER 8 Interrupt */
70 TIMER 10 Interrupt IVG10 52 70#define IRQ_TIMER9 BFIN_IRQ(44) /* TIMER 9 Interrupt */
71 TIMER 11 Interrupt IVG10 53 71#define IRQ_TIMER10 BFIN_IRQ(45) /* TIMER 10 Interrupt */
72 72#define IRQ_TIMER11 BFIN_IRQ(46) /* TIMER 11 Interrupt */
73 Programmable Flags0 A (8) IVG11 54 73#define IRQ_PROG0_INTA BFIN_IRQ(47) /* Programmable Flags0 A (8) */
74 Programmable Flags0 B (8) IVG11 55 74#define IRQ_PROG_INTA IRQ_PROG0_INTA /* Programmable Flags0 A (8) */
75 Programmable Flags1 A (8) IVG11 56 75#define IRQ_PROG0_INTB BFIN_IRQ(48) /* Programmable Flags0 B (8) */
76 Programmable Flags1 B (8) IVG11 57 76#define IRQ_PROG_INTB IRQ_PROG0_INTB /* Programmable Flags0 B (8) */
77 Programmable Flags2 A (8) IVG11 58 77#define IRQ_PROG1_INTA BFIN_IRQ(49) /* Programmable Flags1 A (8) */
78 Programmable Flags2 B (8) IVG11 59 78#define IRQ_PROG1_INTB BFIN_IRQ(50) /* Programmable Flags1 B (8) */
79 79#define IRQ_PROG2_INTA BFIN_IRQ(51) /* Programmable Flags2 A (8) */
80 MDMA1 0 write/read INT IVG8 60 80#define IRQ_PROG2_INTB BFIN_IRQ(52) /* Programmable Flags2 B (8) */
81 MDMA1 1 write/read INT IVG8 61 81#define IRQ_DMA1_WRRD0 BFIN_IRQ(53) /* MDMA1 0 write/read INT */
82 82#define IRQ_DMA_WRRD0 IRQ_DMA1_WRRD0 /* MDMA1 0 write/read INT */
83 MDMA2 0 write/read INT IVG9 62
84 MDMA2 1 write/read INT IVG9 63
85
86 IMDMA 0 write/read INT IVG12 64
87 IMDMA 1 write/read INT IVG12 65
88
89 Watch Dog Timer IVG13 66
90
91 Reserved interrupt IVG7 67
92 Reserved interrupt IVG7 68
93 Supplemental interrupt 0 IVG7 69
94 supplemental interrupt 1 IVG7 70
95
96 Softirq IVG14
97 System Call --
98 (lowest priority) IVG15
99
100 **********************************************************************/
101
102#define SYS_IRQS 71
103#define NR_PERI_INTS 64
104
105/*
106 * The ABSTRACT IRQ definitions
107 * the first seven of the following are fixed,
108 * the rest you change if you need to.
109 */
110/* IVG 0-6*/
111#define IRQ_EMU 0 /* Emulation */
112#define IRQ_RST 1 /* Reset */
113#define IRQ_NMI 2 /* Non Maskable Interrupt */
114#define IRQ_EVX 3 /* Exception */
115#define IRQ_UNUSED 4 /* Reserved interrupt */
116#define IRQ_HWERR 5 /* Hardware Error */
117#define IRQ_CORETMR 6 /* Core timer */
118
119#define IVG_BASE 7
120/* IVG 7 */
121#define IRQ_PLL_WAKEUP (IVG_BASE + 0) /* PLL Wakeup Interrupt */
122#define IRQ_DMA1_ERROR (IVG_BASE + 1) /* DMA1 Error (general) */
123#define IRQ_DMA_ERROR IRQ_DMA1_ERROR /* DMA1 Error (general) */
124#define IRQ_DMA2_ERROR (IVG_BASE + 2) /* DMA2 Error (general) */
125#define IRQ_IMDMA_ERROR (IVG_BASE + 3) /* IMDMA Error Interrupt */
126#define IRQ_PPI1_ERROR (IVG_BASE + 4) /* PPI1 Error Interrupt */
127#define IRQ_PPI_ERROR IRQ_PPI1_ERROR /* PPI1 Error Interrupt */
128#define IRQ_PPI2_ERROR (IVG_BASE + 5) /* PPI2 Error Interrupt */
129#define IRQ_SPORT0_ERROR (IVG_BASE + 6) /* SPORT0 Error Interrupt */
130#define IRQ_SPORT1_ERROR (IVG_BASE + 7) /* SPORT1 Error Interrupt */
131#define IRQ_SPI_ERROR (IVG_BASE + 8) /* SPI Error Interrupt */
132#define IRQ_UART_ERROR (IVG_BASE + 9) /* UART Error Interrupt */
133#define IRQ_RESERVED_ERROR (IVG_BASE + 10) /* Reversed Interrupt */
134/* IVG 8 */
135#define IRQ_DMA1_0 (IVG_BASE + 11) /* DMA1 0 Interrupt(PPI1) */
136#define IRQ_PPI IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
137#define IRQ_PPI0 IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
138#define IRQ_DMA1_1 (IVG_BASE + 12) /* DMA1 1 Interrupt(PPI2) */
139#define IRQ_PPI1 IRQ_DMA1_1 /* DMA1 1 Interrupt(PPI2) */
140#define IRQ_DMA1_2 (IVG_BASE + 13) /* DMA1 2 Interrupt */
141#define IRQ_DMA1_3 (IVG_BASE + 14) /* DMA1 3 Interrupt */
142#define IRQ_DMA1_4 (IVG_BASE + 15) /* DMA1 4 Interrupt */
143#define IRQ_DMA1_5 (IVG_BASE + 16) /* DMA1 5 Interrupt */
144#define IRQ_DMA1_6 (IVG_BASE + 17) /* DMA1 6 Interrupt */
145#define IRQ_DMA1_7 (IVG_BASE + 18) /* DMA1 7 Interrupt */
146#define IRQ_DMA1_8 (IVG_BASE + 19) /* DMA1 8 Interrupt */
147#define IRQ_DMA1_9 (IVG_BASE + 20) /* DMA1 9 Interrupt */
148#define IRQ_DMA1_10 (IVG_BASE + 21) /* DMA1 10 Interrupt */
149#define IRQ_DMA1_11 (IVG_BASE + 22) /* DMA1 11 Interrupt */
150/* IVG 9 */
151#define IRQ_DMA2_0 (IVG_BASE + 23) /* DMA2 0 (SPORT0 RX) */
152#define IRQ_SPORT0_RX IRQ_DMA2_0 /* DMA2 0 (SPORT0 RX) */
153#define IRQ_DMA2_1 (IVG_BASE + 24) /* DMA2 1 (SPORT0 TX) */
154#define IRQ_SPORT0_TX IRQ_DMA2_1 /* DMA2 1 (SPORT0 TX) */
155#define IRQ_DMA2_2 (IVG_BASE + 25) /* DMA2 2 (SPORT1 RX) */
156#define IRQ_SPORT1_RX IRQ_DMA2_2 /* DMA2 2 (SPORT1 RX) */
157#define IRQ_DMA2_3 (IVG_BASE + 26) /* DMA2 3 (SPORT2 TX) */
158#define IRQ_SPORT1_TX IRQ_DMA2_3 /* DMA2 3 (SPORT2 TX) */
159#define IRQ_DMA2_4 (IVG_BASE + 27) /* DMA2 4 (SPI) */
160#define IRQ_SPI IRQ_DMA2_4 /* DMA2 4 (SPI) */
161#define IRQ_DMA2_5 (IVG_BASE + 28) /* DMA2 5 (UART RX) */
162#define IRQ_UART_RX IRQ_DMA2_5 /* DMA2 5 (UART RX) */
163#define IRQ_DMA2_6 (IVG_BASE + 29) /* DMA2 6 (UART TX) */
164#define IRQ_UART_TX IRQ_DMA2_6 /* DMA2 6 (UART TX) */
165#define IRQ_DMA2_7 (IVG_BASE + 30) /* DMA2 7 Interrupt */
166#define IRQ_DMA2_8 (IVG_BASE + 31) /* DMA2 8 Interrupt */
167#define IRQ_DMA2_9 (IVG_BASE + 32) /* DMA2 9 Interrupt */
168#define IRQ_DMA2_10 (IVG_BASE + 33) /* DMA2 10 Interrupt */
169#define IRQ_DMA2_11 (IVG_BASE + 34) /* DMA2 11 Interrupt */
170/* IVG 10 */
171#define IRQ_TIMER0 (IVG_BASE + 35) /* TIMER 0 Interrupt */
172#define IRQ_TIMER1 (IVG_BASE + 36) /* TIMER 1 Interrupt */
173#define IRQ_TIMER2 (IVG_BASE + 37) /* TIMER 2 Interrupt */
174#define IRQ_TIMER3 (IVG_BASE + 38) /* TIMER 3 Interrupt */
175#define IRQ_TIMER4 (IVG_BASE + 39) /* TIMER 4 Interrupt */
176#define IRQ_TIMER5 (IVG_BASE + 40) /* TIMER 5 Interrupt */
177#define IRQ_TIMER6 (IVG_BASE + 41) /* TIMER 6 Interrupt */
178#define IRQ_TIMER7 (IVG_BASE + 42) /* TIMER 7 Interrupt */
179#define IRQ_TIMER8 (IVG_BASE + 43) /* TIMER 8 Interrupt */
180#define IRQ_TIMER9 (IVG_BASE + 44) /* TIMER 9 Interrupt */
181#define IRQ_TIMER10 (IVG_BASE + 45) /* TIMER 10 Interrupt */
182#define IRQ_TIMER11 (IVG_BASE + 46) /* TIMER 11 Interrupt */
183/* IVG 11 */
184#define IRQ_PROG0_INTA (IVG_BASE + 47) /* Programmable Flags0 A (8) */
185#define IRQ_PROG_INTA IRQ_PROG0_INTA /* Programmable Flags0 A (8) */
186#define IRQ_PROG0_INTB (IVG_BASE + 48) /* Programmable Flags0 B (8) */
187#define IRQ_PROG_INTB IRQ_PROG0_INTB /* Programmable Flags0 B (8) */
188#define IRQ_PROG1_INTA (IVG_BASE + 49) /* Programmable Flags1 A (8) */
189#define IRQ_PROG1_INTB (IVG_BASE + 50) /* Programmable Flags1 B (8) */
190#define IRQ_PROG2_INTA (IVG_BASE + 51) /* Programmable Flags2 A (8) */
191#define IRQ_PROG2_INTB (IVG_BASE + 52) /* Programmable Flags2 B (8) */
192/* IVG 8 */
193#define IRQ_DMA1_WRRD0 (IVG_BASE + 53) /* MDMA1 0 write/read INT */
194#define IRQ_DMA_WRRD0 IRQ_DMA1_WRRD0 /* MDMA1 0 write/read INT */
195#define IRQ_MEM_DMA0 IRQ_DMA1_WRRD0 83#define IRQ_MEM_DMA0 IRQ_DMA1_WRRD0
196#define IRQ_DMA1_WRRD1 (IVG_BASE + 54) /* MDMA1 1 write/read INT */ 84#define IRQ_DMA1_WRRD1 BFIN_IRQ(54) /* MDMA1 1 write/read INT */
197#define IRQ_DMA_WRRD1 IRQ_DMA1_WRRD1 /* MDMA1 1 write/read INT */ 85#define IRQ_DMA_WRRD1 IRQ_DMA1_WRRD1 /* MDMA1 1 write/read INT */
198#define IRQ_MEM_DMA1 IRQ_DMA1_WRRD1 86#define IRQ_MEM_DMA1 IRQ_DMA1_WRRD1
199/* IVG 9 */ 87#define IRQ_DMA2_WRRD0 BFIN_IRQ(55) /* MDMA2 0 write/read INT */
200#define IRQ_DMA2_WRRD0 (IVG_BASE + 55) /* MDMA2 0 write/read INT */
201#define IRQ_MEM_DMA2 IRQ_DMA2_WRRD0 88#define IRQ_MEM_DMA2 IRQ_DMA2_WRRD0
202#define IRQ_DMA2_WRRD1 (IVG_BASE + 56) /* MDMA2 1 write/read INT */ 89#define IRQ_DMA2_WRRD1 BFIN_IRQ(56) /* MDMA2 1 write/read INT */
203#define IRQ_MEM_DMA3 IRQ_DMA2_WRRD1 90#define IRQ_MEM_DMA3 IRQ_DMA2_WRRD1
204/* IVG 12 */ 91#define IRQ_IMDMA_WRRD0 BFIN_IRQ(57) /* IMDMA 0 write/read INT */
205#define IRQ_IMDMA_WRRD0 (IVG_BASE + 57) /* IMDMA 0 write/read INT */
206#define IRQ_IMEM_DMA0 IRQ_IMDMA_WRRD0 92#define IRQ_IMEM_DMA0 IRQ_IMDMA_WRRD0
207#define IRQ_IMDMA_WRRD1 (IVG_BASE + 58) /* IMDMA 1 write/read INT */ 93#define IRQ_IMDMA_WRRD1 BFIN_IRQ(58) /* IMDMA 1 write/read INT */
208#define IRQ_IMEM_DMA1 IRQ_IMDMA_WRRD1 94#define IRQ_IMEM_DMA1 IRQ_IMDMA_WRRD1
209/* IVG 13 */ 95#define IRQ_WATCH BFIN_IRQ(59) /* Watch Dog Timer */
210#define IRQ_WATCH (IVG_BASE + 59) /* Watch Dog Timer */ 96#define IRQ_RESERVED_1 BFIN_IRQ(60) /* Reserved interrupt */
211/* IVG 7 */ 97#define IRQ_RESERVED_2 BFIN_IRQ(61) /* Reserved interrupt */
212#define IRQ_RESERVED_1 (IVG_BASE + 60) /* Reserved interrupt */ 98#define IRQ_SUPPLE_0 BFIN_IRQ(62) /* Supplemental interrupt 0 */
213#define IRQ_RESERVED_2 (IVG_BASE + 61) /* Reserved interrupt */ 99#define IRQ_SUPPLE_1 BFIN_IRQ(63) /* supplemental interrupt 1 */
214#define IRQ_SUPPLE_0 (IVG_BASE + 62) /* Supplemental interrupt 0 */ 100
215#define IRQ_SUPPLE_1 (IVG_BASE + 63) /* supplemental interrupt 1 */ 101#define SYS_IRQS 71
216 102
217#define IRQ_PF0 73 103#define IRQ_PF0 73
218#define IRQ_PF1 74 104#define IRQ_PF1 74
@@ -266,158 +152,85 @@
266#define GPIO_IRQ_BASE IRQ_PF0 152#define GPIO_IRQ_BASE IRQ_PF0
267 153
268#define NR_MACH_IRQS (IRQ_PF47 + 1) 154#define NR_MACH_IRQS (IRQ_PF47 + 1)
269#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
270
271#define IVG7 7
272#define IVG8 8
273#define IVG9 9
274#define IVG10 10
275#define IVG11 11
276#define IVG12 12
277#define IVG13 13
278#define IVG14 14
279#define IVG15 15
280
281/*
282 * DEFAULT PRIORITIES:
283 */
284
285#define CONFIG_DEF_PLL_WAKEUP 7
286#define CONFIG_DEF_DMA1_ERROR 7
287#define CONFIG_DEF_DMA2_ERROR 7
288#define CONFIG_DEF_IMDMA_ERROR 7
289#define CONFIG_DEF_PPI1_ERROR 7
290#define CONFIG_DEF_PPI2_ERROR 7
291#define CONFIG_DEF_SPORT0_ERROR 7
292#define CONFIG_DEF_SPORT1_ERROR 7
293#define CONFIG_DEF_SPI_ERROR 7
294#define CONFIG_DEF_UART_ERROR 7
295#define CONFIG_DEF_RESERVED_ERROR 7
296#define CONFIG_DEF_DMA1_0 8
297#define CONFIG_DEF_DMA1_1 8
298#define CONFIG_DEF_DMA1_2 8
299#define CONFIG_DEF_DMA1_3 8
300#define CONFIG_DEF_DMA1_4 8
301#define CONFIG_DEF_DMA1_5 8
302#define CONFIG_DEF_DMA1_6 8
303#define CONFIG_DEF_DMA1_7 8
304#define CONFIG_DEF_DMA1_8 8
305#define CONFIG_DEF_DMA1_9 8
306#define CONFIG_DEF_DMA1_10 8
307#define CONFIG_DEF_DMA1_11 8
308#define CONFIG_DEF_DMA2_0 9
309#define CONFIG_DEF_DMA2_1 9
310#define CONFIG_DEF_DMA2_2 9
311#define CONFIG_DEF_DMA2_3 9
312#define CONFIG_DEF_DMA2_4 9
313#define CONFIG_DEF_DMA2_5 9
314#define CONFIG_DEF_DMA2_6 9
315#define CONFIG_DEF_DMA2_7 9
316#define CONFIG_DEF_DMA2_8 9
317#define CONFIG_DEF_DMA2_9 9
318#define CONFIG_DEF_DMA2_10 9
319#define CONFIG_DEF_DMA2_11 9
320#define CONFIG_DEF_TIMER0 10
321#define CONFIG_DEF_TIMER1 10
322#define CONFIG_DEF_TIMER2 10
323#define CONFIG_DEF_TIMER3 10
324#define CONFIG_DEF_TIMER4 10
325#define CONFIG_DEF_TIMER5 10
326#define CONFIG_DEF_TIMER6 10
327#define CONFIG_DEF_TIMER7 10
328#define CONFIG_DEF_TIMER8 10
329#define CONFIG_DEF_TIMER9 10
330#define CONFIG_DEF_TIMER10 10
331#define CONFIG_DEF_TIMER11 10
332#define CONFIG_DEF_PROG0_INTA 11
333#define CONFIG_DEF_PROG0_INTB 11
334#define CONFIG_DEF_PROG1_INTA 11
335#define CONFIG_DEF_PROG1_INTB 11
336#define CONFIG_DEF_PROG2_INTA 11
337#define CONFIG_DEF_PROG2_INTB 11
338#define CONFIG_DEF_DMA1_WRRD0 8
339#define CONFIG_DEF_DMA1_WRRD1 8
340#define CONFIG_DEF_DMA2_WRRD0 9
341#define CONFIG_DEF_DMA2_WRRD1 9
342#define CONFIG_DEF_IMDMA_WRRD0 12
343#define CONFIG_DEF_IMDMA_WRRD1 12
344#define CONFIG_DEF_WATCH 13
345#define CONFIG_DEF_RESERVED_1 7
346#define CONFIG_DEF_RESERVED_2 7
347#define CONFIG_DEF_SUPPLE_0 7
348#define CONFIG_DEF_SUPPLE_1 7
349 155
350/* IAR0 BIT FIELDS */ 156/* IAR0 BIT FIELDS */
351#define IRQ_PLL_WAKEUP_POS 0 157#define IRQ_PLL_WAKEUP_POS 0
352#define IRQ_DMA1_ERROR_POS 4 158#define IRQ_DMA1_ERROR_POS 4
353#define IRQ_DMA2_ERROR_POS 8 159#define IRQ_DMA2_ERROR_POS 8
354#define IRQ_IMDMA_ERROR_POS 12 160#define IRQ_IMDMA_ERROR_POS 12
355#define IRQ_PPI0_ERROR_POS 16 161#define IRQ_PPI0_ERROR_POS 16
356#define IRQ_PPI1_ERROR_POS 20 162#define IRQ_PPI1_ERROR_POS 20
357#define IRQ_SPORT0_ERROR_POS 24 163#define IRQ_SPORT0_ERROR_POS 24
358#define IRQ_SPORT1_ERROR_POS 28 164#define IRQ_SPORT1_ERROR_POS 28
165
359/* IAR1 BIT FIELDS */ 166/* IAR1 BIT FIELDS */
360#define IRQ_SPI_ERROR_POS 0 167#define IRQ_SPI_ERROR_POS 0
361#define IRQ_UART_ERROR_POS 4 168#define IRQ_UART_ERROR_POS 4
362#define IRQ_RESERVED_ERROR_POS 8 169#define IRQ_RESERVED_ERROR_POS 8
363#define IRQ_DMA1_0_POS 12 170#define IRQ_DMA1_0_POS 12
364#define IRQ_DMA1_1_POS 16 171#define IRQ_DMA1_1_POS 16
365#define IRQ_DMA1_2_POS 20 172#define IRQ_DMA1_2_POS 20
366#define IRQ_DMA1_3_POS 24 173#define IRQ_DMA1_3_POS 24
367#define IRQ_DMA1_4_POS 28 174#define IRQ_DMA1_4_POS 28
175
368/* IAR2 BIT FIELDS */ 176/* IAR2 BIT FIELDS */
369#define IRQ_DMA1_5_POS 0 177#define IRQ_DMA1_5_POS 0
370#define IRQ_DMA1_6_POS 4 178#define IRQ_DMA1_6_POS 4
371#define IRQ_DMA1_7_POS 8 179#define IRQ_DMA1_7_POS 8
372#define IRQ_DMA1_8_POS 12 180#define IRQ_DMA1_8_POS 12
373#define IRQ_DMA1_9_POS 16 181#define IRQ_DMA1_9_POS 16
374#define IRQ_DMA1_10_POS 20 182#define IRQ_DMA1_10_POS 20
375#define IRQ_DMA1_11_POS 24 183#define IRQ_DMA1_11_POS 24
376#define IRQ_DMA2_0_POS 28 184#define IRQ_DMA2_0_POS 28
185
377/* IAR3 BIT FIELDS */ 186/* IAR3 BIT FIELDS */
378#define IRQ_DMA2_1_POS 0 187#define IRQ_DMA2_1_POS 0
379#define IRQ_DMA2_2_POS 4 188#define IRQ_DMA2_2_POS 4
380#define IRQ_DMA2_3_POS 8 189#define IRQ_DMA2_3_POS 8
381#define IRQ_DMA2_4_POS 12 190#define IRQ_DMA2_4_POS 12
382#define IRQ_DMA2_5_POS 16 191#define IRQ_DMA2_5_POS 16
383#define IRQ_DMA2_6_POS 20 192#define IRQ_DMA2_6_POS 20
384#define IRQ_DMA2_7_POS 24 193#define IRQ_DMA2_7_POS 24
385#define IRQ_DMA2_8_POS 28 194#define IRQ_DMA2_8_POS 28
195
386/* IAR4 BIT FIELDS */ 196/* IAR4 BIT FIELDS */
387#define IRQ_DMA2_9_POS 0 197#define IRQ_DMA2_9_POS 0
388#define IRQ_DMA2_10_POS 4 198#define IRQ_DMA2_10_POS 4
389#define IRQ_DMA2_11_POS 8 199#define IRQ_DMA2_11_POS 8
390#define IRQ_TIMER0_POS 12 200#define IRQ_TIMER0_POS 12
391#define IRQ_TIMER1_POS 16 201#define IRQ_TIMER1_POS 16
392#define IRQ_TIMER2_POS 20 202#define IRQ_TIMER2_POS 20
393#define IRQ_TIMER3_POS 24 203#define IRQ_TIMER3_POS 24
394#define IRQ_TIMER4_POS 28 204#define IRQ_TIMER4_POS 28
205
395/* IAR5 BIT FIELDS */ 206/* IAR5 BIT FIELDS */
396#define IRQ_TIMER5_POS 0 207#define IRQ_TIMER5_POS 0
397#define IRQ_TIMER6_POS 4 208#define IRQ_TIMER6_POS 4
398#define IRQ_TIMER7_POS 8 209#define IRQ_TIMER7_POS 8
399#define IRQ_TIMER8_POS 12 210#define IRQ_TIMER8_POS 12
400#define IRQ_TIMER9_POS 16 211#define IRQ_TIMER9_POS 16
401#define IRQ_TIMER10_POS 20 212#define IRQ_TIMER10_POS 20
402#define IRQ_TIMER11_POS 24 213#define IRQ_TIMER11_POS 24
403#define IRQ_PROG0_INTA_POS 28 214#define IRQ_PROG0_INTA_POS 28
215
404/* IAR6 BIT FIELDS */ 216/* IAR6 BIT FIELDS */
405#define IRQ_PROG0_INTB_POS 0 217#define IRQ_PROG0_INTB_POS 0
406#define IRQ_PROG1_INTA_POS 4 218#define IRQ_PROG1_INTA_POS 4
407#define IRQ_PROG1_INTB_POS 8 219#define IRQ_PROG1_INTB_POS 8
408#define IRQ_PROG2_INTA_POS 12 220#define IRQ_PROG2_INTA_POS 12
409#define IRQ_PROG2_INTB_POS 16 221#define IRQ_PROG2_INTB_POS 16
410#define IRQ_DMA1_WRRD0_POS 20 222#define IRQ_DMA1_WRRD0_POS 20
411#define IRQ_DMA1_WRRD1_POS 24 223#define IRQ_DMA1_WRRD1_POS 24
412#define IRQ_DMA2_WRRD0_POS 28 224#define IRQ_DMA2_WRRD0_POS 28
413/* IAR7 BIT FIELDS */
414#define IRQ_DMA2_WRRD1_POS 0
415#define IRQ_IMDMA_WRRD0_POS 4
416#define IRQ_IMDMA_WRRD1_POS 8
417#define IRQ_WDTIMER_POS 12
418#define IRQ_RESERVED_1_POS 16
419#define IRQ_RESERVED_2_POS 20
420#define IRQ_SUPPLE_0_POS 24
421#define IRQ_SUPPLE_1_POS 28
422 225
423#endif /* _BF561_IRQ_H_ */ 226/* IAR7 BIT FIELDS */
227#define IRQ_DMA2_WRRD1_POS 0
228#define IRQ_IMDMA_WRRD0_POS 4
229#define IRQ_IMDMA_WRRD1_POS 8
230#define IRQ_WDTIMER_POS 12
231#define IRQ_RESERVED_1_POS 16
232#define IRQ_RESERVED_2_POS 20
233#define IRQ_SUPPLE_0_POS 24
234#define IRQ_SUPPLE_1_POS 28
235
236#endif
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 7b07740cf68c..85abd8be1343 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -24,17 +24,23 @@ static DEFINE_SPINLOCK(boot_lock);
24 24
25void __init platform_init_cpus(void) 25void __init platform_init_cpus(void)
26{ 26{
27 cpu_set(0, cpu_possible_map); /* CoreA */ 27 struct cpumask mask;
28 cpu_set(1, cpu_possible_map); /* CoreB */ 28
29 cpumask_set_cpu(0, &mask); /* CoreA */
30 cpumask_set_cpu(1, &mask); /* CoreB */
31 init_cpu_possible(&mask);
29} 32}
30 33
31void __init platform_prepare_cpus(unsigned int max_cpus) 34void __init platform_prepare_cpus(unsigned int max_cpus)
32{ 35{
36 struct cpumask mask;
37
33 bfin_relocate_coreb_l1_mem(); 38 bfin_relocate_coreb_l1_mem();
34 39
35 /* Both cores ought to be present on a bf561! */ 40 /* Both cores ought to be present on a bf561! */
36 cpu_set(0, cpu_present_map); /* CoreA */ 41 cpumask_set_cpu(0, &mask); /* CoreA */
37 cpu_set(1, cpu_present_map); /* CoreB */ 42 cpumask_set_cpu(1, &mask); /* CoreB */
43 init_cpu_present(&mask);
38} 44}
39 45
40int __init setup_profiling_timer(unsigned int multiplier) /* not supported */ 46int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
@@ -62,9 +68,6 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
62 bfin_write_SICB_IWR1(IWR_DISABLE_ALL); 68 bfin_write_SICB_IWR1(IWR_DISABLE_ALL);
63 SSYNC(); 69 SSYNC();
64 70
65 /* Store CPU-private information to the cpu_data array. */
66 bfin_setup_cpudata(cpu);
67
68 /* We are done with local CPU inits, unblock the boot CPU. */ 71 /* We are done with local CPU inits, unblock the boot CPU. */
69 set_cpu_online(cpu, true); 72 set_cpu_online(cpu, true);
70 spin_lock(&boot_lock); 73 spin_lock(&boot_lock);
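The board code now fills a local cpumask with cpumask_set_cpu() and hands it to init_cpu_possible()/init_cpu_present() instead of writing the global maps directly. A self-contained sketch of the idiom; note it clears the on-stack mask first, which the hunk above leaves implicit:

	#include <linux/init.h>
	#include <linux/cpumask.h>

	static void __init example_init_cpus(void)
	{
		struct cpumask mask;

		cpumask_clear(&mask);		/* an on-stack mask starts undefined */
		cpumask_set_cpu(0, &mask);	/* CoreA */
		cpumask_set_cpu(1, &mask);	/* CoreB */
		init_cpu_possible(&mask);	/* copies the mask into the global map */
	}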
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c
index 5e4112e518a9..f5685a496c58 100644
--- a/arch/blackfin/mach-common/dpmc.c
+++ b/arch/blackfin/mach-common/dpmc.c
@@ -85,10 +85,11 @@ static void bfin_wakeup_cpu(void)
85{ 85{
86 unsigned int cpu; 86 unsigned int cpu;
87 unsigned int this_cpu = smp_processor_id(); 87 unsigned int this_cpu = smp_processor_id();
88 cpumask_t mask = cpu_online_map; 88 cpumask_t mask;
89 89
90 cpu_clear(this_cpu, mask); 90 cpumask_copy(&mask, cpu_online_mask);
91 for_each_cpu_mask(cpu, mask) 91 cpumask_clear_cpu(this_cpu, &mask);
92 for_each_cpu(cpu, &mask)
92 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0); 93 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
93} 94}
94 95
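bfin_wakeup_cpu() now copies cpu_online_mask into a local mask, clears the current CPU, and walks the rest with for_each_cpu(), replacing the deprecated cpu_clear()/for_each_cpu_mask() pair. An equivalent form that skips the on-stack copy altogether, shown only as an alternative to what the hunk does:

	unsigned int cpu, this_cpu = smp_processor_id();

	for_each_online_cpu(cpu)
		if (cpu != this_cpu)
			platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);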
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 43d9fb195c1e..1177369f9922 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -19,32 +19,14 @@
19#ifdef CONFIG_IPIPE 19#ifdef CONFIG_IPIPE
20#include <linux/ipipe.h> 20#include <linux/ipipe.h>
21#endif 21#endif
22#ifdef CONFIG_KGDB
23#include <linux/kgdb.h>
24#endif
25#include <asm/traps.h> 22#include <asm/traps.h>
26#include <asm/blackfin.h> 23#include <asm/blackfin.h>
27#include <asm/gpio.h> 24#include <asm/gpio.h>
28#include <asm/irq_handler.h> 25#include <asm/irq_handler.h>
29#include <asm/dpmc.h> 26#include <asm/dpmc.h>
30#include <asm/bfin5xx_spi.h>
31#include <asm/bfin_sport.h>
32#include <asm/bfin_can.h>
33 27
34#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) 28#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
35 29
36#ifdef BF537_FAMILY
37# define BF537_GENERIC_ERROR_INT_DEMUX
38# define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */
39# define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */
40# define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */
41# define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */
42# define UART_ERR_MASK (0x6) /* UART_IIR */
43# define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */
44#else
45# undef BF537_GENERIC_ERROR_INT_DEMUX
46#endif
47
48/* 30/*
49 * NOTES: 31 * NOTES:
50 * - we have separated the physical Hardware interrupt from the 32 * - we have separated the physical Hardware interrupt from the
@@ -63,22 +45,19 @@ unsigned long bfin_irq_flags = 0x1f;
63EXPORT_SYMBOL(bfin_irq_flags); 45EXPORT_SYMBOL(bfin_irq_flags);
64#endif 46#endif
65 47
66/* The number of spurious interrupts */
67atomic_t num_spurious;
68
69#ifdef CONFIG_PM 48#ifdef CONFIG_PM
70unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */ 49unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */
71unsigned vr_wakeup; 50unsigned vr_wakeup;
72#endif 51#endif
73 52
74struct ivgx { 53static struct ivgx {
75 /* irq number for request_irq, available in mach-bf5xx/irq.h */ 54 /* irq number for request_irq, available in mach-bf5xx/irq.h */
76 unsigned int irqno; 55 unsigned int irqno;
77 /* corresponding bit in the SIC_ISR register */ 56 /* corresponding bit in the SIC_ISR register */
78 unsigned int isrflag; 57 unsigned int isrflag;
79} ivg_table[NR_PERI_INTS]; 58} ivg_table[NR_PERI_INTS];
80 59
81struct ivg_slice { 60static struct ivg_slice {
82 /* position of first irq in ivg_table for given ivg */ 61 /* position of first irq in ivg_table for given ivg */
83 struct ivgx *ifirst; 62 struct ivgx *ifirst;
84 struct ivgx *istop; 63 struct ivgx *istop;
@@ -125,7 +104,7 @@ static void __init search_IAR(void)
125 * This is for core internal IRQs 104 * This is for core internal IRQs
126 */ 105 */
127 106
128static void bfin_ack_noop(struct irq_data *d) 107void bfin_ack_noop(struct irq_data *d)
129{ 108{
130 /* Dummy function. */ 109 /* Dummy function. */
131} 110}
@@ -154,26 +133,24 @@ static void bfin_core_unmask_irq(struct irq_data *d)
154 return; 133 return;
155} 134}
156 135
157static void bfin_internal_mask_irq(unsigned int irq) 136void bfin_internal_mask_irq(unsigned int irq)
158{ 137{
159 unsigned long flags; 138 unsigned long flags = hard_local_irq_save();
160 139
161#ifdef CONFIG_BF53x 140#ifdef SIC_IMASK0
162 flags = hard_local_irq_save(); 141 unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
163 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & 142 unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
164 ~(1 << SIC_SYSIRQ(irq)));
165#else
166 unsigned mask_bank, mask_bit;
167 flags = hard_local_irq_save();
168 mask_bank = SIC_SYSIRQ(irq) / 32;
169 mask_bit = SIC_SYSIRQ(irq) % 32;
170 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & 143 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
171 ~(1 << mask_bit)); 144 ~(1 << mask_bit));
172#ifdef CONFIG_SMP 145# ifdef CONFIG_SMP
173 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) & 146 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
174 ~(1 << mask_bit)); 147 ~(1 << mask_bit));
148# endif
149#else
150 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
151 ~(1 << SIC_SYSIRQ(irq)));
175#endif 152#endif
176#endif 153
177 hard_local_irq_restore(flags); 154 hard_local_irq_restore(flags);
178} 155}
179 156
@@ -186,33 +163,31 @@ static void bfin_internal_mask_irq_chip(struct irq_data *d)
186static void bfin_internal_unmask_irq_affinity(unsigned int irq, 163static void bfin_internal_unmask_irq_affinity(unsigned int irq,
187 const struct cpumask *affinity) 164 const struct cpumask *affinity)
188#else 165#else
189static void bfin_internal_unmask_irq(unsigned int irq) 166void bfin_internal_unmask_irq(unsigned int irq)
190#endif 167#endif
191{ 168{
192 unsigned long flags; 169 unsigned long flags = hard_local_irq_save();
193 170
194#ifdef CONFIG_BF53x 171#ifdef SIC_IMASK0
195 flags = hard_local_irq_save(); 172 unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
196 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 173 unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
197 (1 << SIC_SYSIRQ(irq))); 174# ifdef CONFIG_SMP
198#else
199 unsigned mask_bank, mask_bit;
200 flags = hard_local_irq_save();
201 mask_bank = SIC_SYSIRQ(irq) / 32;
202 mask_bit = SIC_SYSIRQ(irq) % 32;
203#ifdef CONFIG_SMP
204 if (cpumask_test_cpu(0, affinity)) 175 if (cpumask_test_cpu(0, affinity))
205#endif 176# endif
206 bfin_write_SIC_IMASK(mask_bank, 177 bfin_write_SIC_IMASK(mask_bank,
207 bfin_read_SIC_IMASK(mask_bank) | 178 bfin_read_SIC_IMASK(mask_bank) |
208 (1 << mask_bit)); 179 (1 << mask_bit));
209#ifdef CONFIG_SMP 180# ifdef CONFIG_SMP
210 if (cpumask_test_cpu(1, affinity)) 181 if (cpumask_test_cpu(1, affinity))
211 bfin_write_SICB_IMASK(mask_bank, 182 bfin_write_SICB_IMASK(mask_bank,
212 bfin_read_SICB_IMASK(mask_bank) | 183 bfin_read_SICB_IMASK(mask_bank) |
213 (1 << mask_bit)); 184 (1 << mask_bit));
185# endif
186#else
187 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
188 (1 << SIC_SYSIRQ(irq)));
214#endif 189#endif
215#endif 190
216 hard_local_irq_restore(flags); 191 hard_local_irq_restore(flags);
217} 192}
218 193
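Both the mask and unmask paths now key off whether SIC_IMASK0 exists instead of listing CPU families, and they derive the register bank and bit from the system IRQ number. The arithmetic, pulled out as a sketch:

	static inline void example_sic_coords(unsigned int irq,
					      unsigned int *bank, unsigned int *bit)
	{
		unsigned int sys = SIC_SYSIRQ(irq);	/* irq - (IRQ_CORETMR + 1) */

		*bank = sys / 32;	/* which SIC_IMASKx register */
		*bit  = sys % 32;	/* which bit inside it */
	}

	/* e.g. peripheral index 40: bank 1 (SIC_IMASK1), bit 8 */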
@@ -295,6 +270,8 @@ static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
295{ 270{
296 return bfin_internal_set_wake(d->irq, state); 271 return bfin_internal_set_wake(d->irq, state);
297} 272}
273#else
274# define bfin_internal_set_wake_chip NULL
298#endif 275#endif
299 276
300static struct irq_chip bfin_core_irqchip = { 277static struct irq_chip bfin_core_irqchip = {
@@ -315,12 +292,10 @@ static struct irq_chip bfin_internal_irqchip = {
315#ifdef CONFIG_SMP 292#ifdef CONFIG_SMP
316 .irq_set_affinity = bfin_internal_set_affinity, 293 .irq_set_affinity = bfin_internal_set_affinity,
317#endif 294#endif
318#ifdef CONFIG_PM
319 .irq_set_wake = bfin_internal_set_wake_chip, 295 .irq_set_wake = bfin_internal_set_wake_chip,
320#endif
321}; 296};
322 297
323static void bfin_handle_irq(unsigned irq) 298void bfin_handle_irq(unsigned irq)
324{ 299{
325#ifdef CONFIG_IPIPE 300#ifdef CONFIG_IPIPE
326 struct pt_regs regs; /* Contents not used. */ 301 struct pt_regs regs; /* Contents not used. */
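The CONFIG_PM guards have moved out of the irq_chip initializers: when power management is off, the set_wake callbacks are simply defined to NULL (see the #else branches added above), so .irq_set_wake can be assigned unconditionally. The pattern in isolation:

	#include <linux/irq.h>

	#ifdef CONFIG_PM
	static int example_set_wake(struct irq_data *d, unsigned int state)
	{
		return 0;	/* real wakeup programming would go here */
	}
	#else
	# define example_set_wake NULL	/* keeps the initializer #ifdef-free */
	#endif

	static struct irq_chip example_chip = {
		.name		= "example",
		.irq_set_wake	= example_set_wake,
	};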
@@ -332,102 +307,6 @@ static void bfin_handle_irq(unsigned irq)
332#endif /* !CONFIG_IPIPE */ 307#endif /* !CONFIG_IPIPE */
333} 308}
334 309
335#ifdef BF537_GENERIC_ERROR_INT_DEMUX
336static int error_int_mask;
337
338static void bfin_generic_error_mask_irq(struct irq_data *d)
339{
340 error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
341 if (!error_int_mask)
342 bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
343}
344
345static void bfin_generic_error_unmask_irq(struct irq_data *d)
346{
347 bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
348 error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
349}
350
351static struct irq_chip bfin_generic_error_irqchip = {
352 .name = "ERROR",
353 .irq_ack = bfin_ack_noop,
354 .irq_mask_ack = bfin_generic_error_mask_irq,
355 .irq_mask = bfin_generic_error_mask_irq,
356 .irq_unmask = bfin_generic_error_unmask_irq,
357};
358
359static void bfin_demux_error_irq(unsigned int int_err_irq,
360 struct irq_desc *inta_desc)
361{
362 int irq = 0;
363
364#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
365 if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
366 irq = IRQ_MAC_ERROR;
367 else
368#endif
369 if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
370 irq = IRQ_SPORT0_ERROR;
371 else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
372 irq = IRQ_SPORT1_ERROR;
373 else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
374 irq = IRQ_PPI_ERROR;
375 else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
376 irq = IRQ_CAN_ERROR;
377 else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
378 irq = IRQ_SPI_ERROR;
379 else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
380 irq = IRQ_UART0_ERROR;
381 else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
382 irq = IRQ_UART1_ERROR;
383
384 if (irq) {
385 if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
386 bfin_handle_irq(irq);
387 else {
388
389 switch (irq) {
390 case IRQ_PPI_ERROR:
391 bfin_write_PPI_STATUS(PPI_ERR_MASK);
392 break;
393#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
394 case IRQ_MAC_ERROR:
395 bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
396 break;
397#endif
398 case IRQ_SPORT0_ERROR:
399 bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
400 break;
401
402 case IRQ_SPORT1_ERROR:
403 bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
404 break;
405
406 case IRQ_CAN_ERROR:
407 bfin_write_CAN_GIS(CAN_ERR_MASK);
408 break;
409
410 case IRQ_SPI_ERROR:
411 bfin_write_SPI_STAT(SPI_ERR_MASK);
412 break;
413
414 default:
415 break;
416 }
417
418 pr_debug("IRQ %d:"
419 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
420 irq);
421 }
422 } else
423 printk(KERN_ERR
424 "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
425 " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
426 __func__, __FILE__, __LINE__);
427
428}
429#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
430
431#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 310#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
432static int mac_stat_int_mask; 311static int mac_stat_int_mask;
433 312
@@ -468,7 +347,7 @@ static void bfin_mac_status_mask_irq(struct irq_data *d)
468 unsigned int irq = d->irq; 347 unsigned int irq = d->irq;
469 348
470 mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT)); 349 mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
471#ifdef BF537_GENERIC_ERROR_INT_DEMUX 350#ifdef BF537_FAMILY
472 switch (irq) { 351 switch (irq) {
473 case IRQ_MAC_PHYINT: 352 case IRQ_MAC_PHYINT:
474 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE); 353 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
@@ -487,7 +366,7 @@ static void bfin_mac_status_unmask_irq(struct irq_data *d)
487{ 366{
488 unsigned int irq = d->irq; 367 unsigned int irq = d->irq;
489 368
490#ifdef BF537_GENERIC_ERROR_INT_DEMUX 369#ifdef BF537_FAMILY
491 switch (irq) { 370 switch (irq) {
492 case IRQ_MAC_PHYINT: 371 case IRQ_MAC_PHYINT:
493 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE); 372 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
@@ -505,12 +384,14 @@ static void bfin_mac_status_unmask_irq(struct irq_data *d)
505#ifdef CONFIG_PM 384#ifdef CONFIG_PM
506int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state) 385int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
507{ 386{
508#ifdef BF537_GENERIC_ERROR_INT_DEMUX 387#ifdef BF537_FAMILY
509 return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state); 388 return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
510#else 389#else
511 return bfin_internal_set_wake(IRQ_MAC_ERROR, state); 390 return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
512#endif 391#endif
513} 392}
393#else
394# define bfin_mac_status_set_wake NULL
514#endif 395#endif
515 396
516static struct irq_chip bfin_mac_status_irqchip = { 397static struct irq_chip bfin_mac_status_irqchip = {
@@ -519,13 +400,11 @@ static struct irq_chip bfin_mac_status_irqchip = {
519 .irq_mask_ack = bfin_mac_status_mask_irq, 400 .irq_mask_ack = bfin_mac_status_mask_irq,
520 .irq_mask = bfin_mac_status_mask_irq, 401 .irq_mask = bfin_mac_status_mask_irq,
521 .irq_unmask = bfin_mac_status_unmask_irq, 402 .irq_unmask = bfin_mac_status_unmask_irq,
522#ifdef CONFIG_PM
523 .irq_set_wake = bfin_mac_status_set_wake, 403 .irq_set_wake = bfin_mac_status_set_wake,
524#endif
525}; 404};
526 405
527static void bfin_demux_mac_status_irq(unsigned int int_err_irq, 406void bfin_demux_mac_status_irq(unsigned int int_err_irq,
528 struct irq_desc *inta_desc) 407 struct irq_desc *inta_desc)
529{ 408{
530 int i, irq = 0; 409 int i, irq = 0;
531 u32 status = bfin_read_EMAC_SYSTAT(); 410 u32 status = bfin_read_EMAC_SYSTAT();
@@ -680,29 +559,48 @@ static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
680} 559}
681 560
682#ifdef CONFIG_PM 561#ifdef CONFIG_PM
683int bfin_gpio_set_wake(struct irq_data *d, unsigned int state) 562static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
684{ 563{
685 return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state); 564 return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
686} 565}
566#else
567# define bfin_gpio_set_wake NULL
687#endif 568#endif
688 569
689static void bfin_demux_gpio_irq(unsigned int inta_irq, 570static void bfin_demux_gpio_block(unsigned int irq)
690 struct irq_desc *desc)
691{ 571{
692 unsigned int i, gpio, mask, irq, search = 0; 572 unsigned int gpio, mask;
573
574 gpio = irq_to_gpio(irq);
575 mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
576
577 while (mask) {
578 if (mask & 1)
579 bfin_handle_irq(irq);
580 irq++;
581 mask >>= 1;
582 }
583}
584
585void bfin_demux_gpio_irq(unsigned int inta_irq,
586 struct irq_desc *desc)
587{
588 unsigned int irq;
693 589
694 switch (inta_irq) { 590 switch (inta_irq) {
695#if defined(CONFIG_BF53x) 591#if defined(BF537_FAMILY)
696 case IRQ_PROG_INTA: 592 case IRQ_PF_INTA_PG_INTA:
697 irq = IRQ_PF0; 593 bfin_demux_gpio_block(IRQ_PF0);
698 search = 1; 594 irq = IRQ_PG0;
699 break; 595 break;
700# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) 596 case IRQ_PH_INTA_MAC_RX:
701 case IRQ_MAC_RX:
702 irq = IRQ_PH0; 597 irq = IRQ_PH0;
703 break; 598 break;
704# endif 599#elif defined(BF533_FAMILY)
705#elif defined(CONFIG_BF538) || defined(CONFIG_BF539) 600 case IRQ_PROG_INTA:
601 irq = IRQ_PF0;
602 break;
603#elif defined(BF538_FAMILY)
706 case IRQ_PORTF_INTA: 604 case IRQ_PORTF_INTA:
707 irq = IRQ_PF0; 605 irq = IRQ_PF0;
708 break; 606 break;
@@ -732,31 +630,7 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
732 return; 630 return;
733 } 631 }
734 632
735 if (search) { 633 bfin_demux_gpio_block(irq);
736 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
737 irq += i;
738
739 mask = get_gpiop_data(i) & get_gpiop_maska(i);
740
741 while (mask) {
742 if (mask & 1)
743 bfin_handle_irq(irq);
744 irq++;
745 mask >>= 1;
746 }
747 }
748 } else {
749 gpio = irq_to_gpio(irq);
750 mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
751
752 do {
753 if (mask & 1)
754 bfin_handle_irq(irq);
755 irq++;
756 mask >>= 1;
757 } while (mask);
758 }
759
760} 634}
761 635
762#else /* CONFIG_BF54x */ 636#else /* CONFIG_BF54x */
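The per-port scan has been factored into bfin_demux_gpio_block(): it reads the pending-and-unmasked bits for one GPIO bank and dispatches each set bit, and the multi-bank search path now just calls it per block. The bit walk, shown standalone:

	static void example_scan_block(unsigned int first_irq, unsigned int pending)
	{
		unsigned int irq = first_irq;

		while (pending) {
			if (pending & 1)
				bfin_handle_irq(irq);	/* dispatch this pin's IRQ */
			irq++;
			pending >>= 1;
		}
	}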
@@ -974,15 +848,11 @@ static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
974} 848}
975 849
976#ifdef CONFIG_PM 850#ifdef CONFIG_PM
977u32 pint_saved_masks[NR_PINT_SYS_IRQS]; 851static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
978u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
979
980int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
981{ 852{
982 u32 pint_irq; 853 u32 pint_irq;
983 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; 854 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
984 u32 bank = PINT_2_BANK(pint_val); 855 u32 bank = PINT_2_BANK(pint_val);
985 u32 pintbit = PINT_BIT(pint_val);
986 856
987 switch (bank) { 857 switch (bank) {
988 case 0: 858 case 0:
@@ -1003,46 +873,14 @@ int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
1003 873
1004 bfin_internal_set_wake(pint_irq, state); 874 bfin_internal_set_wake(pint_irq, state);
1005 875
1006 if (state)
1007 pint_wakeup_masks[bank] |= pintbit;
1008 else
1009 pint_wakeup_masks[bank] &= ~pintbit;
1010
1011 return 0; 876 return 0;
1012} 877}
1013 878#else
1014u32 bfin_pm_setup(void) 879# define bfin_gpio_set_wake NULL
1015{
1016 u32 val, i;
1017
1018 for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
1019 val = pint[i]->mask_clear;
1020 pint_saved_masks[i] = val;
1021 if (val ^ pint_wakeup_masks[i]) {
1022 pint[i]->mask_clear = val;
1023 pint[i]->mask_set = pint_wakeup_masks[i];
1024 }
1025 }
1026
1027 return 0;
1028}
1029
1030void bfin_pm_restore(void)
1031{
1032 u32 i, val;
1033
1034 for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
1035 val = pint_saved_masks[i];
1036 if (val ^ pint_wakeup_masks[i]) {
1037 pint[i]->mask_clear = pint[i]->mask_clear;
1038 pint[i]->mask_set = val;
1039 }
1040 }
1041}
1042#endif 880#endif
1043 881
1044static void bfin_demux_gpio_irq(unsigned int inta_irq, 882void bfin_demux_gpio_irq(unsigned int inta_irq,
1045 struct irq_desc *desc) 883 struct irq_desc *desc)
1046{ 884{
1047 u32 bank, pint_val; 885 u32 bank, pint_val;
1048 u32 request, irq; 886 u32 request, irq;
@@ -1091,9 +929,7 @@ static struct irq_chip bfin_gpio_irqchip = {
1091 .irq_set_type = bfin_gpio_irq_type, 929 .irq_set_type = bfin_gpio_irq_type,
1092 .irq_startup = bfin_gpio_irq_startup, 930 .irq_startup = bfin_gpio_irq_startup,
1093 .irq_shutdown = bfin_gpio_irq_shutdown, 931 .irq_shutdown = bfin_gpio_irq_shutdown,
1094#ifdef CONFIG_PM
1095 .irq_set_wake = bfin_gpio_set_wake, 932 .irq_set_wake = bfin_gpio_set_wake,
1096#endif
1097}; 933};
1098 934
1099void __cpuinit init_exception_vectors(void) 935void __cpuinit init_exception_vectors(void)
@@ -1127,12 +963,12 @@ int __init init_arch_irq(void)
1127{ 963{
1128 int irq; 964 int irq;
1129 unsigned long ilat = 0; 965 unsigned long ilat = 0;
966
1130 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ 967 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
1131#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \ 968#ifdef SIC_IMASK0
1132 || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
1133 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); 969 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
1134 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); 970 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
1135# ifdef CONFIG_BF54x 971# ifdef SIC_IMASK2
1136 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); 972 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
1137# endif 973# endif
1138# ifdef CONFIG_SMP 974# ifdef CONFIG_SMP
@@ -1145,11 +981,6 @@ int __init init_arch_irq(void)
1145 981
1146 local_irq_disable(); 982 local_irq_disable();
1147 983
1148#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
1149 /* Clear EMAC Interrupt Status bits so we can demux it later */
1150 bfin_write_EMAC_SYSTAT(-1);
1151#endif
1152
1153#ifdef CONFIG_BF54x 984#ifdef CONFIG_BF54x
1154# ifdef CONFIG_PINTx_REASSIGN 985# ifdef CONFIG_PINTx_REASSIGN
1155 pint[0]->assign = CONFIG_PINT0_ASSIGN; 986 pint[0]->assign = CONFIG_PINT0_ASSIGN;
@@ -1168,11 +999,11 @@ int __init init_arch_irq(void)
1168 irq_set_chip(irq, &bfin_internal_irqchip); 999 irq_set_chip(irq, &bfin_internal_irqchip);
1169 1000
1170 switch (irq) { 1001 switch (irq) {
1171#if defined(CONFIG_BF53x) 1002#if defined(BF537_FAMILY)
1003 case IRQ_PH_INTA_MAC_RX:
1004 case IRQ_PF_INTA_PG_INTA:
1005#elif defined(BF533_FAMILY)
1172 case IRQ_PROG_INTA: 1006 case IRQ_PROG_INTA:
1173# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1174 case IRQ_MAC_RX:
1175# endif
1176#elif defined(CONFIG_BF54x) 1007#elif defined(CONFIG_BF54x)
1177 case IRQ_PINT0: 1008 case IRQ_PINT0:
1178 case IRQ_PINT1: 1009 case IRQ_PINT1:
@@ -1186,16 +1017,11 @@ int __init init_arch_irq(void)
1186 case IRQ_PROG0_INTA: 1017 case IRQ_PROG0_INTA:
1187 case IRQ_PROG1_INTA: 1018 case IRQ_PROG1_INTA:
1188 case IRQ_PROG2_INTA: 1019 case IRQ_PROG2_INTA:
1189#elif defined(CONFIG_BF538) || defined(CONFIG_BF539) 1020#elif defined(BF538_FAMILY)
1190 case IRQ_PORTF_INTA: 1021 case IRQ_PORTF_INTA:
1191#endif 1022#endif
1192 irq_set_chained_handler(irq, bfin_demux_gpio_irq); 1023 irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1193 break; 1024 break;
1194#ifdef BF537_GENERIC_ERROR_INT_DEMUX
1195 case IRQ_GENERIC_ERROR:
1196 irq_set_chained_handler(irq, bfin_demux_error_irq);
1197 break;
1198#endif
1199#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1025#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1200 case IRQ_MAC_ERROR: 1026 case IRQ_MAC_ERROR:
1201 irq_set_chained_handler(irq, 1027 irq_set_chained_handler(irq,
@@ -1213,11 +1039,10 @@ int __init init_arch_irq(void)
1213 case IRQ_CORETMR: 1039 case IRQ_CORETMR:
1214# ifdef CONFIG_SMP 1040# ifdef CONFIG_SMP
1215 irq_set_handler(irq, handle_percpu_irq); 1041 irq_set_handler(irq, handle_percpu_irq);
1216 break;
1217# else 1042# else
1218 irq_set_handler(irq, handle_simple_irq); 1043 irq_set_handler(irq, handle_simple_irq);
1219 break;
1220# endif 1044# endif
1045 break;
1221#endif 1046#endif
1222 1047
1223#ifdef CONFIG_TICKSOURCE_GPTMR0 1048#ifdef CONFIG_TICKSOURCE_GPTMR0
@@ -1226,26 +1051,17 @@ int __init init_arch_irq(void)
1226 break; 1051 break;
1227#endif 1052#endif
1228 1053
1229#ifdef CONFIG_IPIPE
1230 default: 1054 default:
1055#ifdef CONFIG_IPIPE
1231 irq_set_handler(irq, handle_level_irq); 1056 irq_set_handler(irq, handle_level_irq);
1232 break; 1057#else
1233#else /* !CONFIG_IPIPE */
1234 default:
1235 irq_set_handler(irq, handle_simple_irq); 1058 irq_set_handler(irq, handle_simple_irq);
1059#endif
1236 break; 1060 break;
1237#endif /* !CONFIG_IPIPE */
1238 } 1061 }
1239 } 1062 }
1240 1063
1241#ifdef BF537_GENERIC_ERROR_INT_DEMUX 1064 init_mach_irq();
1242 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
1243 irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip,
1244 handle_level_irq);
1245#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1246 irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
1247#endif
1248#endif
1249 1065
1250#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1066#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1251 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) 1067 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
@@ -1307,53 +1123,54 @@ int __init init_arch_irq(void)
1307#ifdef CONFIG_DO_IRQ_L1 1123#ifdef CONFIG_DO_IRQ_L1
1308__attribute__((l1_text)) 1124__attribute__((l1_text))
1309#endif 1125#endif
1310void do_irq(int vec, struct pt_regs *fp) 1126static int vec_to_irq(int vec)
1311{ 1127{
1312 if (vec == EVT_IVTMR_P) { 1128 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1313 vec = IRQ_CORETMR; 1129 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1314 } else { 1130 unsigned long sic_status[3];
1315 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; 1131
1316 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; 1132 if (likely(vec == EVT_IVTMR_P))
1317#if defined(SIC_ISR0) 1133 return IRQ_CORETMR;
1318 unsigned long sic_status[3];
1319 1134
1320 if (smp_processor_id()) { 1135#ifdef SIC_ISR
1136 sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1137#else
1138 if (smp_processor_id()) {
1321# ifdef SICB_ISR0 1139# ifdef SICB_ISR0
1322 /* This will be optimized out in UP mode. */ 1140 /* This will be optimized out in UP mode. */
1323 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0(); 1141 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1324 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1(); 1142 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1325# endif
1326 } else {
1327 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1328 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1329 }
1330# ifdef SIC_ISR2
1331 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1332# endif 1143# endif
1333 for (;; ivg++) { 1144 } else {
1334 if (ivg >= ivg_stop) { 1145 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1335 atomic_inc(&num_spurious); 1146 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1336 return; 1147 }
1337 } 1148#endif
1338 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) 1149#ifdef SIC_ISR2
1339 break; 1150 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1340 } 1151#endif
1341#else
1342 unsigned long sic_status;
1343
1344 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1345 1152
1346 for (;; ivg++) { 1153 for (;; ivg++) {
1347 if (ivg >= ivg_stop) { 1154 if (ivg >= ivg_stop)
1348 atomic_inc(&num_spurious); 1155 return -1;
1349 return; 1156#ifdef SIC_ISR
1350 } else if (sic_status & ivg->isrflag) 1157 if (sic_status[0] & ivg->isrflag)
1351 break; 1158#else
1352 } 1159 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1353#endif 1160#endif
1354 vec = ivg->irqno; 1161 return ivg->irqno;
1355 } 1162 }
1356 asm_do_IRQ(vec, fp); 1163}
1164
1165#ifdef CONFIG_DO_IRQ_L1
1166__attribute__((l1_text))
1167#endif
1168void do_irq(int vec, struct pt_regs *fp)
1169{
1170 int irq = vec_to_irq(vec);
1171 if (irq == -1)
1172 return;
1173 asm_do_IRQ(irq, fp);
1357} 1174}
1358 1175
1359#ifdef CONFIG_IPIPE 1176#ifdef CONFIG_IPIPE
@@ -1391,40 +1208,9 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1391 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst; 1208 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1392 int irq, s = 0; 1209 int irq, s = 0;
1393 1210
1394 if (likely(vec == EVT_IVTMR_P)) 1211 irq = vec_to_irq(vec);
1395 irq = IRQ_CORETMR; 1212 if (irq == -1)
1396 else { 1213 return 0;
1397#if defined(SIC_ISR0)
1398 unsigned long sic_status[3];
1399
1400 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1401 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1402# ifdef SIC_ISR2
1403 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1404# endif
1405 for (;; ivg++) {
1406 if (ivg >= ivg_stop) {
1407 atomic_inc(&num_spurious);
1408 return 0;
1409 }
1410 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1411 break;
1412 }
1413#else
1414 unsigned long sic_status;
1415
1416 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1417
1418 for (;; ivg++) {
1419 if (ivg >= ivg_stop) {
1420 atomic_inc(&num_spurious);
1421 return 0;
1422 } else if (sic_status & ivg->isrflag)
1423 break;
1424 }
1425#endif
1426 irq = ivg->irqno;
1427 }
1428 1214
1429 if (irq == IRQ_SYSTMR) { 1215 if (irq == IRQ_SYSTMR) {
1430#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0) 1216#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
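The hunks above fold the duplicated vector-decoding loops of do_irq() and __ipipe_grab_irq() into one vec_to_irq() helper that returns -1 for a spurious event, so both entry points share a single table walk. A minimal sketch of that shape, not taken from the patch; scan_ivg_table() stands in for the real ivg7_13[]/SIC_ISR scan and is a made-up name:

	/* Illustrative sketch: the real decoder walks ivg7_13[] against the
	 * masked SIC_ISR bits; that scan is stubbed out here. */
	static int vec_to_irq(int vec)
	{
		if (vec == EVT_IVTMR_P)		/* core timer needs no table walk */
			return IRQ_CORETMR;

		return scan_ivg_table(vec);	/* hypothetical: returns -1 if spurious */
	}

	void do_irq(int vec, struct pt_regs *fp)
	{
		int irq = vec_to_irq(vec);

		if (irq == -1)			/* spurious interrupt, nothing to do */
			return;
		asm_do_IRQ(irq, fp);
	}

With the decode factored out, __ipipe_grab_irq() can drop its private copy of the loop, as the hunk above shows, and simply bail out when vec_to_irq() reports -1.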
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1fbd94c44457..35e7e1eb0188 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <asm/atomic.h> 26#include <asm/atomic.h>
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/irq_handler.h>
28#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
29#include <asm/pgtable.h> 30#include <asm/pgtable.h>
30#include <asm/pgalloc.h> 31#include <asm/pgalloc.h>
@@ -96,7 +97,7 @@ static void ipi_cpu_stop(unsigned int cpu)
96 dump_stack(); 97 dump_stack();
97 spin_unlock(&stop_lock); 98 spin_unlock(&stop_lock);
98 99
99 cpu_clear(cpu, cpu_online_map); 100 set_cpu_online(cpu, false);
100 101
101 local_irq_disable(); 102 local_irq_disable();
102 103
@@ -146,7 +147,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
146 */ 147 */
147 resync_core_dcache(); 148 resync_core_dcache();
148#endif 149#endif
149 cpu_clear(cpu, *msg->call_struct.waitmask); 150 cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
150 } 151 }
151} 152}
152 153
@@ -222,9 +223,10 @@ static inline void smp_send_message(cpumask_t callmap, unsigned long type,
222 struct ipi_message_queue *msg_queue; 223 struct ipi_message_queue *msg_queue;
223 struct ipi_message *msg; 224 struct ipi_message *msg;
224 unsigned long flags, next_msg; 225 unsigned long flags, next_msg;
225 cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */ 226 cpumask_t waitmask; /* waitmask is shared by all cpus */
226 227
227 for_each_cpu_mask(cpu, callmap) { 228 cpumask_copy(&waitmask, &callmap);
229 for_each_cpu(cpu, &callmap) {
228 msg_queue = &per_cpu(ipi_msg_queue, cpu); 230 msg_queue = &per_cpu(ipi_msg_queue, cpu);
229 spin_lock_irqsave(&msg_queue->lock, flags); 231 spin_lock_irqsave(&msg_queue->lock, flags);
230 if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { 232 if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
@@ -246,7 +248,7 @@ static inline void smp_send_message(cpumask_t callmap, unsigned long type,
246 } 248 }
247 249
248 if (wait) { 250 if (wait) {
249 while (!cpus_empty(waitmask)) 251 while (!cpumask_empty(&waitmask))
250 blackfin_dcache_invalidate_range( 252 blackfin_dcache_invalidate_range(
251 (unsigned long)(&waitmask), 253 (unsigned long)(&waitmask),
252 (unsigned long)(&waitmask)); 254 (unsigned long)(&waitmask));
@@ -265,9 +267,9 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
265 cpumask_t callmap; 267 cpumask_t callmap;
266 268
267 preempt_disable(); 269 preempt_disable();
268 callmap = cpu_online_map; 270 cpumask_copy(&callmap, cpu_online_mask);
269 cpu_clear(smp_processor_id(), callmap); 271 cpumask_clear_cpu(smp_processor_id(), &callmap);
270 if (!cpus_empty(callmap)) 272 if (!cpumask_empty(&callmap))
271 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); 273 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
272 274
273 preempt_enable(); 275 preempt_enable();
@@ -284,8 +286,8 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
284 286
285 if (cpu_is_offline(cpu)) 287 if (cpu_is_offline(cpu))
286 return 0; 288 return 0;
287 cpus_clear(callmap); 289 cpumask_clear(&callmap);
288 cpu_set(cpu, callmap); 290 cpumask_set_cpu(cpu, &callmap);
289 291
290 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); 292 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
291 293
@@ -308,9 +310,9 @@ void smp_send_stop(void)
308 cpumask_t callmap; 310 cpumask_t callmap;
309 311
310 preempt_disable(); 312 preempt_disable();
311 callmap = cpu_online_map; 313 cpumask_copy(&callmap, cpu_online_mask);
312 cpu_clear(smp_processor_id(), callmap); 314 cpumask_clear_cpu(smp_processor_id(), &callmap);
313 if (!cpus_empty(callmap)) 315 if (!cpumask_empty(&callmap))
314 smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); 316 smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
315 317
316 preempt_enable(); 318 preempt_enable();
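The smp.c conversion above is the stock migration from the old by-value cpumask operations (cpu_set(), cpu_clear(), cpus_empty(), plain struct assignment of cpu_online_map) to the pointer-based accessors, which stay cheap and correct as NR_CPUS grows. A before/after sketch of the idiom, not from the patch; send_ipi_to() is a made-up sender:

	static void notify_other_cpus(void)
	{
		cpumask_t callmap;

		/* old style, now removed:
		 *	callmap = cpu_online_map;
		 *	cpu_clear(smp_processor_id(), callmap);
		 *	if (!cpus_empty(callmap)) ...
		 */
		cpumask_copy(&callmap, cpu_online_mask);	/* explicit copy */
		cpumask_clear_cpu(smp_processor_id(), &callmap);
		if (!cpumask_empty(&callmap))
			send_ipi_to(&callmap);			/* hypothetical */
	}

The same substitution repeats through the cris and m32r hunks below: cpumask_clear()/cpumask_setall() for on-stack masks in place of the CPU_MASK_NONE and CPU_MASK_ALL initializers, and for_each_cpu(cpu, &mask) in place of for_each_cpu_mask().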
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index dfd304a4a3ea..29d98faa1efd 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/poll.h> 16#include <linux/poll.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
18#include <linux/spinlock.h> 19#include <linux/spinlock.h>
19#include <linux/rtc.h> 20#include <linux/rtc.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
@@ -764,7 +765,7 @@ EXPORT_SYMBOL(sram_alloc_with_lsl);
764/* Need to keep line of output the same. Currently, that is 44 bytes 765/* Need to keep line of output the same. Currently, that is 44 bytes
765 * (including newline). 766 * (including newline).
766 */ 767 */
767static int _sram_proc_read(char *buf, int *len, int count, const char *desc, 768static int _sram_proc_show(struct seq_file *m, const char *desc,
768 struct sram_piece *pfree_head, 769 struct sram_piece *pfree_head,
769 struct sram_piece *pused_head) 770 struct sram_piece *pused_head)
770{ 771{
@@ -773,13 +774,13 @@ static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
773 if (!pfree_head || !pused_head) 774 if (!pfree_head || !pused_head)
774 return -1; 775 return -1;
775 776
776 *len += sprintf(&buf[*len], "--- SRAM %-14s Size PID State \n", desc); 777 seq_printf(m, "--- SRAM %-14s Size PID State \n", desc);
777 778
778 /* search the relevant memory slot */ 779 /* search the relevant memory slot */
779 pslot = pused_head->next; 780 pslot = pused_head->next;
780 781
781 while (pslot != NULL) { 782 while (pslot != NULL) {
782 *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n", 783 seq_printf(m, "%p-%p %10i %5i %-10s\n",
783 pslot->paddr, pslot->paddr + pslot->size, 784 pslot->paddr, pslot->paddr + pslot->size,
784 pslot->size, pslot->pid, "ALLOCATED"); 785 pslot->size, pslot->pid, "ALLOCATED");
785 786
@@ -789,7 +790,7 @@ static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
789 pslot = pfree_head->next; 790 pslot = pfree_head->next;
790 791
791 while (pslot != NULL) { 792 while (pslot != NULL) {
792 *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n", 793 seq_printf(m, "%p-%p %10i %5i %-10s\n",
793 pslot->paddr, pslot->paddr + pslot->size, 794 pslot->paddr, pslot->paddr + pslot->size,
794 pslot->size, pslot->pid, "FREE"); 795 pslot->size, pslot->pid, "FREE");
795 796
@@ -798,54 +799,62 @@ static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
798 799
799 return 0; 800 return 0;
800} 801}
801static int sram_proc_read(char *buf, char **start, off_t offset, int count, 802static int sram_proc_show(struct seq_file *m, void *v)
802 int *eof, void *data)
803{ 803{
804 int len = 0;
805 unsigned int cpu; 804 unsigned int cpu;
806 805
807 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 806 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
808 if (_sram_proc_read(buf, &len, count, "Scratchpad", 807 if (_sram_proc_show(m, "Scratchpad",
809 &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu))) 808 &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
810 goto not_done; 809 goto not_done;
811#if L1_DATA_A_LENGTH != 0 810#if L1_DATA_A_LENGTH != 0
812 if (_sram_proc_read(buf, &len, count, "L1 Data A", 811 if (_sram_proc_show(m, "L1 Data A",
813 &per_cpu(free_l1_data_A_sram_head, cpu), 812 &per_cpu(free_l1_data_A_sram_head, cpu),
814 &per_cpu(used_l1_data_A_sram_head, cpu))) 813 &per_cpu(used_l1_data_A_sram_head, cpu)))
815 goto not_done; 814 goto not_done;
816#endif 815#endif
817#if L1_DATA_B_LENGTH != 0 816#if L1_DATA_B_LENGTH != 0
818 if (_sram_proc_read(buf, &len, count, "L1 Data B", 817 if (_sram_proc_show(m, "L1 Data B",
819 &per_cpu(free_l1_data_B_sram_head, cpu), 818 &per_cpu(free_l1_data_B_sram_head, cpu),
820 &per_cpu(used_l1_data_B_sram_head, cpu))) 819 &per_cpu(used_l1_data_B_sram_head, cpu)))
821 goto not_done; 820 goto not_done;
822#endif 821#endif
823#if L1_CODE_LENGTH != 0 822#if L1_CODE_LENGTH != 0
824 if (_sram_proc_read(buf, &len, count, "L1 Instruction", 823 if (_sram_proc_show(m, "L1 Instruction",
825 &per_cpu(free_l1_inst_sram_head, cpu), 824 &per_cpu(free_l1_inst_sram_head, cpu),
826 &per_cpu(used_l1_inst_sram_head, cpu))) 825 &per_cpu(used_l1_inst_sram_head, cpu)))
827 goto not_done; 826 goto not_done;
828#endif 827#endif
829 } 828 }
830#if L2_LENGTH != 0 829#if L2_LENGTH != 0
831 if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head, 830 if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
832 &used_l2_sram_head))
833 goto not_done; 831 goto not_done;
834#endif 832#endif
835 *eof = 1;
836 not_done: 833 not_done:
837 return len; 834 return 0;
835}
836
837static int sram_proc_open(struct inode *inode, struct file *file)
838{
839 return single_open(file, sram_proc_show, NULL);
838} 840}
839 841
842static const struct file_operations sram_proc_ops = {
843 .open = sram_proc_open,
844 .read = seq_read,
845 .llseek = seq_lseek,
846 .release = single_release,
847};
848
840static int __init sram_proc_init(void) 849static int __init sram_proc_init(void)
841{ 850{
842 struct proc_dir_entry *ptr; 851 struct proc_dir_entry *ptr;
843 ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL); 852
853 ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
844 if (!ptr) { 854 if (!ptr) {
845 printk(KERN_WARNING "unable to create /proc/sram\n"); 855 printk(KERN_WARNING "unable to create /proc/sram\n");
846 return -1; 856 return -1;
847 } 857 }
848 ptr->read_proc = sram_proc_read;
849 return 0; 858 return 0;
850} 859}
851late_initcall(sram_proc_init); 860late_initcall(sram_proc_init);
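The /proc/sram hunk above is a textbook conversion from the legacy create_proc_entry()/read_proc interface to seq_file via single_open(), which drops the manual buffer-length and *eof bookkeeping. A minimal sketch of the same pattern for a hypothetical /proc/example entry (names are made up, not from the patch):

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int example_proc_show(struct seq_file *m, void *v)
	{
		/* seq_printf() does the buffering; no len/count arithmetic */
		seq_printf(m, "hello from /proc/example\n");
		return 0;
	}

	static int example_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_proc_show, NULL);
	}

	static const struct file_operations example_proc_ops = {
		.owner   = THIS_MODULE,
		.open    = example_proc_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	static int __init example_proc_init(void)
	{
		if (!proc_create("example", S_IRUGO, NULL, &example_proc_ops))
			return -ENOMEM;
		return 0;
	}
	late_initcall(example_proc_init);

Partial reads at arbitrary offsets then come for free through seq_read()/seq_lseek().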
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index a6d03069d0ff..b6b94a27d276 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -31,10 +31,6 @@ config ARCH_HAS_ILOG2_U64
31 bool 31 bool
32 default n 32 default n
33 33
34config GENERIC_FIND_NEXT_BIT
35 bool
36 default y
37
38config GENERIC_HWEIGHT 34config GENERIC_HWEIGHT
39 bool 35 bool
40 default y 36 default y
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 68a1a5901ca5..5ebe6e841820 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -266,11 +266,11 @@ static int irq_cpu(int irq)
266 266
267 267
268 /* Let the interrupt stay if possible */ 268 /* Let the interrupt stay if possible */
269 if (cpu_isset(cpu, irq_allocations[irq - FIRST_IRQ].mask)) 269 if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask))
270 goto out; 270 goto out;
271 271
272 /* IRQ must be moved to another CPU. */ 272 /* IRQ must be moved to another CPU. */
273 cpu = first_cpu(irq_allocations[irq - FIRST_IRQ].mask); 273 cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask);
274 irq_allocations[irq - FIRST_IRQ].cpu = cpu; 274 irq_allocations[irq - FIRST_IRQ].cpu = cpu;
275out: 275out:
276 spin_unlock_irqrestore(&irq_lock, flags); 276 spin_unlock_irqrestore(&irq_lock, flags);
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 66cc75657e2f..a0843a71aaee 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -81,7 +81,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
81 81
82 /* Mark all possible CPUs as present */ 82 /* Mark all possible CPUs as present */
83 for (i = 0; i < max_cpus; i++) 83 for (i = 0; i < max_cpus; i++)
84 cpu_set(i, phys_cpu_present_map); 84 cpumask_set_cpu(i, &phys_cpu_present_map);
85} 85}
86 86
87void __devinit smp_prepare_boot_cpu(void) 87void __devinit smp_prepare_boot_cpu(void)
@@ -98,7 +98,7 @@ void __devinit smp_prepare_boot_cpu(void)
98 SUPP_REG_WR(RW_MM_TLB_PGD, pgd); 98 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
99 99
100 set_cpu_online(0, true); 100 set_cpu_online(0, true);
101 cpu_set(0, phys_cpu_present_map); 101 cpumask_set_cpu(0, &phys_cpu_present_map);
102 set_cpu_possible(0, true); 102 set_cpu_possible(0, true);
103} 103}
104 104
@@ -112,8 +112,9 @@ smp_boot_one_cpu(int cpuid)
112{ 112{
113 unsigned timeout; 113 unsigned timeout;
114 struct task_struct *idle; 114 struct task_struct *idle;
115 cpumask_t cpu_mask = CPU_MASK_NONE; 115 cpumask_t cpu_mask;
116 116
117 cpumask_clear(&cpu_mask);
117 idle = fork_idle(cpuid); 118 idle = fork_idle(cpuid);
118 if (IS_ERR(idle)) 119 if (IS_ERR(idle))
119 panic("SMP: fork failed for CPU:%d", cpuid); 120 panic("SMP: fork failed for CPU:%d", cpuid);
@@ -125,10 +126,10 @@ smp_boot_one_cpu(int cpuid)
125 cpu_now_booting = cpuid; 126 cpu_now_booting = cpuid;
126 127
127 /* Kick it */ 128 /* Kick it */
128 cpu_set(cpuid, cpu_online_map); 129 set_cpu_online(cpuid, true);
129 cpu_set(cpuid, cpu_mask); 130 cpumask_set_cpu(cpuid, &cpu_mask);
130 send_ipi(IPI_BOOT, 0, cpu_mask); 131 send_ipi(IPI_BOOT, 0, cpu_mask);
131 cpu_clear(cpuid, cpu_online_map); 132 set_cpu_online(cpuid, false);
132 133
133 /* Wait for CPU to come online */ 134 /* Wait for CPU to come online */
134 for (timeout = 0; timeout < 10000; timeout++) { 135 for (timeout = 0; timeout < 10000; timeout++) {
@@ -176,7 +177,7 @@ void __init smp_callin(void)
176 notify_cpu_starting(cpu); 177 notify_cpu_starting(cpu);
177 local_irq_enable(); 178 local_irq_enable();
178 179
179 cpu_set(cpu, cpu_online_map); 180 set_cpu_online(cpu, true);
180 cpu_idle(); 181 cpu_idle();
181} 182}
182 183
@@ -214,8 +215,9 @@ int __cpuinit __cpu_up(unsigned int cpu)
214 215
215void smp_send_reschedule(int cpu) 216void smp_send_reschedule(int cpu)
216{ 217{
217 cpumask_t cpu_mask = CPU_MASK_NONE; 218 cpumask_t cpu_mask;
218 cpu_set(cpu, cpu_mask); 219 cpumask_clear(&cpu_mask);
220 cpumask_set_cpu(cpu, &cpu_mask);
219 send_ipi(IPI_SCHEDULE, 0, cpu_mask); 221 send_ipi(IPI_SCHEDULE, 0, cpu_mask);
220} 222}
221 223
@@ -232,7 +234,7 @@ void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned
232 234
233 spin_lock_irqsave(&tlbstate_lock, flags); 235 spin_lock_irqsave(&tlbstate_lock, flags);
234 cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm)); 236 cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
235 cpu_clear(smp_processor_id(), cpu_mask); 237 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
236 flush_mm = mm; 238 flush_mm = mm;
237 flush_vma = vma; 239 flush_vma = vma;
238 flush_addr = addr; 240 flush_addr = addr;
@@ -277,10 +279,10 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
277 int ret = 0; 279 int ret = 0;
278 280
279 /* Calculate CPUs to send to. */ 281 /* Calculate CPUs to send to. */
280 cpus_and(cpu_mask, cpu_mask, cpu_online_map); 282 cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);
281 283
282 /* Send the IPI. */ 284 /* Send the IPI. */
283 for_each_cpu_mask(i, cpu_mask) 285 for_each_cpu(i, &cpu_mask)
284 { 286 {
285 ipi.vector |= vector; 287 ipi.vector |= vector;
286 REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi); 288 REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
@@ -288,7 +290,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
288 290
289 /* Wait for IPI to finish on other CPUS */ 291 /* Wait for IPI to finish on other CPUS */
290 if (wait) { 292 if (wait) {
291 for_each_cpu_mask(i, cpu_mask) { 293 for_each_cpu(i, &cpu_mask) {
292 int j; 294 int j;
293 for (j = 0 ; j < 1000; j++) { 295 for (j = 0 ; j < 1000; j++) {
294 ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi); 296 ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
@@ -314,11 +316,12 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
314 */ 316 */
315int smp_call_function(void (*func)(void *info), void *info, int wait) 317int smp_call_function(void (*func)(void *info), void *info, int wait)
316{ 318{
317 cpumask_t cpu_mask = CPU_MASK_ALL; 319 cpumask_t cpu_mask;
318 struct call_data_struct data; 320 struct call_data_struct data;
319 int ret; 321 int ret;
320 322
321 cpu_clear(smp_processor_id(), cpu_mask); 323 cpumask_setall(&cpu_mask);
324 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
322 325
323 WARN_ON(irqs_disabled()); 326 WARN_ON(irqs_disabled());
324 327
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index df33ab89d70f..d72ab58fd83e 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -13,8 +13,6 @@
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <asm/tlb.h> 14#include <asm/tlb.h>
15 15
16DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
17
18unsigned long empty_zero_page; 16unsigned long empty_zero_page;
19 17
20extern char _stext, _edata, _etext; /* From linkerscript */ 18extern char _stext, _edata, _etext; /* From linkerscript */
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 064f62196745..cb884e489425 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -19,14 +19,6 @@ config RWSEM_GENERIC_SPINLOCK
19config RWSEM_XCHGADD_ALGORITHM 19config RWSEM_XCHGADD_ALGORITHM
20 bool 20 bool
21 21
22config GENERIC_FIND_NEXT_BIT
23 bool
24 default y
25
26config GENERIC_FIND_BIT_LE
27 bool
28 default y
29
30config GENERIC_HWEIGHT 22config GENERIC_HWEIGHT
31 bool 23 bool
32 default y 24 default y
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index ed64588ac3a7..fbe5f0dbae06 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -41,8 +41,6 @@
41 41
42#undef DEBUG 42#undef DEBUG
43 43
44DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
45
46/* 44/*
47 * BAD_PAGE is the page that is used for page faults when linux 45 * BAD_PAGE is the page that is used for page faults when linux
48 * is out-of-memory. Older versions of linux just did a 46 * is out-of-memory. Older versions of linux just did a
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index e20322ffcaf8..091ed6192ae8 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -41,14 +41,6 @@ config ARCH_HAS_ILOG2_U64
41 bool 41 bool
42 default n 42 default n
43 43
44config GENERIC_FIND_NEXT_BIT
45 bool
46 default y
47
48config GENERIC_FIND_BIT_LE
49 bool
50 default y
51
52config GENERIC_HWEIGHT 44config GENERIC_HWEIGHT
53 bool 45 bool
54 default y 46 default y
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e5cc56ae6ce3..38280ef4a2af 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -78,10 +78,6 @@ config HUGETLB_PAGE_SIZE_VARIABLE
78 depends on HUGETLB_PAGE 78 depends on HUGETLB_PAGE
79 default y 79 default y
80 80
81config GENERIC_FIND_NEXT_BIT
82 bool
83 default y
84
85config GENERIC_CALIBRATE_DELAY 81config GENERIC_CALIBRATE_DELAY
86 bool 82 bool
87 default y 83 default y
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 23cce999eb1c..c3ffe3e54edc 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,27 @@
47#include <asm/machvec.h> 47#include <asm/machvec.h>
48 48
49#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
50# define FREE_PTE_NR 2048
51# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) 50# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
52#else 51#else
53# define FREE_PTE_NR 0
54# define tlb_fast_mode(tlb) (1) 52# define tlb_fast_mode(tlb) (1)
55#endif 53#endif
56 54
55/*
56 * If we can't allocate a page to make a big batch of page pointers
57 * to work on, then just handle a few from the on-stack structure.
58 */
59#define IA64_GATHER_BUNDLE 8
60
57struct mmu_gather { 61struct mmu_gather {
58 struct mm_struct *mm; 62 struct mm_struct *mm;
59 unsigned int nr; /* == ~0U => fast mode */ 63 unsigned int nr; /* == ~0U => fast mode */
64 unsigned int max;
60 unsigned char fullmm; /* non-zero means full mm flush */ 65 unsigned char fullmm; /* non-zero means full mm flush */
61 unsigned char need_flush; /* really unmapped some PTEs? */ 66 unsigned char need_flush; /* really unmapped some PTEs? */
62 unsigned long start_addr; 67 unsigned long start_addr;
63 unsigned long end_addr; 68 unsigned long end_addr;
64 struct page *pages[FREE_PTE_NR]; 69 struct page **pages;
70 struct page *local[IA64_GATHER_BUNDLE];
65}; 71};
66 72
67struct ia64_tr_entry { 73struct ia64_tr_entry {
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
90#define RR_RID_MASK 0x00000000ffffff00L 96#define RR_RID_MASK 0x00000000ffffff00L
91#define RR_TO_RID(val) ((val >> 8) & 0xffffff) 97#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
92 98
93/* Users of the generic TLB shootdown code must declare this storage space. */
94DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
95
96/* 99/*
97 * Flush the TLB for address range START to END and, if not in fast mode, release the 100 * Flush the TLB for address range START to END and, if not in fast mode, release the
98 * freed pages that where gathered up to this point. 101 * freed pages that where gathered up to this point.
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
147 } 150 }
148} 151}
149 152
150/* 153static inline void __tlb_alloc_page(struct mmu_gather *tlb)
151 * Return a pointer to an initialized struct mmu_gather.
152 */
153static inline struct mmu_gather *
154tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
155{ 154{
156 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 155 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
157 156
157 if (addr) {
158 tlb->pages = (void *)addr;
159 tlb->max = PAGE_SIZE / sizeof(void *);
160 }
161}
162
163
164static inline void
165tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
166{
158 tlb->mm = mm; 167 tlb->mm = mm;
168 tlb->max = ARRAY_SIZE(tlb->local);
169 tlb->pages = tlb->local;
159 /* 170 /*
160 * Use fast mode if only 1 CPU is online. 171 * Use fast mode if only 1 CPU is online.
161 * 172 *
@@ -172,7 +183,6 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
172 tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; 183 tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
173 tlb->fullmm = full_mm_flush; 184 tlb->fullmm = full_mm_flush;
174 tlb->start_addr = ~0UL; 185 tlb->start_addr = ~0UL;
175 return tlb;
176} 186}
177 187
178/* 188/*
@@ -180,7 +190,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
180 * collected. 190 * collected.
181 */ 191 */
182static inline void 192static inline void
183tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) 193tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
184{ 194{
185 /* 195 /*
186 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and 196 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,7 +201,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
191 /* keep the page table cache within bounds */ 201 /* keep the page table cache within bounds */
192 check_pgt_cache(); 202 check_pgt_cache();
193 203
194 put_cpu_var(mmu_gathers); 204 if (tlb->pages != tlb->local)
205 free_pages((unsigned long)tlb->pages, 0);
195} 206}
196 207
197/* 208/*
@@ -199,18 +210,33 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
199 * must be delayed until after the TLB has been flushed (see comments at the beginning of 210 * must be delayed until after the TLB has been flushed (see comments at the beginning of
200 * this file). 211 * this file).
201 */ 212 */
202static inline void 213static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
203tlb_remove_page (struct mmu_gather *tlb, struct page *page)
204{ 214{
205 tlb->need_flush = 1; 215 tlb->need_flush = 1;
206 216
207 if (tlb_fast_mode(tlb)) { 217 if (tlb_fast_mode(tlb)) {
208 free_page_and_swap_cache(page); 218 free_page_and_swap_cache(page);
209 return; 219 return 1; /* avoid calling tlb_flush_mmu */
210 } 220 }
221
222 if (!tlb->nr && tlb->pages == tlb->local)
223 __tlb_alloc_page(tlb);
224
211 tlb->pages[tlb->nr++] = page; 225 tlb->pages[tlb->nr++] = page;
212 if (tlb->nr >= FREE_PTE_NR) 226 VM_BUG_ON(tlb->nr > tlb->max);
213 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr); 227
228 return tlb->max - tlb->nr;
229}
230
231static inline void tlb_flush_mmu(struct mmu_gather *tlb)
232{
233 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
234}
235
236static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
237{
238 if (!__tlb_remove_page(tlb, page))
239 tlb_flush_mmu(tlb);
214} 240}
215 241
216/* 242/*
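The ia64 tlb.h hunk above tracks the generic mmu_gather rework: the gather struct is now supplied by the caller instead of coming from the per-CPU mmu_gathers variable (hence the DEFINE_PER_CPU removals elsewhere in this diff), it starts with the small on-stack local[IA64_GATHER_BUNDLE] array and upgrades to a full page via __get_free_pages(), and __tlb_remove_page() returns the remaining batch space so the caller can flush when it hits zero. A condensed sketch of the calling contract; next_unmapped_page() is a made-up iterator:

	static void example_unmap(struct mm_struct *mm, unsigned long start,
				  unsigned long end)
	{
		struct mmu_gather tlb;		/* caller-owned, typically on stack */
		struct page *page;

		tlb_gather_mmu(&tlb, mm, 0);	/* 0: not a full-mm teardown */

		while ((page = next_unmapped_page()) != NULL) {
			/* returns remaining room in the batch; 0 means flush now */
			if (!__tlb_remove_page(&tlb, page))
				tlb_flush_mmu(&tlb);
		}

		tlb_finish_mmu(&tlb, start, end);	/* final flush, frees the batch page */
	}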
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 04440cc09b40..85118dfe9bb5 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -36,7 +36,7 @@
36static cycle_t itc_get_cycles(struct clocksource *cs); 36static cycle_t itc_get_cycles(struct clocksource *cs);
37 37
38struct fsyscall_gtod_data_t fsyscall_gtod_data = { 38struct fsyscall_gtod_data_t fsyscall_gtod_data = {
39 .lock = SEQLOCK_UNLOCKED, 39 .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
40}; 40};
41 41
42struct itc_jitter_data_t itc_jitter_data; 42struct itc_jitter_data_t itc_jitter_data;
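The one-line time.c change above swaps the old SEQLOCK_UNLOCKED initializer for __SEQLOCK_UNLOCKED(name), which takes the lock's name so lock debugging can identify it. The shape of the static initializer, shown on a made-up structure:

	#include <linux/seqlock.h>

	struct sample_gtod_data {
		seqlock_t	lock;
		u64		cycles;
	};

	static struct sample_gtod_data sample_gtod_data = {
		.lock = __SEQLOCK_UNLOCKED(sample_gtod_data.lock),
	};

	static u64 sample_read_cycles(void)
	{
		unsigned seq;
		u64 val;

		do {				/* standard seqlock read side */
			seq = read_seqbegin(&sample_gtod_data.lock);
			val = sample_gtod_data.cycles;
		} while (read_seqretry(&sample_gtod_data.lock, seq));

		return val;
	}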
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 9a018cde5d84..f114a3b14c6a 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -44,13 +44,16 @@ void show_mem(unsigned int filter)
44 pg_data_t *pgdat; 44 pg_data_t *pgdat;
45 45
46 printk(KERN_INFO "Mem-info:\n"); 46 printk(KERN_INFO "Mem-info:\n");
47 show_free_areas(); 47 show_free_areas(filter);
48 printk(KERN_INFO "Node memory in pages:\n"); 48 printk(KERN_INFO "Node memory in pages:\n");
49 for_each_online_pgdat(pgdat) { 49 for_each_online_pgdat(pgdat) {
50 unsigned long present; 50 unsigned long present;
51 unsigned long flags; 51 unsigned long flags;
52 int shared = 0, cached = 0, reserved = 0; 52 int shared = 0, cached = 0, reserved = 0;
53 int nid = pgdat->node_id;
53 54
55 if (skip_free_areas_node(filter, nid))
56 continue;
54 pgdat_resize_lock(pgdat, &flags); 57 pgdat_resize_lock(pgdat, &flags);
55 present = pgdat->node_present_pages; 58 present = pgdat->node_present_pages;
56 for(i = 0; i < pgdat->node_spanned_pages; i++) { 59 for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -64,8 +67,7 @@ void show_mem(unsigned int filter)
64 if (max_gap < LARGE_GAP) 67 if (max_gap < LARGE_GAP)
65 continue; 68 continue;
66#endif 69#endif
67 i = vmemmap_find_next_valid_pfn(pgdat->node_id, 70 i = vmemmap_find_next_valid_pfn(nid, i) - 1;
68 i) - 1;
69 continue; 71 continue;
70 } 72 }
71 if (PageReserved(page)) 73 if (PageReserved(page))
@@ -81,7 +83,7 @@ void show_mem(unsigned int filter)
81 total_cached += cached; 83 total_cached += cached;
82 total_shared += shared; 84 total_shared += shared;
83 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " 85 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
84 "shrd: %10d, swpd: %10d\n", pgdat->node_id, 86 "shrd: %10d, swpd: %10d\n", nid,
85 present, reserved, shared, cached); 87 present, reserved, shared, cached);
86 } 88 }
87 printk(KERN_INFO "%ld pages of RAM\n", total_present); 89 printk(KERN_INFO "%ld pages of RAM\n", total_present);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 82ab1bc6afb1..c641333cd997 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -622,13 +622,16 @@ void show_mem(unsigned int filter)
622 pg_data_t *pgdat; 622 pg_data_t *pgdat;
623 623
624 printk(KERN_INFO "Mem-info:\n"); 624 printk(KERN_INFO "Mem-info:\n");
625 show_free_areas(); 625 show_free_areas(filter);
626 printk(KERN_INFO "Node memory in pages:\n"); 626 printk(KERN_INFO "Node memory in pages:\n");
627 for_each_online_pgdat(pgdat) { 627 for_each_online_pgdat(pgdat) {
628 unsigned long present; 628 unsigned long present;
629 unsigned long flags; 629 unsigned long flags;
630 int shared = 0, cached = 0, reserved = 0; 630 int shared = 0, cached = 0, reserved = 0;
631 int nid = pgdat->node_id;
631 632
633 if (skip_free_areas_node(filter, nid))
634 continue;
632 pgdat_resize_lock(pgdat, &flags); 635 pgdat_resize_lock(pgdat, &flags);
633 present = pgdat->node_present_pages; 636 present = pgdat->node_present_pages;
634 for(i = 0; i < pgdat->node_spanned_pages; i++) { 637 for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -638,8 +641,7 @@ void show_mem(unsigned int filter)
638 if (pfn_valid(pgdat->node_start_pfn + i)) 641 if (pfn_valid(pgdat->node_start_pfn + i))
639 page = pfn_to_page(pgdat->node_start_pfn + i); 642 page = pfn_to_page(pgdat->node_start_pfn + i);
640 else { 643 else {
641 i = vmemmap_find_next_valid_pfn(pgdat->node_id, 644 i = vmemmap_find_next_valid_pfn(nid, i) - 1;
642 i) - 1;
643 continue; 645 continue;
644 } 646 }
645 if (PageReserved(page)) 647 if (PageReserved(page))
@@ -655,7 +657,7 @@ void show_mem(unsigned int filter)
655 total_cached += cached; 657 total_cached += cached;
656 total_shared += shared; 658 total_shared += shared;
657 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " 659 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
658 "shrd: %10d, swpd: %10d\n", pgdat->node_id, 660 "shrd: %10d, swpd: %10d\n", nid,
659 present, reserved, shared, cached); 661 present, reserved, shared, cached);
660 } 662 }
661 printk(KERN_INFO "%ld pages of RAM\n", total_present); 663 printk(KERN_INFO "%ld pages of RAM\n", total_present);
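Both ia64 show_mem() variants gain the same plumbing: the filter argument is forwarded to show_free_areas(), and skip_free_areas_node() lets nodes the caller may not allocate from be skipped instead of dumped. Trimmed to the per-node skeleton the two hunks share:

	void show_mem(unsigned int filter)
	{
		pg_data_t *pgdat;

		show_free_areas(filter);
		for_each_online_pgdat(pgdat) {
			int nid = pgdat->node_id;

			if (skip_free_areas_node(filter, nid))
				continue;	/* node filtered out, skip its stats */

			/* ... walk node_spanned_pages and print per-node totals ... */
		}
	}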
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ed41759efcac..00cb0e26c64e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -36,8 +36,6 @@
36#include <asm/mca.h> 36#include <asm/mca.h>
37#include <asm/paravirt.h> 37#include <asm/paravirt.h>
38 38
39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40
41extern void ia64_tlb_init (void); 39extern void ia64_tlb_init (void);
42 40
43unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; 41unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 736b808d2291..85b44e858225 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -256,14 +256,6 @@ config ARCH_HAS_ILOG2_U64
256 bool 256 bool
257 default n 257 default n
258 258
259config GENERIC_FIND_NEXT_BIT
260 bool
261 default y
262
263config GENERIC_FIND_BIT_LE
264 bool
265 default y
266
267config GENERIC_HWEIGHT 259config GENERIC_HWEIGHT
268 bool 260 bool
269 default y 261 default y
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index 2e1019ddbb22..bb1afc1a31cc 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -9,15 +9,6 @@ config DEBUG_STACKOVERFLOW
9 This option will cause messages to be printed if free stack space 9 This option will cause messages to be printed if free stack space
10 drops below a certain limit. 10 drops below a certain limit.
11 11
12config DEBUG_STACK_USAGE
13 bool "Stack utilization instrumentation"
14 depends on DEBUG_KERNEL
15 help
16 Enables the display of the minimum amount of free stack which each
17 task has ever had available in the sysrq-T and sysrq-P debug output.
18
19 This option will slow down process creation somewhat.
20
21config DEBUG_PAGEALLOC 12config DEBUG_PAGEALLOC
22 bool "Debug page memory allocations" 13 bool "Debug page memory allocations"
23 depends on DEBUG_KERNEL && BROKEN 14 depends on DEBUG_KERNEL && BROKEN
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index e67ded1aab91..cf7829a61551 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu)
81 81
82static __inline__ unsigned int num_booting_cpus(void) 82static __inline__ unsigned int num_booting_cpus(void)
83{ 83{
84 return cpus_weight(cpu_callout_map); 84 return cpumask_weight(&cpu_callout_map);
85} 85}
86 86
87extern void smp_send_timer(void); 87extern void smp_send_timer(void);
88extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); 88extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
89 89
90extern void arch_send_call_function_single_ipi(int cpu); 90extern void arch_send_call_function_single_ipi(int cpu);
91extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 91extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -94,8 +94,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
94 94
95#define NO_PROC_ID (0xff) /* No processor magic marker */ 95#define NO_PROC_ID (0xff) /* No processor magic marker */
96 96
97#define PROC_CHANGE_PENALTY (15) /* Schedule penalty */
98
99/* 97/*
100 * M32R-mp IPI 98 * M32R-mp IPI
101 */ 99 */
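Besides dropping PROC_CHANGE_PENALTY, the header change above turns send_IPI_mask_phys() into a by-reference interface: a const cpumask_t * avoids copying the mask on every call and lets callers pass cpumask_of(cpu) directly, as smpboot.c now does. A sketch of the calling convention; the kick_*() wrappers and IPI_EXAMPLE are made-up names:

	/* new prototype: mask handed over by const pointer, never by value */
	unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask,
					 int ipi_num, int try);

	static void kick_one_cpu(int phys_id)
	{
		/* cpumask_of() already yields a const single-bit mask */
		send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
	}

	static void kick_cpus(const cpumask_t *callmap)
	{
		cpumask_t physid_mask;
		int cpu_id, phys_id;

		cpumask_clear(&physid_mask);
		for_each_cpu(cpu_id, callmap)
			if ((phys_id = cpu_to_physid(cpu_id)) != -1)
				cpumask_set_cpu(phys_id, &physid_mask);

		send_IPI_mask_phys(&physid_mask, IPI_EXAMPLE, 0);
	}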
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index fc10b39893d4..092d40a6708e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -30,6 +30,7 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/mmu_context.h> 31#include <asm/mmu_context.h>
32#include <asm/m32r.h> 32#include <asm/m32r.h>
33#include <asm/tlbflush.h>
33 34
34/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 35/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
35/* Data structures and variables */ 36/* Data structures and variables */
@@ -61,33 +62,22 @@ extern spinlock_t ipi_lock[];
61/* Function Prototypes */ 62/* Function Prototypes */
62/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 63/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
63 64
64void smp_send_reschedule(int);
65void smp_reschedule_interrupt(void); 65void smp_reschedule_interrupt(void);
66
67void smp_flush_cache_all(void);
68void smp_flush_cache_all_interrupt(void); 66void smp_flush_cache_all_interrupt(void);
69 67
70void smp_flush_tlb_all(void);
71static void flush_tlb_all_ipi(void *); 68static void flush_tlb_all_ipi(void *);
72
73void smp_flush_tlb_mm(struct mm_struct *);
74void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
75 unsigned long);
76void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
77static void flush_tlb_others(cpumask_t, struct mm_struct *, 69static void flush_tlb_others(cpumask_t, struct mm_struct *,
78 struct vm_area_struct *, unsigned long); 70 struct vm_area_struct *, unsigned long);
71
79void smp_invalidate_interrupt(void); 72void smp_invalidate_interrupt(void);
80 73
81void smp_send_stop(void);
82static void stop_this_cpu(void *); 74static void stop_this_cpu(void *);
83 75
84void smp_send_timer(void);
85void smp_ipi_timer_interrupt(struct pt_regs *); 76void smp_ipi_timer_interrupt(struct pt_regs *);
86void smp_local_timer_interrupt(void); 77void smp_local_timer_interrupt(void);
87 78
88static void send_IPI_allbutself(int, int); 79static void send_IPI_allbutself(int, int);
89static void send_IPI_mask(const struct cpumask *, int, int); 80static void send_IPI_mask(const struct cpumask *, int, int);
90unsigned long send_IPI_mask_phys(cpumask_t, int, int);
91 81
92/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 82/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
93/* Rescheduling request Routines */ 83/* Rescheduling request Routines */
@@ -162,10 +152,10 @@ void smp_flush_cache_all(void)
162 unsigned long *mask; 152 unsigned long *mask;
163 153
164 preempt_disable(); 154 preempt_disable();
165 cpumask = cpu_online_map; 155 cpumask_copy(&cpumask, cpu_online_mask);
166 cpu_clear(smp_processor_id(), cpumask); 156 cpumask_clear_cpu(smp_processor_id(), &cpumask);
167 spin_lock(&flushcache_lock); 157 spin_lock(&flushcache_lock);
168 mask=cpus_addr(cpumask); 158 mask=cpumask_bits(&cpumask);
169 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); 159 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
170 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); 160 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
171 _flush_cache_copyback_all(); 161 _flush_cache_copyback_all();
@@ -263,8 +253,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
263 preempt_disable(); 253 preempt_disable();
264 cpu_id = smp_processor_id(); 254 cpu_id = smp_processor_id();
265 mmc = &mm->context[cpu_id]; 255 mmc = &mm->context[cpu_id];
266 cpu_mask = *mm_cpumask(mm); 256 cpumask_copy(&cpu_mask, mm_cpumask(mm));
267 cpu_clear(cpu_id, cpu_mask); 257 cpumask_clear_cpu(cpu_id, &cpu_mask);
268 258
269 if (*mmc != NO_CONTEXT) { 259 if (*mmc != NO_CONTEXT) {
270 local_irq_save(flags); 260 local_irq_save(flags);
@@ -275,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
275 cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); 265 cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
276 local_irq_restore(flags); 266 local_irq_restore(flags);
277 } 267 }
278 if (!cpus_empty(cpu_mask)) 268 if (!cpumask_empty(&cpu_mask))
279 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); 269 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
280 270
281 preempt_enable(); 271 preempt_enable();
@@ -333,8 +323,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
333 preempt_disable(); 323 preempt_disable();
334 cpu_id = smp_processor_id(); 324 cpu_id = smp_processor_id();
335 mmc = &mm->context[cpu_id]; 325 mmc = &mm->context[cpu_id];
336 cpu_mask = *mm_cpumask(mm); 326 cpumask_copy(&cpu_mask, mm_cpumask(mm));
337 cpu_clear(cpu_id, cpu_mask); 327 cpumask_clear_cpu(cpu_id, &cpu_mask);
338 328
339#ifdef DEBUG_SMP 329#ifdef DEBUG_SMP
340 if (!mm) 330 if (!mm)
@@ -348,7 +338,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
348 __flush_tlb_page(va); 338 __flush_tlb_page(va);
349 local_irq_restore(flags); 339 local_irq_restore(flags);
350 } 340 }
351 if (!cpus_empty(cpu_mask)) 341 if (!cpumask_empty(&cpu_mask))
352 flush_tlb_others(cpu_mask, mm, vma, va); 342 flush_tlb_others(cpu_mask, mm, vma, va);
353 343
354 preempt_enable(); 344 preempt_enable();
@@ -395,14 +385,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
395 * - current CPU must not be in mask 385 * - current CPU must not be in mask
396 * - mask must exist :) 386 * - mask must exist :)
397 */ 387 */
398 BUG_ON(cpus_empty(cpumask)); 388 BUG_ON(cpumask_empty(&cpumask));
399 389
400 BUG_ON(cpu_isset(smp_processor_id(), cpumask)); 390 BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
401 BUG_ON(!mm); 391 BUG_ON(!mm);
402 392
403 /* If a CPU which we ran on has gone down, OK. */ 393 /* If a CPU which we ran on has gone down, OK. */
404 cpus_and(cpumask, cpumask, cpu_online_map); 394 cpumask_and(&cpumask, &cpumask, cpu_online_mask);
405 if (cpus_empty(cpumask)) 395 if (cpumask_empty(&cpumask))
406 return; 396 return;
407 397
408 /* 398 /*
@@ -416,7 +406,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
416 flush_mm = mm; 406 flush_mm = mm;
417 flush_vma = vma; 407 flush_vma = vma;
418 flush_va = va; 408 flush_va = va;
419 mask=cpus_addr(cpumask); 409 mask=cpumask_bits(&cpumask);
420 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); 410 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
421 411
422 /* 412 /*
@@ -425,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
425 */ 415 */
426 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); 416 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
427 417
428 while (!cpus_empty(flush_cpumask)) { 418 while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
429 /* nothing. lockup detection does not belong here */ 419 /* nothing. lockup detection does not belong here */
430 mb(); 420 mb();
431 } 421 }
@@ -460,7 +450,7 @@ void smp_invalidate_interrupt(void)
460 int cpu_id = smp_processor_id(); 450 int cpu_id = smp_processor_id();
461 unsigned long *mmc = &flush_mm->context[cpu_id]; 451 unsigned long *mmc = &flush_mm->context[cpu_id];
462 452
463 if (!cpu_isset(cpu_id, flush_cpumask)) 453 if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
464 return; 454 return;
465 455
466 if (flush_va == FLUSH_ALL) { 456 if (flush_va == FLUSH_ALL) {
@@ -478,7 +468,7 @@ void smp_invalidate_interrupt(void)
478 __flush_tlb_page(va); 468 __flush_tlb_page(va);
479 } 469 }
480 } 470 }
481 cpu_clear(cpu_id, flush_cpumask); 471 cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
482} 472}
483 473
484/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 474/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -530,7 +520,7 @@ static void stop_this_cpu(void *dummy)
530 /* 520 /*
531 * Remove this CPU: 521 * Remove this CPU:
532 */ 522 */
533 cpu_clear(cpu_id, cpu_online_map); 523 set_cpu_online(cpu_id, false);
534 524
535 /* 525 /*
536 * PSW IE = 1; 526 * PSW IE = 1;
@@ -725,8 +715,8 @@ static void send_IPI_allbutself(int ipi_num, int try)
725{ 715{
726 cpumask_t cpumask; 716 cpumask_t cpumask;
727 717
728 cpumask = cpu_online_map; 718 cpumask_copy(&cpumask, cpu_online_mask);
729 cpu_clear(smp_processor_id(), cpumask); 719 cpumask_clear_cpu(smp_processor_id(), &cpumask);
730 720
731 send_IPI_mask(&cpumask, ipi_num, try); 721 send_IPI_mask(&cpumask, ipi_num, try);
732} 722}
@@ -763,13 +753,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
763 cpumask_and(&tmp, cpumask, cpu_online_mask); 753 cpumask_and(&tmp, cpumask, cpu_online_mask);
764 BUG_ON(!cpumask_equal(cpumask, &tmp)); 754 BUG_ON(!cpumask_equal(cpumask, &tmp));
765 755
766 physid_mask = CPU_MASK_NONE; 756 cpumask_clear(&physid_mask);
767 for_each_cpu(cpu_id, cpumask) { 757 for_each_cpu(cpu_id, cpumask) {
768 if ((phys_id = cpu_to_physid(cpu_id)) != -1) 758 if ((phys_id = cpu_to_physid(cpu_id)) != -1)
769 cpu_set(phys_id, physid_mask); 759 cpumask_set_cpu(phys_id, &physid_mask);
770 } 760 }
771 761
772 send_IPI_mask_phys(physid_mask, ipi_num, try); 762 send_IPI_mask_phys(&physid_mask, ipi_num, try);
773} 763}
774 764
775/*==========================================================================* 765/*==========================================================================*
@@ -792,14 +782,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
792 * ---------- --- -------------------------------------------------------- 782 * ---------- --- --------------------------------------------------------
793 * 783 *
794 *==========================================================================*/ 784 *==========================================================================*/
795unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, 785unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
796 int try) 786 int try)
797{ 787{
798 spinlock_t *ipilock; 788 spinlock_t *ipilock;
799 volatile unsigned long *ipicr_addr; 789 volatile unsigned long *ipicr_addr;
800 unsigned long ipicr_val; 790 unsigned long ipicr_val;
801 unsigned long my_physid_mask; 791 unsigned long my_physid_mask;
802 unsigned long mask = cpus_addr(physid_mask)[0]; 792 unsigned long mask = cpumask_bits(physid_mask)[0];
803 793
804 794
805 if (mask & ~physids_coerce(phys_cpu_present_map)) 795 if (mask & ~physids_coerce(phys_cpu_present_map))
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index e034844cfc0d..cfdbe5d15002 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void)
135{ 135{
136 bsp_phys_id = hard_smp_processor_id(); 136 bsp_phys_id = hard_smp_processor_id();
137 physid_set(bsp_phys_id, phys_cpu_present_map); 137 physid_set(bsp_phys_id, phys_cpu_present_map);
138 cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */ 138 set_cpu_online(0, true); /* BSP's cpu_id == 0 */
139 cpu_set(0, cpu_callout_map); 139 cpumask_set_cpu(0, &cpu_callout_map);
140 cpu_set(0, cpu_callin_map); 140 cpumask_set_cpu(0, &cpu_callin_map);
141 141
142 /* 142 /*
143 * Initialize the logical to physical CPU number mapping 143 * Initialize the logical to physical CPU number mapping
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
178 for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++) 178 for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
179 physid_set(phys_id, phys_cpu_present_map); 179 physid_set(phys_id, phys_cpu_present_map);
180#ifndef CONFIG_HOTPLUG_CPU 180#ifndef CONFIG_HOTPLUG_CPU
181 init_cpu_present(&cpu_possible_map); 181 init_cpu_present(cpu_possible_mask);
182#endif 182#endif
183 183
184 show_mp_info(nr_cpu); 184 show_mp_info(nr_cpu);
@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id)
294 send_status = 0; 294 send_status = 0;
295 boot_status = 0; 295 boot_status = 0;
296 296
297 cpu_set(phys_id, cpu_bootout_map); 297 cpumask_set_cpu(phys_id, &cpu_bootout_map);
298 298
299 /* Send Startup IPI */ 299 /* Send Startup IPI */
300 send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0); 300 send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
301 301
302 Dprintk("Waiting for send to finish...\n"); 302 Dprintk("Waiting for send to finish...\n");
303 timeout = 0; 303 timeout = 0;
@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id)
306 do { 306 do {
307 Dprintk("+"); 307 Dprintk("+");
308 udelay(1000); 308 udelay(1000);
309 send_status = !cpu_isset(phys_id, cpu_bootin_map); 309 send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
310 } while (send_status && (timeout++ < 100)); 310 } while (send_status && (timeout++ < 100));
311 311
312 Dprintk("After Startup.\n"); 312 Dprintk("After Startup.\n");
@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id)
316 * allow APs to start initializing. 316 * allow APs to start initializing.
317 */ 317 */
318 Dprintk("Before Callout %d.\n", cpu_id); 318 Dprintk("Before Callout %d.\n", cpu_id);
319 cpu_set(cpu_id, cpu_callout_map); 319 cpumask_set_cpu(cpu_id, &cpu_callout_map);
320 Dprintk("After Callout %d.\n", cpu_id); 320 Dprintk("After Callout %d.\n", cpu_id);
321 321
322 /* 322 /*
323 * Wait 5s total for a response 323 * Wait 5s total for a response
324 */ 324 */
325 for (timeout = 0; timeout < 5000; timeout++) { 325 for (timeout = 0; timeout < 5000; timeout++) {
326 if (cpu_isset(cpu_id, cpu_callin_map)) 326 if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
327 break; /* It has booted */ 327 break; /* It has booted */
328 udelay(1000); 328 udelay(1000);
329 } 329 }
330 330
331 if (cpu_isset(cpu_id, cpu_callin_map)) { 331 if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
332 /* number CPUs logically, starting from 1 (BSP is 0) */ 332 /* number CPUs logically, starting from 1 (BSP is 0) */
333 Dprintk("OK.\n"); 333 Dprintk("OK.\n");
334 } else { 334 } else {
@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id)
340 340
341 if (send_status || boot_status) { 341 if (send_status || boot_status) {
342 unmap_cpu_to_physid(cpu_id, phys_id); 342 unmap_cpu_to_physid(cpu_id, phys_id);
343 cpu_clear(cpu_id, cpu_callout_map); 343 cpumask_clear_cpu(cpu_id, &cpu_callout_map);
344 cpu_clear(cpu_id, cpu_callin_map); 344 cpumask_clear_cpu(cpu_id, &cpu_callin_map);
345 cpu_clear(cpu_id, cpu_initialized); 345 cpumask_clear_cpu(cpu_id, &cpu_initialized);
346 cpucount--; 346 cpucount--;
347 } 347 }
348} 348}
@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id)
351{ 351{
352 int timeout; 352 int timeout;
353 353
354 cpu_set(cpu_id, smp_commenced_mask); 354 cpumask_set_cpu(cpu_id, &smp_commenced_mask);
355 355
356 /* 356 /*
357 * Wait 5s total for a response 357 * Wait 5s total for a response
358 */ 358 */
359 for (timeout = 0; timeout < 5000; timeout++) { 359 for (timeout = 0; timeout < 5000; timeout++) {
360 if (cpu_isset(cpu_id, cpu_online_map)) 360 if (cpu_online(cpu_id))
361 break; 361 break;
362 udelay(1000); 362 udelay(1000);
363 } 363 }
364 if (!cpu_isset(cpu_id, cpu_online_map)) 364 if (!cpu_online(cpu_id))
365 BUG(); 365 BUG();
366 366
367 return 0; 367 return 0;
@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
373 unsigned long bogosum = 0; 373 unsigned long bogosum = 0;
374 374
375 for (timeout = 0; timeout < 5000; timeout++) { 375 for (timeout = 0; timeout < 5000; timeout++) {
376 if (cpus_equal(cpu_callin_map, cpu_online_map)) 376 if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
377 break; 377 break;
378 udelay(1000); 378 udelay(1000);
379 } 379 }
380 if (!cpus_equal(cpu_callin_map, cpu_online_map)) 380 if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
381 BUG(); 381 BUG();
382 382
383 for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++) 383 for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
388 */ 388 */
389 Dprintk("Before bogomips.\n"); 389 Dprintk("Before bogomips.\n");
390 if (cpucount) { 390 if (cpucount) {
391 for_each_cpu_mask(cpu_id, cpu_online_map) 391 for_each_cpu(cpu_id,cpu_online_mask)
392 bogosum += cpu_data[cpu_id].loops_per_jiffy; 392 bogosum += cpu_data[cpu_id].loops_per_jiffy;
393 393
394 printk(KERN_INFO "Total of %d processors activated " \ 394 printk(KERN_INFO "Total of %d processors activated " \
@@ -425,7 +425,7 @@ int __init start_secondary(void *unused)
425 cpu_init(); 425 cpu_init();
426 preempt_disable(); 426 preempt_disable();
427 smp_callin(); 427 smp_callin();
428 while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) 428 while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
429 cpu_relax(); 429 cpu_relax();
430 430
431 smp_online(); 431 smp_online();
@@ -463,7 +463,7 @@ static void __init smp_callin(void)
463 int cpu_id = smp_processor_id(); 463 int cpu_id = smp_processor_id();
464 unsigned long timeout; 464 unsigned long timeout;
465 465
466 if (cpu_isset(cpu_id, cpu_callin_map)) { 466 if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
467 printk("huh, phys CPU#%d, CPU#%d already present??\n", 467 printk("huh, phys CPU#%d, CPU#%d already present??\n",
468 phys_id, cpu_id); 468 phys_id, cpu_id);
469 BUG(); 469 BUG();
@@ -474,7 +474,7 @@ static void __init smp_callin(void)
474 timeout = jiffies + (2 * HZ); 474 timeout = jiffies + (2 * HZ);
475 while (time_before(jiffies, timeout)) { 475 while (time_before(jiffies, timeout)) {
476 /* Has the boot CPU finished it's STARTUP sequence ? */ 476 /* Has the boot CPU finished it's STARTUP sequence ? */
477 if (cpu_isset(cpu_id, cpu_callout_map)) 477 if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
478 break; 478 break;
479 cpu_relax(); 479 cpu_relax();
480 } 480 }
@@ -486,7 +486,7 @@ static void __init smp_callin(void)
486 } 486 }
487 487
488 /* Allow the master to continue. */ 488 /* Allow the master to continue. */
489 cpu_set(cpu_id, cpu_callin_map); 489 cpumask_set_cpu(cpu_id, &cpu_callin_map);
490} 490}
491 491
492static void __init smp_online(void) 492static void __init smp_online(void)
@@ -503,7 +503,7 @@ static void __init smp_online(void)
503 /* Save our processor parameters */ 503 /* Save our processor parameters */
504 smp_store_cpu_info(cpu_id); 504 smp_store_cpu_info(cpu_id);
505 505
506 cpu_set(cpu_id, cpu_online_map); 506 set_cpu_online(cpu_id, true);
507} 507}
508 508
509/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ 509/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 5d2858f6eede..2c468e8b5853 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -149,6 +149,7 @@ unsigned long __init zone_sizes_init(void)
149 zholes_size[ZONE_DMA] = mp->holes; 149 zholes_size[ZONE_DMA] = mp->holes;
150 holes += zholes_size[ZONE_DMA]; 150 holes += zholes_size[ZONE_DMA];
151 151
152 node_set_state(nid, N_NORMAL_MEMORY);
152 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 153 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
153 } 154 }
154 155
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 73e2205ebf5a..78b660e903da 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -35,8 +35,6 @@ extern char __init_begin, __init_end;
35 35
36pgd_t swapper_pg_dir[1024]; 36pgd_t swapper_pg_dir[1024];
37 37
38DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
39
40/* 38/*
41 * Cache of MMU context last used. 39 * Cache of MMU context last used.
42 */ 40 */
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
index 273bccab9517..fc98f9b9d4d2 100644
--- a/arch/m68k/Kconfig.nommu
+++ b/arch/m68k/Kconfig.nommu
@@ -2,10 +2,6 @@ config FPU
2 bool 2 bool
3 default n 3 default n
4 4
5config GENERIC_FIND_NEXT_BIT
6 bool
7 default y
8
9config GENERIC_GPIO 5config GENERIC_GPIO
10 bool 6 bool
11 default n 7 default n
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
index e9020f88a748..89cf5b814a4d 100644
--- a/arch/m68k/include/asm/bitops_mm.h
+++ b/arch/m68k/include/asm/bitops_mm.h
@@ -200,6 +200,7 @@ out:
200 res += ((long)p - (long)vaddr - 4) * 8; 200 res += ((long)p - (long)vaddr - 4) * 8;
201 return res < size ? res : size; 201 return res < size ? res : size;
202} 202}
203#define find_first_zero_bit find_first_zero_bit
203 204
204static inline int find_next_zero_bit(const unsigned long *vaddr, int size, 205static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
205 int offset) 206 int offset)
@@ -229,6 +230,7 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
229 /* No zero yet, search remaining full bytes for a zero */ 230 /* No zero yet, search remaining full bytes for a zero */
230 return offset + find_first_zero_bit(p, size - offset); 231 return offset + find_first_zero_bit(p, size - offset);
231} 232}
233#define find_next_zero_bit find_next_zero_bit
232 234
233static inline int find_first_bit(const unsigned long *vaddr, unsigned size) 235static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
234{ 236{
@@ -253,6 +255,7 @@ out:
253 res += ((long)p - (long)vaddr - 4) * 8; 255 res += ((long)p - (long)vaddr - 4) * 8;
254 return res < size ? res : size; 256 return res < size ? res : size;
255} 257}
258#define find_first_bit find_first_bit
256 259
257static inline int find_next_bit(const unsigned long *vaddr, int size, 260static inline int find_next_bit(const unsigned long *vaddr, int size,
258 int offset) 261 int offset)
@@ -282,6 +285,7 @@ static inline int find_next_bit(const unsigned long *vaddr, int size,
282 /* No one yet, search remaining full bytes for a one */ 285 /* No one yet, search remaining full bytes for a one */
283 return offset + find_first_bit(p, size - offset); 286 return offset + find_first_bit(p, size - offset);
284} 287}
288#define find_next_bit find_next_bit
285 289
286/* 290/*
287 * ffz = Find First Zero in word. Undefined if no zero exists, 291 * ffz = Find First Zero in word. Undefined if no zero exists,
@@ -398,6 +402,7 @@ out:
398 res += (p - addr) * 32; 402 res += (p - addr) * 32;
399 return res < size ? res : size; 403 return res < size ? res : size;
400} 404}
405#define find_first_zero_bit_le find_first_zero_bit_le
401 406
402static inline unsigned long find_next_zero_bit_le(const void *addr, 407static inline unsigned long find_next_zero_bit_le(const void *addr,
403 unsigned long size, unsigned long offset) 408 unsigned long size, unsigned long offset)
@@ -427,6 +432,7 @@ static inline unsigned long find_next_zero_bit_le(const void *addr,
427 /* No zero yet, search remaining full bytes for a zero */ 432 /* No zero yet, search remaining full bytes for a zero */
428 return offset + find_first_zero_bit_le(p, size - offset); 433 return offset + find_first_zero_bit_le(p, size - offset);
429} 434}
435#define find_next_zero_bit_le find_next_zero_bit_le
430 436
431static inline int find_first_bit_le(const void *vaddr, unsigned size) 437static inline int find_first_bit_le(const void *vaddr, unsigned size)
432{ 438{
@@ -451,6 +457,7 @@ out:
451 res += (p - addr) * 32; 457 res += (p - addr) * 32;
452 return res < size ? res : size; 458 return res < size ? res : size;
453} 459}
460#define find_first_bit_le find_first_bit_le
454 461
455static inline unsigned long find_next_bit_le(const void *addr, 462static inline unsigned long find_next_bit_le(const void *addr,
456 unsigned long size, unsigned long offset) 463 unsigned long size, unsigned long offset)
@@ -480,6 +487,7 @@ static inline unsigned long find_next_bit_le(const void *addr,
480 /* No set bit yet, search remaining full bytes for a set bit */ 487 /* No set bit yet, search remaining full bytes for a set bit */
481 return offset + find_first_bit_le(p, size - offset); 488 return offset + find_first_bit_le(p, size - offset);
482} 489}
490#define find_next_bit_le find_next_bit_le
483 491
484/* Bitmap functions for the ext2 filesystem. */ 492/* Bitmap functions for the ext2 filesystem. */
485 493
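
The #define find_first_zero_bit find_first_zero_bit lines added above (and their find_next_* / *_le counterparts) follow the kernel's usual override convention: once the architecture defines the function, it also defines a macro of the same name so that an #ifndef guard in the generic bitops header skips its fallback. A minimal stand-alone sketch of that pattern, using the invented name my_op in place of the real bitops helpers, compiles and runs as ordinary C:

#include <stdio.h>

/* "arch" side: provide the implementation and claim the name. */
static inline int my_op(int x)
{
        return x * 2;                   /* arch-specific version */
}
#define my_op my_op                     /* signal: an override exists */

/* "generic" side: only emit a fallback if the name is still free. */
#ifndef my_op
static inline int my_op(int x)
{
        return x + 1;                   /* generic fallback, skipped here */
}
#endif

int main(void)
{
        printf("%d\n", my_op(4));       /* prints 8: the override won */
        return 0;
}
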
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
index 6b0e2d349f0e..72e85acdd7bd 100644
--- a/arch/m68k/include/asm/bitops_no.h
+++ b/arch/m68k/include/asm/bitops_no.h
@@ -319,6 +319,10 @@ found_first:
319found_middle: 319found_middle:
320 return result + ffz(__swab32(tmp)); 320 return result + ffz(__swab32(tmp));
321} 321}
322#define find_next_zero_bit_le find_next_zero_bit_le
323
324extern unsigned long find_next_bit_le(const void *addr,
325 unsigned long size, unsigned long offset);
322 326
323#endif /* __KERNEL__ */ 327#endif /* __KERNEL__ */
324 328
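
The bitops_no.h context above computes little-endian bit positions on this big-endian CPU by byte-swapping the word first (ffz(__swab32(tmp))). The short program below, with local stand-ins for __swab32 and the byte-wise bit test, demonstrates why that works: little-endian bit n of a byte array equals bit n of the byte-swapped word that a big-endian load would produce.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for __swab32(). */
static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0xff00u) |
               ((x << 8) & 0xff0000u) | ((x & 0xffu) << 24);
}

/* Little-endian bit numbering over raw bytes: bit n is bit n%8 of byte n/8. */
static int le_test_bit(const uint8_t *p, unsigned int n)
{
        return (p[n / 8] >> (n % 8)) & 1;
}

int main(void)
{
        uint8_t mem[4] = { 0x01, 0x00, 0x00, 0x80 };    /* LE bits 0 and 31 set */
        /* A big-endian CPU loading these bytes as one word puts byte 0 on top. */
        uint32_t be_word = ((uint32_t)mem[0] << 24) | ((uint32_t)mem[1] << 16) |
                           ((uint32_t)mem[2] << 8)  | mem[3];
        unsigned int n;

        for (n = 0; n < 32; n++)
                if (le_test_bit(mem, n) != (int)((swab32(be_word) >> n) & 1))
                        printf("mismatch at bit %u\n", n);
        printf("LE bit n of the bytes == bit n of swab32(big-endian word)\n");
        return 0;
}
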
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index 8bc842554e5b..9113c2f17607 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -32,8 +32,6 @@
32#include <asm/sections.h> 32#include <asm/sections.h>
33#include <asm/tlb.h> 33#include <asm/tlb.h>
34 34
35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
36
37pg_data_t pg_data_map[MAX_NUMNODES]; 35pg_data_t pg_data_map[MAX_NUMNODES];
38EXPORT_SYMBOL(pg_data_map); 36EXPORT_SYMBOL(pg_data_map);
39 37
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index eccdefe70d4e..e446bab2427b 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -33,12 +33,6 @@ config ARCH_HAS_ILOG2_U32
33config ARCH_HAS_ILOG2_U64 33config ARCH_HAS_ILOG2_U64
34 def_bool n 34 def_bool n
35 35
36config GENERIC_FIND_NEXT_BIT
37 def_bool y
38
39config GENERIC_FIND_BIT_LE
40 def_bool y
41
42config GENERIC_HWEIGHT 36config GENERIC_HWEIGHT
43 def_bool y 37 def_bool y
44 38
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index c8437866d3b7..213f2d671669 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -32,8 +32,6 @@ unsigned int __page_offset;
32EXPORT_SYMBOL(__page_offset); 32EXPORT_SYMBOL(__page_offset);
33 33
34#else 34#else
35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
36
37static int init_bootmem_done; 35static int init_bootmem_done;
38#endif /* CONFIG_MMU */ 36#endif /* CONFIG_MMU */
39 37
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cef1a854487d..653da62d0682 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -821,14 +821,6 @@ config ARCH_SUPPORTS_OPROFILE
821 bool 821 bool
822 default y if !MIPS_MT_SMTC 822 default y if !MIPS_MT_SMTC
823 823
824config GENERIC_FIND_NEXT_BIT
825 bool
826 default y
827
828config GENERIC_FIND_BIT_LE
829 bool
830 default y
831
832config GENERIC_HWEIGHT 824config GENERIC_HWEIGHT
833 bool 825 bool
834 default y 826 default y
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 5358f90b4dd2..83ed00a5644a 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -76,15 +76,6 @@ config DEBUG_STACKOVERFLOW
76 provides another way to check stack overflow happened on kernel mode 76 provides another way to check stack overflow happened on kernel mode
77 stack usually caused by nested interruption. 77 stack usually caused by nested interruption.
78 78
79config DEBUG_STACK_USAGE
80 bool "Enable stack utilization instrumentation"
81 depends on DEBUG_KERNEL
82 help
83 Enables the display of the minimum amount of free stack which each
84 task has ever had available in the sysrq-T and sysrq-P debug output.
85
86 This option will slow down process creation somewhat.
87
88config SMTC_IDLE_HOOK_DEBUG 79config SMTC_IDLE_HOOK_DEBUG
89 bool "Enable additional debug checks before going into CPU idle loop" 80 bool "Enable additional debug checks before going into CPU idle loop"
90 depends on DEBUG_KERNEL && MIPS_MT_SMTC 81 depends on DEBUG_KERNEL && MIPS_MT_SMTC
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 22fdf2f0cc23..ad15fb10322b 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -16,7 +16,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
16CONFIG_AUDIT=y 16CONFIG_AUDIT=y
17CONFIG_TINY_RCU=y 17CONFIG_TINY_RCU=y
18CONFIG_CGROUPS=y 18CONFIG_CGROUPS=y
19CONFIG_CGROUP_NS=y
20CONFIG_CGROUP_CPUACCT=y 19CONFIG_CGROUP_CPUACCT=y
21CONFIG_RELAY=y 20CONFIG_RELAY=y
22CONFIG_BLK_DEV_INITRD=y 21CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 279599e9a779..1aadeb42c5a5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -64,8 +64,6 @@
64 64
65#endif /* CONFIG_MIPS_MT_SMTC */ 65#endif /* CONFIG_MIPS_MT_SMTC */
66 66
67DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
68
69/* 67/*
70 * We have up to 8 empty zeroed pages so we can map one of the right colour 68 * We have up to 8 empty zeroed pages so we can map one of the right colour
71 * when needed. This is necessary only on R4000 / R4400 SC and MC versions 69 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index feaf09cc8632..1f870340ebdd 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -44,9 +44,6 @@ config GENERIC_CALIBRATE_DELAY
44config GENERIC_CMOS_UPDATE 44config GENERIC_CMOS_UPDATE
45 def_bool n 45 def_bool n
46 46
47config GENERIC_FIND_NEXT_BIT
48 def_bool y
49
50config GENERIC_HWEIGHT 47config GENERIC_HWEIGHT
51 def_bool y 48 def_bool y
52 49
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
index 31d76261a3d5..fbb96ae3122a 100644
--- a/arch/mn10300/configs/asb2364_defconfig
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y
8CONFIG_TASK_IO_ACCOUNTING=y 8CONFIG_TASK_IO_ACCOUNTING=y
9CONFIG_LOG_BUF_SHIFT=14 9CONFIG_LOG_BUF_SHIFT=14
10CONFIG_CGROUPS=y 10CONFIG_CGROUPS=y
11CONFIG_CGROUP_NS=y
12CONFIG_CGROUP_FREEZER=y 11CONFIG_CGROUP_FREEZER=y
13CONFIG_CGROUP_DEVICE=y 12CONFIG_CGROUP_DEVICE=y
14CONFIG_CGROUP_CPUACCT=y 13CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 86af0d7d0771..2623d19f4f4c 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -87,7 +87,7 @@ static void mn10300_cpupic_mask_ack(struct irq_data *d)
87 tmp2 = GxICR(irq); 87 tmp2 = GxICR(irq);
88 88
89 irq_affinity_online[irq] = 89 irq_affinity_online[irq] =
90 any_online_cpu(*d->affinity); 90 cpumask_any_and(d->affinity, cpu_online_mask);
91 CROSS_GxICR(irq, irq_affinity_online[irq]) = 91 CROSS_GxICR(irq, irq_affinity_online[irq]) =
92 (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT; 92 (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
93 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); 93 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
@@ -124,7 +124,8 @@ static void mn10300_cpupic_unmask_clear(struct irq_data *d)
124 } else { 124 } else {
125 tmp = GxICR(irq); 125 tmp = GxICR(irq);
126 126
127 irq_affinity_online[irq] = any_online_cpu(*d->affinity); 127 irq_affinity_online[irq] = cpumask_any_and(d->affinity,
128 cpu_online_mask);
128 CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; 129 CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
129 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); 130 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
130 } 131 }
@@ -366,11 +367,11 @@ void migrate_irqs(void)
366 if (irqd_is_per_cpu(data)) 367 if (irqd_is_per_cpu(data))
367 continue; 368 continue;
368 369
369 if (cpu_isset(self, data->affinity) && 370 if (cpumask_test_cpu(self, &data->affinity) &&
370 !cpus_intersects(irq_affinity[irq], cpu_online_map)) { 371 !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
371 int cpu_id; 372 int cpu_id;
372 cpu_id = first_cpu(cpu_online_map); 373 cpu_id = cpumask_first(cpu_online_mask);
373 cpu_set(cpu_id, data->affinity); 374 cpumask_set_cpu(cpu_id, &data->affinity);
374 } 375 }
375 /* We need to operate irq_affinity_online atomically. */ 376 /* We need to operate irq_affinity_online atomically. */
376 arch_local_cli_save(flags); 377 arch_local_cli_save(flags);
@@ -381,7 +382,8 @@ void migrate_irqs(void)
381 GxICR(irq) = x & GxICR_LEVEL; 382 GxICR(irq) = x & GxICR_LEVEL;
382 tmp = GxICR(irq); 383 tmp = GxICR(irq);
383 384
384 new = any_online_cpu(data->affinity); 385 new = cpumask_any_and(&data->affinity,
386 cpu_online_mask);
385 irq_affinity_online[irq] = new; 387 irq_affinity_online[irq] = new;
386 388
387 CROSS_GxICR(irq, new) = 389 CROSS_GxICR(irq, new) =
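
The mn10300 conversion above replaces the old any_online_cpu(*mask) helper, which took a cpumask by value and implicitly intersected it with the online map, with cpumask_any_and(mask, cpu_online_mask), which takes two mask pointers. A toy model with one-word masks follows; mask_any_and is an invented stand-in, and "any" is modelled as the lowest set bit of the intersection, which is what the kernel macro currently resolves to.

#include <stdio.h>

#define NR_CPUS 32

/* Toy stand-in for cpumask_any_and(): masks are one word, one bit per CPU;
 * no match returns NR_CPUS, echoing the kernel's ">= nr_cpu_ids" convention. */
static unsigned int mask_any_and(unsigned long a, unsigned long b)
{
        unsigned long both = a & b;
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (both & (1UL << cpu))
                        return cpu;
        return NR_CPUS;
}

int main(void)
{
        unsigned long affinity = 0x0c;  /* CPUs 2 and 3 allowed */
        unsigned long online   = 0x0a;  /* CPUs 1 and 3 online  */

        printf("%u\n", mask_any_and(affinity, online)); /* prints 3 */
        return 0;
}
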
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 83fb27912231..9242e9fcc564 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -309,7 +309,7 @@ static void send_IPI_mask(const cpumask_t *cpumask, int irq)
309 u16 tmp; 309 u16 tmp;
310 310
311 for (i = 0; i < NR_CPUS; i++) { 311 for (i = 0; i < NR_CPUS; i++) {
312 if (cpu_isset(i, *cpumask)) { 312 if (cpumask_test_cpu(i, cpumask)) {
313 /* send IPI */ 313 /* send IPI */
314 tmp = CROSS_GxICR(irq, i); 314 tmp = CROSS_GxICR(irq, i);
315 CROSS_GxICR(irq, i) = 315 CROSS_GxICR(irq, i) =
@@ -342,8 +342,8 @@ void send_IPI_allbutself(int irq)
342{ 342{
343 cpumask_t cpumask; 343 cpumask_t cpumask;
344 344
345 cpumask = cpu_online_map; 345 cpumask_copy(&cpumask, cpu_online_mask);
346 cpu_clear(smp_processor_id(), cpumask); 346 cpumask_clear_cpu(smp_processor_id(), &cpumask);
347 send_IPI_mask(&cpumask, irq); 347 send_IPI_mask(&cpumask, irq);
348} 348}
349 349
@@ -393,8 +393,8 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
393 393
394 data.func = func; 394 data.func = func;
395 data.info = info; 395 data.info = info;
396 data.started = cpu_online_map; 396 cpumask_copy(&data.started, cpu_online_mask);
397 cpu_clear(smp_processor_id(), data.started); 397 cpumask_clear_cpu(smp_processor_id(), &data.started);
398 data.wait = wait; 398 data.wait = wait;
399 if (wait) 399 if (wait)
400 data.finished = data.started; 400 data.finished = data.started;
@@ -410,14 +410,14 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
410 if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) { 410 if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
411 for (cnt = 0; 411 for (cnt = 0;
412 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && 412 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
413 !cpus_empty(data.started); 413 !cpumask_empty(&data.started);
414 cnt++) 414 cnt++)
415 mdelay(1); 415 mdelay(1);
416 416
417 if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) { 417 if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
418 for (cnt = 0; 418 for (cnt = 0;
419 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && 419 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
420 !cpus_empty(data.finished); 420 !cpumask_empty(&data.finished);
421 cnt++) 421 cnt++)
422 mdelay(1); 422 mdelay(1);
423 } 423 }
@@ -428,10 +428,10 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
428 } else { 428 } else {
429 /* If timeout value is zero, wait until cpumask has been 429 /* If timeout value is zero, wait until cpumask has been
430 * cleared */ 430 * cleared */
431 while (!cpus_empty(data.started)) 431 while (!cpumask_empty(&data.started))
432 barrier(); 432 barrier();
433 if (wait) 433 if (wait)
434 while (!cpus_empty(data.finished)) 434 while (!cpumask_empty(&data.finished))
435 barrier(); 435 barrier();
436 } 436 }
437 437
@@ -472,12 +472,12 @@ void stop_this_cpu(void *unused)
472#endif /* CONFIG_GDBSTUB */ 472#endif /* CONFIG_GDBSTUB */
473 473
474 flags = arch_local_cli_save(); 474 flags = arch_local_cli_save();
475 cpu_clear(smp_processor_id(), cpu_online_map); 475 set_cpu_online(smp_processor_id(), false);
476 476
477 while (!stopflag) 477 while (!stopflag)
478 cpu_relax(); 478 cpu_relax();
479 479
480 cpu_set(smp_processor_id(), cpu_online_map); 480 set_cpu_online(smp_processor_id(), true);
481 arch_local_irq_restore(flags); 481 arch_local_irq_restore(flags);
482} 482}
483 483
@@ -529,12 +529,13 @@ void smp_nmi_call_function_interrupt(void)
529 * execute the function 529 * execute the function
530 */ 530 */
531 smp_mb(); 531 smp_mb();
532 cpu_clear(smp_processor_id(), nmi_call_data->started); 532 cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
533 (*func)(info); 533 (*func)(info);
534 534
535 if (wait) { 535 if (wait) {
536 smp_mb(); 536 smp_mb();
537 cpu_clear(smp_processor_id(), nmi_call_data->finished); 537 cpumask_clear_cpu(smp_processor_id(),
538 &nmi_call_data->finished);
538 } 539 }
539} 540}
540 541
@@ -657,7 +658,7 @@ int __init start_secondary(void *unused)
657{ 658{
658 smp_cpu_init(); 659 smp_cpu_init();
659 smp_callin(); 660 smp_callin();
660 while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) 661 while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
661 cpu_relax(); 662 cpu_relax();
662 663
663 local_flush_tlb(); 664 local_flush_tlb();
@@ -780,13 +781,14 @@ static int __init do_boot_cpu(int phy_id)
780 781
781 if (send_status == 0) { 782 if (send_status == 0) {
782 /* Allow AP to start initializing */ 783 /* Allow AP to start initializing */
783 cpu_set(cpu_id, cpu_callout_map); 784 cpumask_set_cpu(cpu_id, &cpu_callout_map);
784 785
785 /* Wait for setting cpu_callin_map */ 786 /* Wait for setting cpu_callin_map */
786 timeout = 0; 787 timeout = 0;
787 do { 788 do {
788 udelay(1000); 789 udelay(1000);
789 callin_status = cpu_isset(cpu_id, cpu_callin_map); 790 callin_status = cpumask_test_cpu(cpu_id,
791 &cpu_callin_map);
790 } while (callin_status == 0 && timeout++ < 5000); 792 } while (callin_status == 0 && timeout++ < 5000);
791 793
792 if (callin_status == 0) 794 if (callin_status == 0)
@@ -796,9 +798,9 @@ static int __init do_boot_cpu(int phy_id)
796 } 798 }
797 799
798 if (send_status == GxICR_REQUEST || callin_status == 0) { 800 if (send_status == GxICR_REQUEST || callin_status == 0) {
799 cpu_clear(cpu_id, cpu_callout_map); 801 cpumask_clear_cpu(cpu_id, &cpu_callout_map);
800 cpu_clear(cpu_id, cpu_callin_map); 802 cpumask_clear_cpu(cpu_id, &cpu_callin_map);
801 cpu_clear(cpu_id, cpu_initialized); 803 cpumask_clear_cpu(cpu_id, &cpu_initialized);
802 cpucount--; 804 cpucount--;
803 return 1; 805 return 1;
804 } 806 }
@@ -833,7 +835,7 @@ static void __init smp_callin(void)
833 cpu = smp_processor_id(); 835 cpu = smp_processor_id();
834 timeout = jiffies + (2 * HZ); 836 timeout = jiffies + (2 * HZ);
835 837
836 if (cpu_isset(cpu, cpu_callin_map)) { 838 if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
837 printk(KERN_ERR "CPU#%d already present.\n", cpu); 839 printk(KERN_ERR "CPU#%d already present.\n", cpu);
838 BUG(); 840 BUG();
839 } 841 }
@@ -841,7 +843,7 @@ static void __init smp_callin(void)
841 843
842 /* Wait for AP startup 2s total */ 844 /* Wait for AP startup 2s total */
843 while (time_before(jiffies, timeout)) { 845 while (time_before(jiffies, timeout)) {
844 if (cpu_isset(cpu, cpu_callout_map)) 846 if (cpumask_test_cpu(cpu, &cpu_callout_map))
845 break; 847 break;
846 cpu_relax(); 848 cpu_relax();
847 } 849 }
@@ -861,11 +863,11 @@ static void __init smp_callin(void)
861 smp_store_cpu_info(cpu); 863 smp_store_cpu_info(cpu);
862 864
863 /* Allow the boot processor to continue */ 865 /* Allow the boot processor to continue */
864 cpu_set(cpu, cpu_callin_map); 866 cpumask_set_cpu(cpu, &cpu_callin_map);
865} 867}
866 868
867/** 869/**
868 * smp_online - Set cpu_online_map 870 * smp_online - Set cpu_online_mask
869 */ 871 */
870static void __init smp_online(void) 872static void __init smp_online(void)
871{ 873{
@@ -875,7 +877,7 @@ static void __init smp_online(void)
875 877
876 local_irq_enable(); 878 local_irq_enable();
877 879
878 cpu_set(cpu, cpu_online_map); 880 set_cpu_online(cpu, true);
879 smp_wmb(); 881 smp_wmb();
880} 882}
881 883
@@ -892,13 +894,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
892/* 894/*
893 * smp_prepare_boot_cpu - Set up stuff for the boot processor. 895 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
894 * 896 *
895 * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot 897 * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
896 * processor (CPU 0). 898 * processor (CPU 0).
897 */ 899 */
898void __devinit smp_prepare_boot_cpu(void) 900void __devinit smp_prepare_boot_cpu(void)
899{ 901{
900 cpu_set(0, cpu_callout_map); 902 cpumask_set_cpu(0, &cpu_callout_map);
901 cpu_set(0, cpu_callin_map); 903 cpumask_set_cpu(0, &cpu_callin_map);
902 current_thread_info()->cpu = 0; 904 current_thread_info()->cpu = 0;
903} 905}
904 906
@@ -931,16 +933,16 @@ int __devinit __cpu_up(unsigned int cpu)
931 run_wakeup_cpu(cpu); 933 run_wakeup_cpu(cpu);
932#endif /* CONFIG_HOTPLUG_CPU */ 934#endif /* CONFIG_HOTPLUG_CPU */
933 935
934 cpu_set(cpu, smp_commenced_mask); 936 cpumask_set_cpu(cpu, &smp_commenced_mask);
935 937
936 /* Wait 5s total for a response */ 938 /* Wait 5s total for a response */
937 for (timeout = 0 ; timeout < 5000 ; timeout++) { 939 for (timeout = 0 ; timeout < 5000 ; timeout++) {
938 if (cpu_isset(cpu, cpu_online_map)) 940 if (cpu_online(cpu))
939 break; 941 break;
940 udelay(1000); 942 udelay(1000);
941 } 943 }
942 944
943 BUG_ON(!cpu_isset(cpu, cpu_online_map)); 945 BUG_ON(!cpu_online(cpu));
944 return 0; 946 return 0;
945} 947}
946 948
@@ -986,7 +988,7 @@ int __cpu_disable(void)
986 return -EBUSY; 988 return -EBUSY;
987 989
988 migrate_irqs(); 990 migrate_irqs();
 989 cpu_clear(cpu, current->active_mm->cpu_vm_mask); 991 cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
990 return 0; 992 return 0;
991} 993}
992 994
@@ -1091,13 +1093,13 @@ static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
1091 do { 1093 do {
1092 mn10300_local_dcache_inv_range(start, end); 1094 mn10300_local_dcache_inv_range(start, end);
1093 barrier(); 1095 barrier();
1094 } while (!cpus_empty(nmi_call_func_mask_data.started)); 1096 } while (!cpumask_empty(&nmi_call_func_mask_data.started));
1095 1097
1096 if (wait) { 1098 if (wait) {
1097 do { 1099 do {
1098 mn10300_local_dcache_inv_range(start, end); 1100 mn10300_local_dcache_inv_range(start, end);
1099 barrier(); 1101 barrier();
1100 } while (!cpus_empty(nmi_call_func_mask_data.finished)); 1102 } while (!cpumask_empty(&nmi_call_func_mask_data.finished));
1101 } 1103 }
1102 1104
1103 spin_unlock(&smp_nmi_call_lock); 1105 spin_unlock(&smp_nmi_call_lock);
@@ -1108,9 +1110,9 @@ static void restart_wakeup_cpu(void)
1108{ 1110{
1109 unsigned int cpu = smp_processor_id(); 1111 unsigned int cpu = smp_processor_id();
1110 1112
1111 cpu_set(cpu, cpu_callin_map); 1113 cpumask_set_cpu(cpu, &cpu_callin_map);
1112 local_flush_tlb(); 1114 local_flush_tlb();
1113 cpu_set(cpu, cpu_online_map); 1115 set_cpu_online(cpu, true);
1114 smp_wmb(); 1116 smp_wmb();
1115} 1117}
1116 1118
@@ -1141,8 +1143,9 @@ static void sleep_cpu(void *unused)
1141static void run_sleep_cpu(unsigned int cpu) 1143static void run_sleep_cpu(unsigned int cpu)
1142{ 1144{
1143 unsigned long flags; 1145 unsigned long flags;
1144 cpumask_t cpumask = cpumask_of(cpu); 1146 cpumask_t cpumask;
1145 1147
 1148 cpumask_copy(&cpumask, cpumask_of(cpu));
1146 flags = arch_local_cli_save(); 1149 flags = arch_local_cli_save();
1147 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1); 1150 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
1148 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0); 1151 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
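
send_IPI_allbutself() above now builds its target set by copying the online mask and clearing the calling CPU instead of assigning cpumask_t structures directly. The same idea in stand-alone form, with one-word masks and invented helper names:

#include <stdio.h>

#define NR_CPUS 8

/* Walk a one-word mask and "send" to every set bit. */
static void send_ipi_mask(unsigned long mask)
{
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (mask & (1UL << cpu))
                        printf("IPI -> cpu %u\n", cpu);
}

int main(void)
{
        unsigned long online = 0x0f;    /* CPUs 0-3 online */
        unsigned int self = 2;          /* pretend we run on CPU 2 */
        unsigned long mask;

        mask = online;                  /* cpumask_copy(&mask, cpu_online_mask) */
        mask &= ~(1UL << self);         /* cpumask_clear_cpu(self, &mask)       */
        send_ipi_mask(mask);            /* reaches CPUs 0, 1 and 3              */
        return 0;
}
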
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
index 4a6e9a4b5b27..2d23b9eeee62 100644
--- a/arch/mn10300/mm/cache-smp.c
+++ b/arch/mn10300/mm/cache-smp.c
@@ -74,7 +74,7 @@ void smp_cache_interrupt(void)
74 break; 74 break;
75 } 75 }
76 76
77 cpu_clear(smp_processor_id(), smp_cache_ipi_map); 77 cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
78} 78}
79 79
80/** 80/**
@@ -94,12 +94,12 @@ void smp_cache_call(unsigned long opr_mask,
94 smp_cache_mask = opr_mask; 94 smp_cache_mask = opr_mask;
95 smp_cache_start = start; 95 smp_cache_start = start;
96 smp_cache_end = end; 96 smp_cache_end = end;
97 smp_cache_ipi_map = cpu_online_map; 97 cpumask_copy(&smp_cache_ipi_map, cpu_online_mask);
98 cpu_clear(smp_processor_id(), smp_cache_ipi_map); 98 cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
99 99
100 send_IPI_allbutself(FLUSH_CACHE_IPI); 100 send_IPI_allbutself(FLUSH_CACHE_IPI);
101 101
102 while (!cpus_empty(smp_cache_ipi_map)) 102 while (!cpumask_empty(&smp_cache_ipi_map))
103 /* nothing. lockup detection does not belong here */ 103 /* nothing. lockup detection does not belong here */
104 mb(); 104 mb();
105} 105}
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 48907cc3bdb7..13801824e3ee 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -37,8 +37,6 @@
37#include <asm/tlb.h> 37#include <asm/tlb.h>
38#include <asm/sections.h> 38#include <asm/sections.h>
39 39
40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
41
42unsigned long highstart_pfn, highend_pfn; 40unsigned long highstart_pfn, highend_pfn;
43 41
44#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT 42#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index 0b6a5ad1960e..9a777498a916 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -64,7 +64,7 @@ void smp_flush_tlb(void *unused)
64 64
65 cpu_id = get_cpu(); 65 cpu_id = get_cpu();
66 66
67 if (!cpu_isset(cpu_id, flush_cpumask)) 67 if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
68 /* This was a BUG() but until someone can quote me the line 68 /* This was a BUG() but until someone can quote me the line
69 * from the intel manual that guarantees an IPI to multiple 69 * from the intel manual that guarantees an IPI to multiple
70 * CPUs is retried _only_ on the erroring CPUs its staying as a 70 * CPUs is retried _only_ on the erroring CPUs its staying as a
@@ -80,7 +80,7 @@ void smp_flush_tlb(void *unused)
80 local_flush_tlb_page(flush_mm, flush_va); 80 local_flush_tlb_page(flush_mm, flush_va);
81 81
82 smp_mb__before_clear_bit(); 82 smp_mb__before_clear_bit();
83 cpu_clear(cpu_id, flush_cpumask); 83 cpumask_clear_cpu(cpu_id, &flush_cpumask);
84 smp_mb__after_clear_bit(); 84 smp_mb__after_clear_bit();
85out: 85out:
86 put_cpu(); 86 put_cpu();
@@ -103,11 +103,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
103 * - we do not send IPIs to as-yet unbooted CPUs. 103 * - we do not send IPIs to as-yet unbooted CPUs.
104 */ 104 */
105 BUG_ON(!mm); 105 BUG_ON(!mm);
106 BUG_ON(cpus_empty(cpumask)); 106 BUG_ON(cpumask_empty(&cpumask));
107 BUG_ON(cpu_isset(smp_processor_id(), cpumask)); 107 BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
108 108
109 cpus_and(tmp, cpumask, cpu_online_map); 109 cpumask_and(&tmp, &cpumask, cpu_online_mask);
110 BUG_ON(!cpus_equal(cpumask, tmp)); 110 BUG_ON(!cpumask_equal(&cpumask, &tmp));
111 111
112 /* I'm not happy about this global shared spinlock in the MM hot path, 112 /* I'm not happy about this global shared spinlock in the MM hot path,
113 * but we'll see how contended it is. 113 * but we'll see how contended it is.
@@ -128,7 +128,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
128 /* FIXME: if NR_CPUS>=3, change send_IPI_mask */ 128 /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
129 smp_call_function(smp_flush_tlb, NULL, 1); 129 smp_call_function(smp_flush_tlb, NULL, 1);
130 130
131 while (!cpus_empty(flush_cpumask)) 131 while (!cpumask_empty(&flush_cpumask))
132 /* Lockup detection does not belong here */ 132 /* Lockup detection does not belong here */
133 smp_mb(); 133 smp_mb();
134 134
@@ -146,11 +146,11 @@ void flush_tlb_mm(struct mm_struct *mm)
146 cpumask_t cpu_mask; 146 cpumask_t cpu_mask;
147 147
148 preempt_disable(); 148 preempt_disable();
149 cpu_mask = mm->cpu_vm_mask; 149 cpumask_copy(&cpu_mask, mm_cpumask(mm));
150 cpu_clear(smp_processor_id(), cpu_mask); 150 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
151 151
152 local_flush_tlb(); 152 local_flush_tlb();
153 if (!cpus_empty(cpu_mask)) 153 if (!cpumask_empty(&cpu_mask))
154 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 154 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
155 155
156 preempt_enable(); 156 preempt_enable();
@@ -165,11 +165,11 @@ void flush_tlb_current_task(void)
165 cpumask_t cpu_mask; 165 cpumask_t cpu_mask;
166 166
167 preempt_disable(); 167 preempt_disable();
168 cpu_mask = mm->cpu_vm_mask; 168 cpumask_copy(&cpu_mask, mm_cpumask(mm));
169 cpu_clear(smp_processor_id(), cpu_mask); 169 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
170 170
171 local_flush_tlb(); 171 local_flush_tlb();
172 if (!cpus_empty(cpu_mask)) 172 if (!cpumask_empty(&cpu_mask))
173 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 173 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
174 174
175 preempt_enable(); 175 preempt_enable();
@@ -186,11 +186,11 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
186 cpumask_t cpu_mask; 186 cpumask_t cpu_mask;
187 187
188 preempt_disable(); 188 preempt_disable();
189 cpu_mask = mm->cpu_vm_mask; 189 cpumask_copy(&cpu_mask, mm_cpumask(mm));
190 cpu_clear(smp_processor_id(), cpu_mask); 190 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
191 191
192 local_flush_tlb_page(mm, va); 192 local_flush_tlb_page(mm, va);
193 if (!cpus_empty(cpu_mask)) 193 if (!cpumask_empty(&cpu_mask))
194 flush_tlb_others(cpu_mask, mm, va); 194 flush_tlb_others(cpu_mask, mm, va);
195 195
196 preempt_enable(); 196 preempt_enable();
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 69ff049c8571..65adc86a230e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -47,14 +47,6 @@ config ARCH_HAS_ILOG2_U64
47 bool 47 bool
48 default n 48 default n
49 49
50config GENERIC_FIND_NEXT_BIT
51 bool
52 default y
53
54config GENERIC_FIND_BIT_LE
55 bool
56 default y
57
58config GENERIC_BUG 50config GENERIC_BUG
59 bool 51 bool
60 default y 52 default y
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 2e73623feb6b..e8f8037d872b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -33,15 +33,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
33 33
34#endif /* !ASSEMBLY */ 34#endif /* !ASSEMBLY */
35 35
36/*
37 * This magic constant controls our willingness to transfer
38 * a process across CPUs. Such a transfer incurs cache and tlb
39 * misses. The current value is inherited from i386. Still needs
40 * to be tuned for parisc.
41 */
42
43#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
44
45#define raw_smp_processor_id() (current_thread_info()->cpu) 36#define raw_smp_processor_id() (current_thread_info()->cpu)
46 37
47#else /* CONFIG_SMP */ 38#else /* CONFIG_SMP */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 5fa1e273006e..82f364e209fc 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -31,8 +31,6 @@
31#include <asm/mmzone.h> 31#include <asm/mmzone.h>
32#include <asm/sections.h> 32#include <asm/sections.h>
33 33
34DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36extern int data_start; 34extern int data_start;
37 35
38#ifdef CONFIG_DISCONTIGMEM 36#ifdef CONFIG_DISCONTIGMEM
@@ -686,7 +684,7 @@ void show_mem(unsigned int filter)
686 int shared = 0, cached = 0; 684 int shared = 0, cached = 0;
687 685
688 printk(KERN_INFO "Mem-info:\n"); 686 printk(KERN_INFO "Mem-info:\n");
689 show_free_areas(); 687 show_free_areas(filter);
690#ifndef CONFIG_DISCONTIGMEM 688#ifndef CONFIG_DISCONTIGMEM
691 i = max_mapnr; 689 i = max_mapnr;
692 while (i-- > 0) { 690 while (i-- > 0) {
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a3128ca0fe11..2729c6663d8a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -91,14 +91,6 @@ config GENERIC_HWEIGHT
91 bool 91 bool
92 default y 92 default y
93 93
94config GENERIC_FIND_NEXT_BIT
95 bool
96 default y
97
98config GENERIC_FIND_BIT_LE
99 bool
100 default y
101
102config GENERIC_GPIO 94config GENERIC_GPIO
103 bool 95 bool
104 help 96 help
@@ -140,6 +132,8 @@ config PPC
140 select IRQ_PER_CPU 132 select IRQ_PER_CPU
141 select GENERIC_IRQ_SHOW 133 select GENERIC_IRQ_SHOW
142 select GENERIC_IRQ_SHOW_LEVEL 134 select GENERIC_IRQ_SHOW_LEVEL
135 select HAVE_RCU_TABLE_FREE if SMP
136 select HAVE_SYSCALL_TRACEPOINTS
143 137
144config EARLY_PRINTK 138config EARLY_PRINTK
145 bool 139 bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a597dd77b903..e72dcf6a421d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -35,27 +35,6 @@ config DEBUG_STACKOVERFLOW
35 This option will cause messages to be printed if free stack space 35 This option will cause messages to be printed if free stack space
36 drops below a certain limit. 36 drops below a certain limit.
37 37
38config DEBUG_STACK_USAGE
39 bool "Stack utilization instrumentation"
40 depends on DEBUG_KERNEL
41 help
42 Enables the display of the minimum amount of free stack which each
43 task has ever had available in the sysrq-T and sysrq-P debug output.
44
45 This option will slow down process creation somewhat.
46
47config DEBUG_PER_CPU_MAPS
48 bool "Debug access to per_cpu maps"
49 depends on DEBUG_KERNEL
50 depends on SMP
51 default n
52 ---help---
53 Say Y to verify that the per_cpu map being accessed has
54 been setup. Adds a fair amount of code to kernel memory
55 and decreases performance.
56
57 Say N if unsure.
58
59config HCALL_STATS 38config HCALL_STATS
60 bool "Hypervisor call instrumentation" 39 bool "Hypervisor call instrumentation"
61 depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS 40 depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 2779f08313a5..22dd6ae84da0 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -530,5 +530,23 @@
530 0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */ 530 0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */
531 0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>; 531 0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>;
532 }; 532 };
533
534 MSI: ppc4xx-msi@C10000000 {
535 compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
536 reg = < 0xC 0x10000000 0x100>;
537 sdr-base = <0x36C>;
538 msi-data = <0x00000000>;
539 msi-mask = <0x44440000>;
540 interrupt-count = <3>;
541 interrupts = <0 1 2 3>;
542 interrupt-parent = <&UIC3>;
543 #interrupt-cells = <1>;
544 #address-cells = <0>;
545 #size-cells = <0>;
546 interrupt-map = <0 &UIC3 0x18 1
547 1 &UIC3 0x19 1
548 2 &UIC3 0x1A 1
549 3 &UIC3 0x1B 1>;
550 };
533 }; 551 };
534}; 552};
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index 7c3be5e45748..f913dbe25d35 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -442,6 +442,24 @@
442 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; 442 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
443 }; 443 };
444 444
445 MSI: ppc4xx-msi@400300000 {
446 compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
447 reg = < 0x4 0x00300000 0x100>;
448 sdr-base = <0x3B0>;
449 msi-data = <0x00000000>;
450 msi-mask = <0x44440000>;
451 interrupt-count = <3>;
452 interrupts =<0 1 2 3>;
453 interrupt-parent = <&UIC0>;
454 #interrupt-cells = <1>;
455 #address-cells = <0>;
456 #size-cells = <0>;
457 interrupt-map = <0 &UIC0 0xC 1
458 1 &UIC0 0x0D 1
459 2 &UIC0 0x0E 1
460 3 &UIC0 0x0F 1>;
461 };
462
445 I2O: i2o@400100000 { 463 I2O: i2o@400100000 {
446 compatible = "ibm,i2o-440spe"; 464 compatible = "ibm,i2o-440spe";
447 reg = <0x00000004 0x00100000 0x100>; 465 reg = <0x00000004 0x00100000 0x100>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 89edb16649c3..1613d6e4049e 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -403,5 +403,33 @@
403 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */ 403 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */
404 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>; 404 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>;
405 }; 405 };
406
407 MSI: ppc4xx-msi@C10000000 {
408 compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
409 reg = < 0x0 0xEF620000 0x100>;
410 sdr-base = <0x4B0>;
411 msi-data = <0x00000000>;
412 msi-mask = <0x44440000>;
413 interrupt-count = <12>;
414 interrupts = <0 1 2 3 4 5 6 7 8 9 0xA 0xB 0xC 0xD>;
415 interrupt-parent = <&UIC2>;
416 #interrupt-cells = <1>;
417 #address-cells = <0>;
418 #size-cells = <0>;
419 interrupt-map = <0 &UIC2 0x10 1
420 1 &UIC2 0x11 1
421 2 &UIC2 0x12 1
422 2 &UIC2 0x13 1
423 2 &UIC2 0x14 1
424 2 &UIC2 0x15 1
425 2 &UIC2 0x16 1
426 2 &UIC2 0x17 1
427 2 &UIC2 0x18 1
428 2 &UIC2 0x19 1
429 2 &UIC2 0x1A 1
430 2 &UIC2 0x1B 1
431 2 &UIC2 0x1C 1
432 3 &UIC2 0x1D 1>;
433 };
406 }; 434 };
407}; 435};
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index 761faa7b6964..ac1eb320c7b4 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -176,6 +176,19 @@
176 sleep = <&pmc 0x00300000>; 176 sleep = <&pmc 0x00300000>;
177 }; 177 };
178 178
179 ptp_clock@24E00 {
180 compatible = "fsl,etsec-ptp";
181 reg = <0x24E00 0xB0>;
182 interrupts = <12 0x8 13 0x8>;
183 interrupt-parent = < &ipic >;
184 fsl,tclk-period = <10>;
185 fsl,tmr-prsc = <100>;
186 fsl,tmr-add = <0x999999A4>;
187 fsl,tmr-fiper1 = <0x3B9AC9F6>;
188 fsl,tmr-fiper2 = <0x00018696>;
189 fsl,max-adj = <659999998>;
190 };
191
179 enet0: ethernet@24000 { 192 enet0: ethernet@24000 {
180 #address-cells = <1>; 193 #address-cells = <1>;
181 #size-cells = <1>; 194 #size-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index cafc1285c140..f6c04d25e916 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -324,6 +324,19 @@
324 }; 324 };
325 }; 325 };
326 326
327 ptp_clock@24E00 {
328 compatible = "fsl,etsec-ptp";
329 reg = <0x24E00 0xB0>;
330 interrupts = <68 2 69 2 70 2 71 2>;
331 interrupt-parent = < &mpic >;
332 fsl,tclk-period = <5>;
333 fsl,tmr-prsc = <200>;
334 fsl,tmr-add = <0xAAAAAAAB>;
335 fsl,tmr-fiper1 = <0x3B9AC9FB>;
336 fsl,tmr-fiper2 = <0x3B9AC9FB>;
337 fsl,max-adj = <499999999>;
338 };
339
327 enet0: ethernet@24000 { 340 enet0: ethernet@24000 {
328 #address-cells = <1>; 341 #address-cells = <1>;
329 #size-cells = <1>; 342 #size-cells = <1>;
diff --git a/arch/powerpc/boot/dts/p2020ds.dts b/arch/powerpc/boot/dts/p2020ds.dts
index 2bcf3683d223..dae403100f2f 100644
--- a/arch/powerpc/boot/dts/p2020ds.dts
+++ b/arch/powerpc/boot/dts/p2020ds.dts
@@ -178,6 +178,19 @@
178 178
179 }; 179 };
180 180
181 ptp_clock@24E00 {
182 compatible = "fsl,etsec-ptp";
183 reg = <0x24E00 0xB0>;
184 interrupts = <68 2 69 2 70 2>;
185 interrupt-parent = < &mpic >;
186 fsl,tclk-period = <5>;
187 fsl,tmr-prsc = <200>;
188 fsl,tmr-add = <0xCCCCCCCD>;
189 fsl,tmr-fiper1 = <0x3B9AC9FB>;
190 fsl,tmr-fiper2 = <0x0001869B>;
191 fsl,max-adj = <249999999>;
192 };
193
181 enet0: ethernet@24000 { 194 enet0: ethernet@24000 {
182 tbi-handle = <&tbi0>; 195 tbi-handle = <&tbi0>;
183 phy-handle = <&phy0>; 196 phy-handle = <&phy0>;
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index 3782a58f13be..1d7a05f3021e 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -224,6 +224,19 @@
224 status = "disabled"; 224 status = "disabled";
225 }; 225 };
226 226
227 ptp_clock@24E00 {
228 compatible = "fsl,etsec-ptp";
229 reg = <0x24E00 0xB0>;
230 interrupts = <68 2 69 2 70 2>;
231 interrupt-parent = < &mpic >;
232 fsl,tclk-period = <5>;
233 fsl,tmr-prsc = <200>;
234 fsl,tmr-add = <0xCCCCCCCD>;
235 fsl,tmr-fiper1 = <0x3B9AC9FB>;
236 fsl,tmr-fiper2 = <0x0001869B>;
237 fsl,max-adj = <249999999>;
238 };
239
227 enet0: ethernet@24000 { 240 enet0: ethernet@24000 {
228 fixed-link = <1 1 1000 0 0>; 241 fixed-link = <1 1 1000 0 0>;
229 phy-connection-type = "rgmii-id"; 242 phy-connection-type = "rgmii-id";
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts
index 81636c01d906..d86a3a498118 100644
--- a/arch/powerpc/boot/dts/redwood.dts
+++ b/arch/powerpc/boot/dts/redwood.dts
@@ -358,8 +358,28 @@
358 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; 358 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
359 }; 359 };
360 360
361 MSI: ppc4xx-msi@400300000 {
362 compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
363 reg = < 0x4 0x00300000 0x100
364 0x4 0x00300000 0x100>;
365 sdr-base = <0x3B0>;
366 msi-data = <0x00000000>;
367 msi-mask = <0x44440000>;
368 interrupt-count = <3>;
369 interrupts =<0 1 2 3>;
370 interrupt-parent = <&UIC0>;
371 #interrupt-cells = <1>;
372 #address-cells = <0>;
373 #size-cells = <0>;
374 interrupt-map = <0 &UIC0 0xC 1
375 1 &UIC0 0x0D 1
376 2 &UIC0 0x0E 1
377 3 &UIC0 0x0F 1>;
378 };
379
361 }; 380 };
362 381
382
363 chosen { 383 chosen {
364 linux,stdout-path = "/plb/opb/serial@ef600200"; 384 linux,stdout-path = "/plb/opb/serial@ef600200";
365 }; 385 };
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 214208924a9c..04360f9b0109 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -10,7 +10,6 @@ CONFIG_TASK_XACCT=y
10CONFIG_TASK_IO_ACCOUNTING=y 10CONFIG_TASK_IO_ACCOUNTING=y
11CONFIG_AUDIT=y 11CONFIG_AUDIT=y
12CONFIG_CGROUPS=y 12CONFIG_CGROUPS=y
13CONFIG_CGROUP_NS=y
14CONFIG_CGROUP_DEVICE=y 13CONFIG_CGROUP_DEVICE=y
15CONFIG_CGROUP_CPUACCT=y 14CONFIG_CGROUP_CPUACCT=y
16CONFIG_RESOURCE_COUNTERS=y 15CONFIG_RESOURCE_COUNTERS=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 7de13865508c..c9f212b5f3de 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -15,7 +15,6 @@ CONFIG_AUDITSYSCALL=y
15CONFIG_IKCONFIG=y 15CONFIG_IKCONFIG=y
16CONFIG_IKCONFIG_PROC=y 16CONFIG_IKCONFIG_PROC=y
17CONFIG_CGROUPS=y 17CONFIG_CGROUPS=y
18CONFIG_CGROUP_NS=y
19CONFIG_CGROUP_FREEZER=y 18CONFIG_CGROUP_FREEZER=y
20CONFIG_CGROUP_DEVICE=y 19CONFIG_CGROUP_DEVICE=y
21CONFIG_CPUSETS=y 20CONFIG_CPUSETS=y
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h
index 5c1bf3466749..8a0b5ece8f76 100644
--- a/arch/powerpc/include/asm/fsl_lbc.h
+++ b/arch/powerpc/include/asm/fsl_lbc.h
@@ -157,6 +157,8 @@ struct fsl_lbc_regs {
157#define LBCR_EPAR_SHIFT 16 157#define LBCR_EPAR_SHIFT 16
158#define LBCR_BMT 0x0000FF00 158#define LBCR_BMT 0x0000FF00
159#define LBCR_BMT_SHIFT 8 159#define LBCR_BMT_SHIFT 8
160#define LBCR_BMTPS 0x0000000F
161#define LBCR_BMTPS_SHIFT 0
160#define LBCR_INIT 0x00040000 162#define LBCR_INIT 0x00040000
161 __be32 lcrr; /**< Clock Ratio Register */ 163 __be32 lcrr; /**< Clock Ratio Register */
162#define LCRR_DBYP 0x80000000 164#define LCRR_DBYP 0x80000000
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index dde1296b8b41..169d039ed402 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -60,4 +60,18 @@ struct dyn_arch_ftrace {
60 60
61#endif 61#endif
62 62
63#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
64#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
65static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
66{
67 /*
68 * Compare the symbol name with the system call name. Skip the .sys or .SyS
69 * prefix from the symbol name and the sys prefix from the system call name and
70 * just match the rest. This is only needed on ppc64 since symbol names on
71 * 32bit do not start with a period so the generic function will work.
72 */
73 return !strcmp(sym + 4, name + 3);
74}
75#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */
76
63#endif /* _ASM_POWERPC_FTRACE */ 77#endif /* _ASM_POWERPC_FTRACE */
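
arch_syscall_match_sym_name() above compares a ppc64 symbol such as .SyS_read against the syscall name sys_read by skipping the four-character .sys/.SyS prefix on one side and the three-character sys prefix on the other. A stand-alone restatement with example strings (the dotted symbol names assume the usual ppc64 function-descriptor convention):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same comparison as the helper above: skip ".sys"/".SyS" (4 chars) on the
 * symbol and "sys" (3 chars) on the syscall name, then compare the rest. */
static bool match_sym_name(const char *sym, const char *name)
{
        return !strcmp(sym + 4, name + 3);
}

int main(void)
{
        printf("%d\n", match_sym_name(".SyS_read", "sys_read"));   /* 1 */
        printf("%d\n", match_sym_name(".sys_read", "sys_write"));  /* 0 */
        return 0;
}
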
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 852b8c1c09db..fd8201dddd4b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -236,7 +236,7 @@
236#define H_HOME_NODE_ASSOCIATIVITY 0x2EC 236#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
237#define H_BEST_ENERGY 0x2F4 237#define H_BEST_ENERGY 0x2F4
238#define H_GET_MPP_X 0x314 238#define H_GET_MPP_X 0x314
239#define MAX_HCALL_OPCODE H_BEST_ENERGY 239#define MAX_HCALL_OPCODE H_GET_MPP_X
240 240
241#ifndef __ASSEMBLY__ 241#ifndef __ASSEMBLY__
242 242
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index abe8532bd14e..bf301ac62f35 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -31,14 +31,29 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
31#endif 31#endif
32 32
33#ifdef CONFIG_SMP 33#ifdef CONFIG_SMP
34extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift); 34struct mmu_gather;
35extern void pte_free_finish(void); 35extern void tlb_remove_table(struct mmu_gather *, void *);
36
37static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
38{
39 unsigned long pgf = (unsigned long)table;
40 BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
41 pgf |= shift;
42 tlb_remove_table(tlb, (void *)pgf);
43}
44
45static inline void __tlb_remove_table(void *_table)
46{
47 void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
48 unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
49
50 pgtable_free(table, shift);
51}
36#else /* CONFIG_SMP */ 52#else /* CONFIG_SMP */
37static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift) 53static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
38{ 54{
39 pgtable_free(table, shift); 55 pgtable_free(table, shift);
40} 56}
41static inline void pte_free_finish(void) { }
42#endif /* !CONFIG_SMP */ 57#endif /* !CONFIG_SMP */
43 58
44static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, 59static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
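
pgtable_free_tlb() above defers the free through tlb_remove_table(), which only carries an opaque pointer, so the page-table index size is packed into the low bits of the (sufficiently aligned) table address and __tlb_remove_table() masks it back out. A small illustration of that tag-in-low-bits trick, with MAX_INDEX standing in for MAX_PGTABLE_INDEX_SIZE and everything else invented:

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_INDEX 0xf   /* stand-in for MAX_PGTABLE_INDEX_SIZE */

/* The table's alignment guarantees its low bits are zero, so a small
 * integer can ride along in them through an opaque void * argument. */
static void *pack(void *table, unsigned int shift)
{
        return (void *)((uintptr_t)table | shift);
}

static void unpack(void *packed, void **table, unsigned int *shift)
{
        *table = (void *)((uintptr_t)packed & ~(uintptr_t)MAX_INDEX);
        *shift = (uintptr_t)packed & MAX_INDEX;
}

int main(void)
{
        alignas(64) static unsigned char table[4096];
        void *out;
        unsigned int shift;

        unpack(pack(table, 9), &out, &shift);
        printf("round-trip ok: %d, shift=%u\n", out == (void *)table, shift);
        return 0;
}
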
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index 0018bf80cb25..d902abd33995 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -14,5 +14,10 @@
14#define ASM_PPC_RIO_H 14#define ASM_PPC_RIO_H
15 15
16extern void platform_rio_init(void); 16extern void platform_rio_init(void);
17#ifdef CONFIG_RAPIDIO
18extern int fsl_rio_mcheck_exception(struct pt_regs *);
19#else
20static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
21#endif
17 22
18#endif /* ASM_PPC_RIO_H */ 23#endif /* ASM_PPC_RIO_H */
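
The rio.h change is the usual compile-out stub: with CONFIG_RAPIDIO the real fsl_rio_mcheck_exception() is linked in, without it a static inline returning 0 keeps callers such as traps.c free of #ifdefs. A sketch of the pattern with an invented config symbol and handler name:

#include <stdio.h>

/* When the feature is configured in, the real handler is linked; when it is
 * not, a static inline no-op keeps the caller free of #ifdefs.  The config
 * symbol and function name are invented for the example. */
#ifdef CONFIG_MY_FEATURE
extern int my_feature_mcheck(int code);
#else
static inline int my_feature_mcheck(int code)
{
        (void)code;
        return 0;       /* "not recovered": caller takes the default path */
}
#endif

int main(void)
{
        if (my_feature_mcheck(42))
                printf("recovered by the feature\n");
        else
                printf("feature compiled out, default handling\n");
        return 0;
}
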
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 880b8c1e6e53..11eb404b5606 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -191,8 +191,6 @@ extern unsigned long __secondary_hold_spinloop;
191extern unsigned long __secondary_hold_acknowledge; 191extern unsigned long __secondary_hold_acknowledge;
192extern char __secondary_hold; 192extern char __secondary_hold;
193 193
194extern irqreturn_t debug_ipi_action(int irq, void *data);
195
196#endif /* __ASSEMBLY__ */ 194#endif /* __ASSEMBLY__ */
197 195
198#endif /* __KERNEL__ */ 196#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 23913e902fc3..b54b2add07be 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -15,6 +15,11 @@
15 15
16#include <linux/sched.h> 16#include <linux/sched.h>
17 17
18/* ftrace syscalls requires exporting the sys_call_table */
19#ifdef CONFIG_FTRACE_SYSCALLS
20extern const unsigned long *sys_call_table;
21#endif /* CONFIG_FTRACE_SYSCALLS */
22
18static inline long syscall_get_nr(struct task_struct *task, 23static inline long syscall_get_nr(struct task_struct *task,
19 struct pt_regs *regs) 24 struct pt_regs *regs)
20{ 25{
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index d8529ef13b23..836f231ec1f0 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -110,7 +110,8 @@ static inline struct thread_info *current_thread_info(void)
110#define TIF_NOERROR 12 /* Force successful syscall return */ 110#define TIF_NOERROR 12 /* Force successful syscall return */
111#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ 111#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
112#define TIF_FREEZE 14 /* Freezing for suspend */ 112#define TIF_FREEZE 14 /* Freezing for suspend */
113#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ 113#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
114#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
114 115
115/* as above, but as bit values */ 116/* as above, but as bit values */
116#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 117#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -127,8 +128,10 @@ static inline struct thread_info *current_thread_info(void)
127#define _TIF_NOERROR (1<<TIF_NOERROR) 128#define _TIF_NOERROR (1<<TIF_NOERROR)
128#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 129#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
129#define _TIF_FREEZE (1<<TIF_FREEZE) 130#define _TIF_FREEZE (1<<TIF_FREEZE)
131#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
130#define _TIF_RUNLATCH (1<<TIF_RUNLATCH) 132#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
131#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) 133#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
134 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
132 135
133#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 136#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
134 _TIF_NOTIFY_RESUME) 137 _TIF_NOTIFY_RESUME)
@@ -139,10 +142,12 @@ static inline struct thread_info *current_thread_info(void)
139#define TLF_NAPPING 0 /* idle thread enabled NAP mode */ 142#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
140#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ 143#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
141#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */ 144#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
145#define TLF_LAZY_MMU 3 /* tlb_batch is active */
142 146
143#define _TLF_NAPPING (1 << TLF_NAPPING) 147#define _TLF_NAPPING (1 << TLF_NAPPING)
144#define _TLF_SLEEPING (1 << TLF_SLEEPING) 148#define _TLF_SLEEPING (1 << TLF_SLEEPING)
145#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK) 149#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
150#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
146 151
147#ifndef __ASSEMBLY__ 152#ifndef __ASSEMBLY__
148#define HAVE_SET_RESTORE_SIGMASK 1 153#define HAVE_SET_RESTORE_SIGMASK 1
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 9aab36312572..e8b981897d44 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -109,6 +109,7 @@ obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
109 109
110obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 110obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
111obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 111obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
112obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
112obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o 113obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
113 114
114obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o 115obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index ce1f3e44c24f..bf99cfa6bbfe 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -22,6 +22,7 @@
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/code-patching.h> 23#include <asm/code-patching.h>
24#include <asm/ftrace.h> 24#include <asm/ftrace.h>
25#include <asm/syscall.h>
25 26
26 27
27#ifdef CONFIG_DYNAMIC_FTRACE 28#ifdef CONFIG_DYNAMIC_FTRACE
@@ -600,3 +601,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
600 } 601 }
601} 602}
602#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 603#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
604
605#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
606unsigned long __init arch_syscall_addr(int nr)
607{
608 return sys_call_table[nr*2];
609}
610#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
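
arch_syscall_addr() above indexes sys_call_table[nr * 2], which matches a ppc64 table that stores the native 64-bit entry and the 32-bit compat entry for each syscall side by side. A toy model of that layout (the handlers and table contents are invented):

#include <stdio.h>

/* Each syscall number owns two consecutive slots: the native 64-bit entry
 * first, then the 32-bit compat entry. */
typedef long (*syscall_fn)(void);

static long my_read64(void)  { return 64;  }
static long my_read32(void)  { return 32;  }
static long my_write64(void) { return 164; }
static long my_write32(void) { return 132; }

static const syscall_fn table[] = {
        my_read64,  my_read32,          /* syscall 0 */
        my_write64, my_write32,         /* syscall 1 */
};

static syscall_fn native_syscall_addr(int nr)
{
        return table[nr * 2];           /* same indexing as the hunk above */
}

int main(void)
{
        printf("%ld\n", native_syscall_addr(1)());      /* 164: 64-bit entry */
        return 0;
}
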
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index a24d37d4cf51..5b428e308666 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -295,17 +295,20 @@ static inline void handle_one_irq(unsigned int irq)
295 unsigned long saved_sp_limit; 295 unsigned long saved_sp_limit;
296 struct irq_desc *desc; 296 struct irq_desc *desc;
297 297
298 desc = irq_to_desc(irq);
299 if (!desc)
300 return;
301
298 /* Switch to the irq stack to handle this */ 302 /* Switch to the irq stack to handle this */
299 curtp = current_thread_info(); 303 curtp = current_thread_info();
300 irqtp = hardirq_ctx[smp_processor_id()]; 304 irqtp = hardirq_ctx[smp_processor_id()];
301 305
302 if (curtp == irqtp) { 306 if (curtp == irqtp) {
303 /* We're already on the irq stack, just handle it */ 307 /* We're already on the irq stack, just handle it */
304 generic_handle_irq(irq); 308 desc->handle_irq(irq, desc);
305 return; 309 return;
306 } 310 }
307 311
308 desc = irq_to_desc(irq);
309 saved_sp_limit = current->thread.ksp_limit; 312 saved_sp_limit = current->thread.ksp_limit;
310 313
311 irqtp->task = curtp->task; 314 irqtp->task = curtp->task;
@@ -557,15 +560,8 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
557 if (revmap_type == IRQ_HOST_MAP_LEGACY) { 560 if (revmap_type == IRQ_HOST_MAP_LEGACY) {
558 if (irq_map[0].host != NULL) { 561 if (irq_map[0].host != NULL) {
559 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 562 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
560 /* If we are early boot, we can't free the structure, 563 of_node_put(host->of_node);
561 * too bad... 564 kfree(host);
562 * this will be fixed once slab is made available early
563 * instead of the current cruft
564 */
565 if (mem_init_done) {
566 of_node_put(host->of_node);
567 kfree(host);
568 }
569 return NULL; 565 return NULL;
570 } 566 }
571 irq_map[0].host = host; 567 irq_map[0].host = host;
@@ -727,9 +723,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
727 } 723 }
728 pr_debug("irq: -> using host @%p\n", host); 724 pr_debug("irq: -> using host @%p\n", host);
729 725
730 /* Check if mapping already exist, if it does, call 726 /* Check if mapping already exists */
731 * host->ops->map() to update the flags
732 */
733 virq = irq_find_mapping(host, hwirq); 727 virq = irq_find_mapping(host, hwirq);
734 if (virq != NO_IRQ) { 728 if (virq != NO_IRQ) {
735 pr_debug("irq: -> existing mapping on virq %d\n", virq); 729 pr_debug("irq: -> existing mapping on virq %d\n", virq);
@@ -899,10 +893,13 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
899 return irq_find_mapping(host, hwirq); 893 return irq_find_mapping(host, hwirq);
900 894
901 /* 895 /*
902 * No rcu_read_lock(ing) needed, the ptr returned can't go under us 896 * The ptr returned references the static global irq_map.
903 * as it's referencing an entry in the static irq_map table. 897 * but freeing an irq can delete nodes along the path to
898 * do the lookup via call_rcu.
904 */ 899 */
900 rcu_read_lock();
905 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); 901 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
902 rcu_read_unlock();
906 903
907 /* 904 /*
908 * If found in radix tree, then fine. 905 * If found in radix tree, then fine.
@@ -1010,14 +1007,23 @@ void irq_free_virt(unsigned int virq, unsigned int count)
1010 WARN_ON (virq < NUM_ISA_INTERRUPTS); 1007 WARN_ON (virq < NUM_ISA_INTERRUPTS);
1011 WARN_ON (count == 0 || (virq + count) > irq_virq_count); 1008 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1012 1009
1010 if (virq < NUM_ISA_INTERRUPTS) {
1011 if (virq + count < NUM_ISA_INTERRUPTS)
1012 return;
 1013 count -= NUM_ISA_INTERRUPTS - virq;
1014 virq = NUM_ISA_INTERRUPTS;
1015 }
1016
1017 if (count > irq_virq_count || virq > irq_virq_count - count) {
1018 if (virq > irq_virq_count)
1019 return;
1020 count = irq_virq_count - virq;
1021 }
1022
1013 raw_spin_lock_irqsave(&irq_big_lock, flags); 1023 raw_spin_lock_irqsave(&irq_big_lock, flags);
1014 for (i = virq; i < (virq + count); i++) { 1024 for (i = virq; i < (virq + count); i++) {
1015 struct irq_host *host; 1025 struct irq_host *host;
1016 1026
1017 if (i < NUM_ISA_INTERRUPTS ||
1018 (virq + count) > irq_virq_count)
1019 continue;
1020
1021 host = irq_map[i].host; 1027 host = irq_map[i].host;
1022 irq_map[i].hwirq = host->inval_irq; 1028 irq_map[i].hwirq = host->inval_irq;
1023 smp_wmb(); 1029 smp_wmb();
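
The irq_free_virt() hunk above clamps the requested range to [NUM_ISA_INTERRUPTS, irq_virq_count) up front and bails out if nothing remains, instead of skipping entries inside the loop. The clamping logic restated on its own, with LO and HI standing in for the two bounds:

#include <stdio.h>

#define LO 16           /* stands in for NUM_ISA_INTERRUPTS */
#define HI 512          /* stands in for irq_virq_count     */

/* Shrink [start, start + count) so it lies inside [LO, HI); return the new
 * count, or 0 if nothing of the request falls inside the window. */
static unsigned int clamp_range(unsigned int *start, unsigned int count)
{
        if (*start < LO) {
                if (*start + count < LO)
                        return 0;               /* entirely below the window */
                count -= LO - *start;
                *start = LO;
        }
        if (count > HI || *start > HI - count) {
                if (*start > HI)
                        return 0;               /* entirely above the window */
                count = HI - *start;
        }
        return count;
}

int main(void)
{
        unsigned int start = 10, count = 20;

        count = clamp_range(&start, count);
        printf("[%u, %u)\n", start, start + count);     /* [16, 30) */
        return 0;
}
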
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 095043d79946..91e52df3d81d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -395,6 +395,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
395 struct thread_struct *new_thread, *old_thread; 395 struct thread_struct *new_thread, *old_thread;
396 unsigned long flags; 396 unsigned long flags;
397 struct task_struct *last; 397 struct task_struct *last;
398#ifdef CONFIG_PPC_BOOK3S_64
399 struct ppc64_tlb_batch *batch;
400#endif
398 401
399#ifdef CONFIG_SMP 402#ifdef CONFIG_SMP
400 /* avoid complexity of lazy save/restore of fpu 403 /* avoid complexity of lazy save/restore of fpu
@@ -513,7 +516,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
513 old_thread->accum_tb += (current_tb - start_tb); 516 old_thread->accum_tb += (current_tb - start_tb);
514 new_thread->start_tb = current_tb; 517 new_thread->start_tb = current_tb;
515 } 518 }
516#endif 519#endif /* CONFIG_PPC64 */
520
521#ifdef CONFIG_PPC_BOOK3S_64
522 batch = &__get_cpu_var(ppc64_tlb_batch);
523 if (batch->active) {
524 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
525 if (batch->index)
526 __flush_tlb_pending(batch);
527 batch->active = 0;
528 }
529#endif /* CONFIG_PPC_BOOK3S_64 */
517 530
518 local_irq_save(flags); 531 local_irq_save(flags);
519 532
@@ -528,6 +541,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
528 hard_irq_disable(); 541 hard_irq_disable();
529 last = _switch(old_thread, new_thread); 542 last = _switch(old_thread, new_thread);
530 543
544#ifdef CONFIG_PPC_BOOK3S_64
545 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
546 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
547 batch = &__get_cpu_var(ppc64_tlb_batch);
548 batch->active = 1;
549 }
550#endif /* CONFIG_PPC_BOOK3S_64 */
551
531 local_irq_restore(flags); 552 local_irq_restore(flags);
532 553
533 return last; 554 return last;
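
The __switch_to() changes above flush any active lazy-TLB batch before _switch() and park a _TLF_LAZY_MMU flag in the outgoing thread so the batch is re-armed when that thread runs again. A stand-alone sketch of that save/flush/re-arm dance, with invented types and names:

#include <stdbool.h>
#include <stdio.h>

#define TLF_LAZY_MMU 0x1

/* Invented types: a lazy-flush batch and a per-thread flags word. */
struct tlb_batch { bool active; int index; };
struct thread    { unsigned long flags; };

static void flush_pending(struct tlb_batch *b)
{
        printf("flushing %d pending entries\n", b->index);
        b->index = 0;
}

static void before_switch(struct thread *t, struct tlb_batch *b)
{
        if (b->active) {
                t->flags |= TLF_LAZY_MMU;       /* remember to re-arm later */
                if (b->index)
                        flush_pending(b);       /* nothing may stay queued  */
                b->active = false;
        }
}

static void after_switch(struct thread *t, struct tlb_batch *b)
{
        if (t->flags & TLF_LAZY_MMU) {
                t->flags &= ~TLF_LAZY_MMU;
                b->active = true;               /* resume lazy batching */
        }
}

int main(void)
{
        struct thread t = { 0 };
        struct tlb_batch b = { true, 3 };

        before_switch(&t, &b);          /* flushes 3 entries, batch off */
        /* ... the real kernel runs _switch() here ... */
        after_switch(&t, &b);
        printf("batch active again: %d\n", b.active);
        return 0;
}
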
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a6ae1cfad86c..cb22024f2b42 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -29,6 +29,7 @@
29#include <linux/signal.h> 29#include <linux/signal.h>
30#include <linux/seccomp.h> 30#include <linux/seccomp.h>
31#include <linux/audit.h> 31#include <linux/audit.h>
32#include <trace/syscall.h>
32#ifdef CONFIG_PPC32 33#ifdef CONFIG_PPC32
33#include <linux/module.h> 34#include <linux/module.h>
34#endif 35#endif
@@ -40,6 +41,9 @@
40#include <asm/pgtable.h> 41#include <asm/pgtable.h>
41#include <asm/system.h> 42#include <asm/system.h>
42 43
44#define CREATE_TRACE_POINTS
45#include <trace/events/syscalls.h>
46
43/* 47/*
44 * The parameter save area on the stack is used to store arguments being passed 48 * The parameter save area on the stack is used to store arguments being passed
45 * to callee function and is located at fixed offset from stack pointer. 49 * to callee function and is located at fixed offset from stack pointer.
@@ -1710,6 +1714,9 @@ long do_syscall_trace_enter(struct pt_regs *regs)
1710 */ 1714 */
1711 ret = -1L; 1715 ret = -1L;
1712 1716
1717 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1718 trace_sys_enter(regs, regs->gpr[0]);
1719
1713 if (unlikely(current->audit_context)) { 1720 if (unlikely(current->audit_context)) {
1714#ifdef CONFIG_PPC64 1721#ifdef CONFIG_PPC64
1715 if (!is_32bit_task()) 1722 if (!is_32bit_task())
@@ -1738,6 +1745,9 @@ void do_syscall_trace_leave(struct pt_regs *regs)
1738 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 1745 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
1739 regs->result); 1746 regs->result);
1740 1747
1748 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1749 trace_sys_exit(regs, regs->result);
1750
1741 step = test_thread_flag(TIF_SINGLESTEP); 1751 step = test_thread_flag(TIF_SINGLESTEP);
1742 if (step || test_thread_flag(TIF_SYSCALL_TRACE)) 1752 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1743 tracehook_report_syscall_exit(regs, step); 1753 tracehook_report_syscall_exit(regs, step);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 4a6f2ec7e761..8ebc6700b98d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -129,7 +129,7 @@ static irqreturn_t call_function_single_action(int irq, void *data)
129 return IRQ_HANDLED; 129 return IRQ_HANDLED;
130} 130}
131 131
132irqreturn_t debug_ipi_action(int irq, void *data) 132static irqreturn_t debug_ipi_action(int irq, void *data)
133{ 133{
134 if (crash_ipi_function_ptr) { 134 if (crash_ipi_function_ptr) {
135 crash_ipi_function_ptr(get_irq_regs()); 135 crash_ipi_function_ptr(get_irq_regs());
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b13306b0d925..0ff4ab98d50c 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -55,6 +55,7 @@
55#endif 55#endif
56#include <asm/kexec.h> 56#include <asm/kexec.h>
57#include <asm/ppc-opcode.h> 57#include <asm/ppc-opcode.h>
58#include <asm/rio.h>
58 59
59#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 60#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
60int (*__debugger)(struct pt_regs *regs) __read_mostly; 61int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -424,6 +425,12 @@ int machine_check_e500mc(struct pt_regs *regs)
424 unsigned long reason = mcsr; 425 unsigned long reason = mcsr;
425 int recoverable = 1; 426 int recoverable = 1;
426 427
428 if (reason & MCSR_BUS_RBERR) {
429 recoverable = fsl_rio_mcheck_exception(regs);
430 if (recoverable == 1)
431 goto silent_out;
432 }
433
427 printk("Machine check in kernel mode.\n"); 434 printk("Machine check in kernel mode.\n");
428 printk("Caused by (from MCSR=%lx): ", reason); 435 printk("Caused by (from MCSR=%lx): ", reason);
429 436
@@ -499,6 +506,7 @@ int machine_check_e500mc(struct pt_regs *regs)
499 reason & MCSR_MEA ? "Effective" : "Physical", addr); 506 reason & MCSR_MEA ? "Effective" : "Physical", addr);
500 } 507 }
501 508
509silent_out:
502 mtspr(SPRN_MCSR, mcsr); 510 mtspr(SPRN_MCSR, mcsr);
503 return mfspr(SPRN_MCSR) == 0 && recoverable; 511 return mfspr(SPRN_MCSR) == 0 && recoverable;
504} 512}
@@ -507,6 +515,11 @@ int machine_check_e500(struct pt_regs *regs)
507{ 515{
508 unsigned long reason = get_mc_reason(regs); 516 unsigned long reason = get_mc_reason(regs);
509 517
518 if (reason & MCSR_BUS_RBERR) {
519 if (fsl_rio_mcheck_exception(regs))
520 return 1;
521 }
522
510 printk("Machine check in kernel mode.\n"); 523 printk("Machine check in kernel mode.\n");
511 printk("Caused by (from MCSR=%lx): ", reason); 524 printk("Caused by (from MCSR=%lx): ", reason);
512 525
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6a3997f98dfb..af40c8768a78 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -33,110 +33,6 @@
33 33
34#include "mmu_decl.h" 34#include "mmu_decl.h"
35 35
36DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
37
38#ifdef CONFIG_SMP
39
40/*
41 * Handle batching of page table freeing on SMP. Page tables are
42 * queued up and send to be freed later by RCU in order to avoid
43 * freeing a page table page that is being walked without locks
44 */
45
46static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
47static unsigned long pte_freelist_forced_free;
48
49struct pte_freelist_batch
50{
51 struct rcu_head rcu;
52 unsigned int index;
53 unsigned long tables[0];
54};
55
56#define PTE_FREELIST_SIZE \
57 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
58 / sizeof(unsigned long))
59
60static void pte_free_smp_sync(void *arg)
61{
62 /* Do nothing, just ensure we sync with all CPUs */
63}
64
65/* This is only called when we are critically out of memory
66 * (and fail to get a page in pte_free_tlb).
67 */
68static void pgtable_free_now(void *table, unsigned shift)
69{
70 pte_freelist_forced_free++;
71
72 smp_call_function(pte_free_smp_sync, NULL, 1);
73
74 pgtable_free(table, shift);
75}
76
77static void pte_free_rcu_callback(struct rcu_head *head)
78{
79 struct pte_freelist_batch *batch =
80 container_of(head, struct pte_freelist_batch, rcu);
81 unsigned int i;
82
83 for (i = 0; i < batch->index; i++) {
84 void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
85 unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
86
87 pgtable_free(table, shift);
88 }
89
90 free_page((unsigned long)batch);
91}
92
93static void pte_free_submit(struct pte_freelist_batch *batch)
94{
95 call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
96}
97
98void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
99{
100 /* This is safe since tlb_gather_mmu has disabled preemption */
101 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
102 unsigned long pgf;
103
104 if (atomic_read(&tlb->mm->mm_users) < 2 ||
105 cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
106 pgtable_free(table, shift);
107 return;
108 }
109
110 if (*batchp == NULL) {
111 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
112 if (*batchp == NULL) {
113 pgtable_free_now(table, shift);
114 return;
115 }
116 (*batchp)->index = 0;
117 }
118 BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
119 pgf = (unsigned long)table | shift;
120 (*batchp)->tables[(*batchp)->index++] = pgf;
121 if ((*batchp)->index == PTE_FREELIST_SIZE) {
122 pte_free_submit(*batchp);
123 *batchp = NULL;
124 }
125}
126
127void pte_free_finish(void)
128{
129 /* This is safe since tlb_gather_mmu has disabled preemption */
130 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
131
132 if (*batchp == NULL)
133 return;
134 pte_free_submit(*batchp);
135 *batchp = NULL;
136}
137
138#endif /* CONFIG_SMP */
139
140static inline int is_exec_fault(void) 36static inline int is_exec_fault(void)
141{ 37{
142 return current->thread.regs && TRAP(current->thread.regs) == 0x400; 38 return current->thread.regs && TRAP(current->thread.regs) == 0x400;
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 690566b66e8e..27b863c14941 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -71,9 +71,6 @@ void tlb_flush(struct mmu_gather *tlb)
71 */ 71 */
72 _tlbia(); 72 _tlbia();
73 } 73 }
74
75 /* Push out batch of freed page tables */
76 pte_free_finish();
77} 74}
78 75
79/* 76/*
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index c14d09f614f3..31f18207970b 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -155,7 +155,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
155 155
156void tlb_flush(struct mmu_gather *tlb) 156void tlb_flush(struct mmu_gather *tlb)
157{ 157{
158 struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch); 158 struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
159 159
160 /* If there's a TLB batch pending, then we must flush it because the 160 /* If there's a TLB batch pending, then we must flush it because the
161 * pages are going to be freed and we really don't want to have a CPU 161 * pages are going to be freed and we really don't want to have a CPU
@@ -164,8 +164,7 @@ void tlb_flush(struct mmu_gather *tlb)
164 if (tlbbatch->index) 164 if (tlbbatch->index)
165 __flush_tlb_pending(tlbbatch); 165 __flush_tlb_pending(tlbbatch);
166 166
167 /* Push out batch of freed page tables */ 167 put_cpu_var(ppc64_tlb_batch);
168 pte_free_finish();
169} 168}
170 169
171/** 170/**
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2a030d89bbc6..0bdad3aecc67 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -299,9 +299,6 @@ EXPORT_SYMBOL(flush_tlb_range);
299void tlb_flush(struct mmu_gather *tlb) 299void tlb_flush(struct mmu_gather *tlb)
300{ 300{
301 flush_tlb_mm(tlb->mm); 301 flush_tlb_mm(tlb->mm);
302
303 /* Push out batch of freed page tables */
304 pte_free_finish();
305} 302}
306 303
307/* 304/*
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 8ee51a252cf1..e6bec74be131 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
261 return is_kernel; 261 return is_kernel;
262} 262}
263 263
264static bool pmc_overflow(unsigned long val)
265{
266 if ((int)val < 0)
267 return true;
268
269 /*
270 * Events on POWER7 can roll back if a speculative event doesn't
271 * eventually complete. Unfortunately in some rare cases they will
272 * raise a performance monitor exception. We need to catch this to
273 * ensure we reset the PMC. In all cases the PMC will be 256 or less
274 * cycles from overflow.
275 *
276 * We only do this if the first pass fails to find any overflowing
277 * PMCs because a user might set a period of less than 256 and we
278 * don't want to mistakenly reset them.
279 */
280 if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
281 return true;
282
283 return false;
284}
285
264static void power4_handle_interrupt(struct pt_regs *regs, 286static void power4_handle_interrupt(struct pt_regs *regs,
265 struct op_counter_config *ctr) 287 struct op_counter_config *ctr)
266{ 288{
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
281 303
282 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { 304 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
283 val = classic_ctr_read(i); 305 val = classic_ctr_read(i);
284 if (val < 0) { 306 if (pmc_overflow(val)) {
285 if (oprofile_running && ctr[i].enabled) { 307 if (oprofile_running && ctr[i].enabled) {
286 oprofile_add_ext_sample(pc, regs, i, is_kernel); 308 oprofile_add_ext_sample(pc, regs, i, is_kernel);
287 classic_ctr_write(i, reset_value[i]); 309 classic_ctr_write(i, reset_value[i]);
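The new pmc_overflow() helper above flags a counter whose top bit is set, and on POWER7 also a counter within 256 cycles of wrapping, to catch speculative events that rolled back. A standalone sketch of the same test; the sample values in main() are arbitrary, and the bit-31 check stands in for the kernel's (int)val < 0 comparison.

#include <stdio.h>
#include <stdbool.h>

static bool pmc_overflow(unsigned long val, bool is_power7)
{
	/* Bit 31 set: the 32-bit counter has already wrapped
	 * (the kernel expresses this as (int)val < 0). */
	if (val & 0x80000000UL)
		return true;
	/* POWER7 only: a rolled-back speculative event can leave the counter
	 * just short of wrapping, so treat "within 256 cycles of overflow"
	 * as overflowed too. */
	if (is_power7 && (0x80000000UL - val) <= 256)
		return true;
	return false;
}

int main(void)
{
	unsigned long samples[] = { 0x100UL, 0x7fffff00UL, 0x7ffffeffUL, 0x80000001UL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08lx -> POWER7:%d others:%d\n", samples[i],
		       pmc_overflow(samples[i], true),
		       pmc_overflow(samples[i], false));
	return 0;
}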
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index b72176434ebe..d733d7ca939c 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -57,6 +57,8 @@ config KILAUEA
57 select 405EX 57 select 405EX
58 select PPC40x_SIMPLE 58 select PPC40x_SIMPLE
59 select PPC4xx_PCI_EXPRESS 59 select PPC4xx_PCI_EXPRESS
60 select PCI_MSI
61 select PPC4xx_MSI
60 help 62 help
61 This option enables support for the AMCC PPC405EX evaluation board. 63 This option enables support for the AMCC PPC405EX evaluation board.
62 64
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index f485fc5f6d5e..e958b6f48ec2 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -74,6 +74,8 @@ config KATMAI
74 select 440SPe 74 select 440SPe
75 select PCI 75 select PCI
76 select PPC4xx_PCI_EXPRESS 76 select PPC4xx_PCI_EXPRESS
77 select PCI_MSI
78 select PPC4xx_MSI
77 help 79 help
78 This option enables support for the AMCC PPC440SPe evaluation board. 80 This option enables support for the AMCC PPC440SPe evaluation board.
79 81
@@ -118,6 +120,8 @@ config CANYONLANDS
118 select 460EX 120 select 460EX
119 select PCI 121 select PCI
120 select PPC4xx_PCI_EXPRESS 122 select PPC4xx_PCI_EXPRESS
123 select PCI_MSI
124 select PPC4xx_MSI
121 select IBM_NEW_EMAC_RGMII 125 select IBM_NEW_EMAC_RGMII
122 select IBM_NEW_EMAC_ZMII 126 select IBM_NEW_EMAC_ZMII
123 help 127 help
@@ -144,6 +148,8 @@ config REDWOOD
144 select 460SX 148 select 460SX
145 select PCI 149 select PCI
146 select PPC4xx_PCI_EXPRESS 150 select PPC4xx_PCI_EXPRESS
151 select PCI_MSI
152 select PPC4xx_MSI
147 help 153 help
148 This option enables support for the AMCC PPC460SX Redwood board. 154 This option enables support for the AMCC PPC460SX Redwood board.
149 155
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 449c08c15862..3e4eba603e6b 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -176,14 +176,14 @@ EXPORT_SYMBOL_GPL(iic_get_target_id);
176#ifdef CONFIG_SMP 176#ifdef CONFIG_SMP
177 177
178/* Use the highest interrupt priorities for IPI */ 178/* Use the highest interrupt priorities for IPI */
179static inline int iic_ipi_to_irq(int ipi) 179static inline int iic_msg_to_irq(int msg)
180{ 180{
181 return IIC_IRQ_TYPE_IPI + 0xf - ipi; 181 return IIC_IRQ_TYPE_IPI + 0xf - msg;
182} 182}
183 183
184void iic_cause_IPI(int cpu, int mesg) 184void iic_message_pass(int cpu, int msg)
185{ 185{
186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4); 186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
187} 187}
188 188
189struct irq_host *iic_get_irq_host(int node) 189struct irq_host *iic_get_irq_host(int node)
@@ -192,50 +192,31 @@ struct irq_host *iic_get_irq_host(int node)
192} 192}
193EXPORT_SYMBOL_GPL(iic_get_irq_host); 193EXPORT_SYMBOL_GPL(iic_get_irq_host);
194 194
195static irqreturn_t iic_ipi_action(int irq, void *dev_id) 195static void iic_request_ipi(int msg)
196{
197 int ipi = (int)(long)dev_id;
198
199 switch(ipi) {
200 case PPC_MSG_CALL_FUNCTION:
201 generic_smp_call_function_interrupt();
202 break;
203 case PPC_MSG_RESCHEDULE:
204 scheduler_ipi();
205 break;
206 case PPC_MSG_CALL_FUNC_SINGLE:
207 generic_smp_call_function_single_interrupt();
208 break;
209 case PPC_MSG_DEBUGGER_BREAK:
210 debug_ipi_action(0, NULL);
211 break;
212 }
213 return IRQ_HANDLED;
214}
215static void iic_request_ipi(int ipi, const char *name)
216{ 196{
217 int virq; 197 int virq;
218 198
219 virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi)); 199 virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
220 if (virq == NO_IRQ) { 200 if (virq == NO_IRQ) {
221 printk(KERN_ERR 201 printk(KERN_ERR
222 "iic: failed to map IPI %s\n", name); 202 "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
223 return; 203 return;
224 } 204 }
225 if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name, 205
226 (void *)(long)ipi)) 206 /*
227 printk(KERN_ERR 207 * If smp_request_message_ipi encounters an error it will notify
228 "iic: failed to request IPI %s\n", name); 208 * the error. If a message is not needed it will return non-zero.
209 */
210 if (smp_request_message_ipi(virq, msg))
211 irq_dispose_mapping(virq);
229} 212}
230 213
231void iic_request_IPIs(void) 214void iic_request_IPIs(void)
232{ 215{
233 iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); 216 iic_request_ipi(PPC_MSG_CALL_FUNCTION);
234 iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); 217 iic_request_ipi(PPC_MSG_RESCHEDULE);
235 iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); 218 iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE);
236#ifdef CONFIG_DEBUGGER 219 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK);
237 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
238#endif /* CONFIG_DEBUGGER */
239} 220}
240 221
241#endif /* CONFIG_SMP */ 222#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 942dc39d6045..4f60ae6ca358 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -75,7 +75,7 @@ enum {
75}; 75};
76 76
77extern void iic_init_IRQ(void); 77extern void iic_init_IRQ(void);
78extern void iic_cause_IPI(int cpu, int mesg); 78extern void iic_message_pass(int cpu, int msg);
79extern void iic_request_IPIs(void); 79extern void iic_request_IPIs(void);
80extern void iic_setup_cpu(void); 80extern void iic_setup_cpu(void);
81 81
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index d176e6148e3f..dbb641ea90dd 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -152,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr)
152 return 1; 152 return 1;
153} 153}
154static struct smp_ops_t bpa_iic_smp_ops = { 154static struct smp_ops_t bpa_iic_smp_ops = {
155 .message_pass = iic_cause_IPI, 155 .message_pass = iic_message_pass,
156 .probe = smp_iic_probe, 156 .probe = smp_iic_probe,
157 .kick_cpu = smp_cell_kick_cpu, 157 .kick_cpu = smp_cell_kick_cpu,
158 .setup_cpu = smp_cell_setup_cpu, 158 .setup_cpu = smp_cell_setup_cpu,
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index d775fd148d13..7b4df37ac381 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -7,11 +7,18 @@ config PPC4xx_PCI_EXPRESS
7 depends on PCI && 4xx 7 depends on PCI && 4xx
8 default n 8 default n
9 9
10config PPC4xx_MSI
11 bool
12 depends on PCI_MSI
13 depends on PCI && 4xx
14 default n
15
10config PPC_MSI_BITMAP 16config PPC_MSI_BITMAP
11 bool 17 bool
12 depends on PCI_MSI 18 depends on PCI_MSI
13 default y if MPIC 19 default y if MPIC
14 default y if FSL_PCI 20 default y if FSL_PCI
21 default y if PPC4xx_MSI
15 22
16source "arch/powerpc/sysdev/xics/Kconfig" 23source "arch/powerpc/sysdev/xics/Kconfig"
17 24
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 6076e0074a87..0efa990e3344 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
41ifeq ($(CONFIG_PCI),y) 41ifeq ($(CONFIG_PCI),y)
42obj-$(CONFIG_4xx) += ppc4xx_pci.o 42obj-$(CONFIG_4xx) += ppc4xx_pci.o
43endif 43endif
44obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o
44obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o 45obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o
45obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o 46obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o
46 47
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 4fcb5a4e60dd..0608b1657da4 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -184,7 +184,8 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
184} 184}
185EXPORT_SYMBOL(fsl_upm_run_pattern); 185EXPORT_SYMBOL(fsl_upm_run_pattern);
186 186
187static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl) 187static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
188 struct device_node *node)
188{ 189{
189 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 190 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
190 191
@@ -198,6 +199,10 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
198 /* Enable interrupts for any detected events */ 199 /* Enable interrupts for any detected events */
199 out_be32(&lbc->lteir, LTEIR_ENABLE); 200 out_be32(&lbc->lteir, LTEIR_ENABLE);
200 201
202 /* Set the monitor timeout value to the maximum for erratum A001 */
203 if (of_device_is_compatible(node, "fsl,elbc"))
204 clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
205
201 return 0; 206 return 0;
202} 207}
203 208
@@ -304,7 +309,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
304 309
305 fsl_lbc_ctrl_dev->dev = &dev->dev; 310 fsl_lbc_ctrl_dev->dev = &dev->dev;
306 311
307 ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev); 312 ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node);
308 if (ret < 0) 313 if (ret < 0)
309 goto err; 314 goto err;
310 315
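The erratum A001 workaround above is one read-modify-write of LBCR: clear the bus monitor timeout field and program the prescaler to its maximum. A minimal sketch of the clrsetbits pattern follows; the LBCR_BMT and LBCR_BMTPS values are placeholders, since their real definitions are not part of this diff.

#include <stdio.h>

/* Placeholder values: the real LBCR_BMT / LBCR_BMTPS definitions are not
 * shown in this diff. */
#define LBCR_BMT	0xFFFF0000u	/* bus monitor timeout field (assumed) */
#define LBCR_BMTPS	0x0000000Fu	/* maximum timeout prescaler (assumed) */

/* Same read-modify-write as clrsetbits_be32(), minus the MMIO access and
 * the big-endian accessors. */
static unsigned int clrsetbits(unsigned int reg, unsigned int clear,
			       unsigned int set)
{
	return (reg & ~clear) | set;
}

int main(void)
{
	unsigned int lbcr = 0x12345678u;

	printf("LBCR 0x%08x -> 0x%08x\n", lbcr,
	       clrsetbits(lbcr, LBCR_BMT, LBCR_BMTPS));
	return 0;
}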
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 49798532b477..5b206a2fe17c 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -10,7 +10,7 @@
10 * - Added Port-Write message handling 10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling 11 * - Added Machine Check exception handling
12 * 12 *
13 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. 13 * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
14 * Zhang Wei <wei.zhang@freescale.com> 14 * Zhang Wei <wei.zhang@freescale.com>
15 * 15 *
16 * Copyright 2005 MontaVista Software, Inc. 16 * Copyright 2005 MontaVista Software, Inc.
@@ -47,15 +47,33 @@
47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) 47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) 48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
49 49
50#define IPWSR_CLEAR 0x98
51#define OMSR_CLEAR 0x1cb3
52#define IMSR_CLEAR 0x491
53#define IDSR_CLEAR 0x91
54#define ODSR_CLEAR 0x1c00
55#define LTLEECSR_ENABLE_ALL 0xFFC000FC
56#define ESCSR_CLEAR 0x07120204
57
58#define RIO_PORT1_EDCSR 0x0640
59#define RIO_PORT2_EDCSR 0x0680
60#define RIO_PORT1_IECSR 0x10130
61#define RIO_PORT2_IECSR 0x101B0
62#define RIO_IM0SR 0x13064
63#define RIO_IM1SR 0x13164
64#define RIO_OM0SR 0x13004
65#define RIO_OM1SR 0x13104
66
50#define RIO_ATMU_REGS_OFFSET 0x10c00 67#define RIO_ATMU_REGS_OFFSET 0x10c00
51#define RIO_P_MSG_REGS_OFFSET 0x11000 68#define RIO_P_MSG_REGS_OFFSET 0x11000
52#define RIO_S_MSG_REGS_OFFSET 0x13000 69#define RIO_S_MSG_REGS_OFFSET 0x13000
53#define RIO_GCCSR 0x13c 70#define RIO_GCCSR 0x13c
54#define RIO_ESCSR 0x158 71#define RIO_ESCSR 0x158
72#define RIO_PORT2_ESCSR 0x178
55#define RIO_CCSR 0x15c 73#define RIO_CCSR 0x15c
56#define RIO_LTLEDCSR 0x0608 74#define RIO_LTLEDCSR 0x0608
57#define RIO_LTLEDCSR_IER 0x80000000 75#define RIO_LTLEDCSR_IER 0x80000000
58#define RIO_LTLEDCSR_PRT 0x01000000 76#define RIO_LTLEDCSR_PRT 0x01000000
59#define RIO_LTLEECSR 0x060c 77#define RIO_LTLEECSR 0x060c
60#define RIO_EPWISR 0x10010 78#define RIO_EPWISR 0x10010
61#define RIO_ISR_AACR 0x10120 79#define RIO_ISR_AACR 0x10120
@@ -88,7 +106,10 @@
88#define RIO_IPWSR_PWD 0x00000008 106#define RIO_IPWSR_PWD 0x00000008
89#define RIO_IPWSR_PWB 0x00000004 107#define RIO_IPWSR_PWB 0x00000004
90 108
91#define RIO_EPWISR_PINT 0x80000000 109/* EPWISR Error match value */
110#define RIO_EPWISR_PINT1 0x80000000
111#define RIO_EPWISR_PINT2 0x40000000
112#define RIO_EPWISR_MU 0x00000002
92#define RIO_EPWISR_PW 0x00000001 113#define RIO_EPWISR_PW 0x00000001
93 114
94#define RIO_MSG_DESC_SIZE 32 115#define RIO_MSG_DESC_SIZE 32
@@ -260,9 +281,7 @@ struct rio_priv {
260static void __iomem *rio_regs_win; 281static void __iomem *rio_regs_win;
261 282
262#ifdef CONFIG_E500 283#ifdef CONFIG_E500
263static int (*saved_mcheck_exception)(struct pt_regs *regs); 284int fsl_rio_mcheck_exception(struct pt_regs *regs)
264
265static int fsl_rio_mcheck_exception(struct pt_regs *regs)
266{ 285{
267 const struct exception_table_entry *entry = NULL; 286 const struct exception_table_entry *entry = NULL;
268 unsigned long reason = mfspr(SPRN_MCSR); 287 unsigned long reason = mfspr(SPRN_MCSR);
@@ -284,11 +303,9 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
284 } 303 }
285 } 304 }
286 305
287 if (saved_mcheck_exception) 306 return 0;
288 return saved_mcheck_exception(regs);
289 else
290 return cur_cpu_spec->machine_check(regs);
291} 307}
308EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
292#endif 309#endif
293 310
294/** 311/**
@@ -1064,6 +1081,40 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
1064 return rc; 1081 return rc;
1065} 1082}
1066 1083
1084static void port_error_handler(struct rio_mport *port, int offset)
1085{
1086 /*XXX: Error recovery is not implemented, we just clear errors */
1087 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1088
1089 if (offset == 0) {
1090 out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
1091 out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0);
1092 out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
1093 } else {
1094 out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
1095 out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0);
1096 out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
1097 }
1098}
1099
1100static void msg_unit_error_handler(struct rio_mport *port)
1101{
1102 struct rio_priv *priv = port->priv;
1103
1104 /*XXX: Error recovery is not implemented, we just clear errors */
1105 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
1106
1107 out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
1108 out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
1109 out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
1110 out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
1111
1112 out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
1113 out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
1114
1115 out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
1116}
1117
1067/** 1118/**
1068 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler 1119 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
1069 * @irq: Linux interrupt number 1120 * @irq: Linux interrupt number
@@ -1144,10 +1195,22 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
1144 } 1195 }
1145 1196
1146pw_done: 1197pw_done:
1147 if (epwisr & RIO_EPWISR_PINT) { 1198 if (epwisr & RIO_EPWISR_PINT1) {
1199 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1200 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1201 port_error_handler(port, 0);
1202 }
1203
1204 if (epwisr & RIO_EPWISR_PINT2) {
1148 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); 1205 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1149 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); 1206 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1150 out_be32(priv->regs_win + RIO_LTLEDCSR, 0); 1207 port_error_handler(port, 1);
1208 }
1209
1210 if (epwisr & RIO_EPWISR_MU) {
1211 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1212 pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
1213 msg_unit_error_handler(port);
1151 } 1214 }
1152 1215
1153 return IRQ_HANDLED; 1216 return IRQ_HANDLED;
@@ -1258,12 +1321,14 @@ static int fsl_rio_port_write_init(struct rio_mport *mport)
1258 1321
1259 1322
1260 /* Hook up port-write handler */ 1323 /* Hook up port-write handler */
1261 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, 1324 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
1262 "port-write", (void *)mport); 1325 IRQF_SHARED, "port-write", (void *)mport);
1263 if (rc < 0) { 1326 if (rc < 0) {
1264 pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); 1327 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
1265 goto err_out; 1328 goto err_out;
1266 } 1329 }
1330 /* Enable Error Interrupt */
1331 out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
1267 1332
1268 INIT_WORK(&priv->pw_work, fsl_pw_dpc); 1333 INIT_WORK(&priv->pw_work, fsl_pw_dpc);
1269 spin_lock_init(&priv->pw_fifo_lock); 1334 spin_lock_init(&priv->pw_fifo_lock);
@@ -1538,11 +1603,6 @@ int fsl_rio_setup(struct platform_device *dev)
1538 fsl_rio_doorbell_init(port); 1603 fsl_rio_doorbell_init(port);
1539 fsl_rio_port_write_init(port); 1604 fsl_rio_port_write_init(port);
1540 1605
1541#ifdef CONFIG_E500
1542 saved_mcheck_exception = ppc_md.machine_check_exception;
1543 ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
1544#endif
1545
1546 return 0; 1606 return 0;
1547err: 1607err:
1548 iounmap(priv->regs_win); 1608 iounmap(priv->regs_win);
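With the added RIO_EPWISR_PINT1, RIO_EPWISR_PINT2 and RIO_EPWISR_MU bits, fsl_rio_port_write_handler() now dispatches two port error sources and a message unit error in addition to the port-write message itself. A standalone sketch that decodes an EPWISR value the same way, using the bit values from the defines above:

#include <stdio.h>

#define RIO_EPWISR_PINT1	0x80000000u
#define RIO_EPWISR_PINT2	0x40000000u
#define RIO_EPWISR_MU		0x00000002u
#define RIO_EPWISR_PW		0x00000001u

static void decode_epwisr(unsigned int epwisr)
{
	printf("EPWISR=0x%08x:", epwisr);
	if (epwisr & RIO_EPWISR_PW)
		printf(" port-write message");
	if (epwisr & RIO_EPWISR_PINT1)
		printf(" port 1 error -> port_error_handler(port, 0)");
	if (epwisr & RIO_EPWISR_PINT2)
		printf(" port 2 error -> port_error_handler(port, 1)");
	if (epwisr & RIO_EPWISR_MU)
		printf(" message unit error -> msg_unit_error_handler(port)");
	printf("\n");
}

int main(void)
{
	decode_epwisr(RIO_EPWISR_PW);
	decode_epwisr(RIO_EPWISR_PINT1 | RIO_EPWISR_MU);
	decode_epwisr(RIO_EPWISR_PINT2);
	return 0;
}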
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
new file mode 100644
index 000000000000..367af0241851
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -0,0 +1,276 @@
1/*
2 * Adding PCI-E MSI support for PPC4XX SoCs.
3 *
4 * Copyright (c) 2010, Applied Micro Circuits Corporation
5 * Authors: Tirumala R Marri <tmarri@apm.com>
6 * Feng Kan <fkan@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24#include <linux/irq.h>
25#include <linux/bootmem.h>
26#include <linux/pci.h>
27#include <linux/msi.h>
28#include <linux/of_platform.h>
29#include <linux/interrupt.h>
30#include <asm/prom.h>
31#include <asm/hw_irq.h>
32#include <asm/ppc-pci.h>
33#include <boot/dcr.h>
34#include <asm/dcr-regs.h>
35#include <asm/msi_bitmap.h>
36
37#define PEIH_TERMADH 0x00
38#define PEIH_TERMADL 0x08
39#define PEIH_MSIED 0x10
40#define PEIH_MSIMK 0x18
41#define PEIH_MSIASS 0x20
42#define PEIH_FLUSH0 0x30
43#define PEIH_FLUSH1 0x38
44#define PEIH_CNTRST 0x48
45#define NR_MSI_IRQS 4
46
47struct ppc4xx_msi {
48 u32 msi_addr_lo;
49 u32 msi_addr_hi;
50 void __iomem *msi_regs;
51 int msi_virqs[NR_MSI_IRQS];
52 struct msi_bitmap bitmap;
53 struct device_node *msi_dev;
54};
55
56static struct ppc4xx_msi ppc4xx_msi;
57
58static int ppc4xx_msi_init_allocator(struct platform_device *dev,
59 struct ppc4xx_msi *msi_data)
60{
61 int err;
62
63 err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
64 dev->dev.of_node);
65 if (err)
66 return err;
67
68 err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
69 if (err < 0) {
70 msi_bitmap_free(&msi_data->bitmap);
71 return err;
72 }
73
74 return 0;
75}
76
77static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
78{
79 int int_no = -ENOMEM;
80 unsigned int virq;
81 struct msi_msg msg;
82 struct msi_desc *entry;
83 struct ppc4xx_msi *msi_data = &ppc4xx_msi;
84
85 list_for_each_entry(entry, &dev->msi_list, list) {
86 int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
87 if (int_no >= 0)
88 break;
89 if (int_no < 0) {
90 pr_debug("%s: fail allocating msi interrupt\n",
91 __func__);
92 }
93 virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
94 if (virq == NO_IRQ) {
95 dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
96 msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
97 return -ENOSPC;
98 }
99 dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq);
100
101 /* Setup msi address space */
102 msg.address_hi = msi_data->msi_addr_hi;
103 msg.address_lo = msi_data->msi_addr_lo;
104
105 irq_set_msi_desc(virq, entry);
106 msg.data = int_no;
107 write_msi_msg(virq, &msg);
108 }
109 return 0;
110}
111
112void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
113{
114 struct msi_desc *entry;
115 struct ppc4xx_msi *msi_data = &ppc4xx_msi;
116
117 dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
118
119 list_for_each_entry(entry, &dev->msi_list, list) {
120 if (entry->irq == NO_IRQ)
121 continue;
122 irq_set_msi_desc(entry->irq, NULL);
123 msi_bitmap_free_hwirqs(&msi_data->bitmap,
124 virq_to_hw(entry->irq), 1);
125 irq_dispose_mapping(entry->irq);
126 }
127}
128
129static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type)
130{
131 dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
132 __func__, nvec, type);
133 if (type == PCI_CAP_ID_MSIX)
134 pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
135
136 return 0;
137}
138
139static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
140 struct resource res, struct ppc4xx_msi *msi)
141{
142 const u32 *msi_data;
143 const u32 *msi_mask;
144 const u32 *sdr_addr;
145 dma_addr_t msi_phys;
146 void *msi_virt;
147
148 sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
149 if (!sdr_addr)
150 return -1;
151
152 SDR0_WRITE(sdr_addr, (u64)res.start >> 32); /*HIGH addr */
153 SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */
154
155
156 msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
157 if (!msi->msi_dev)
158 return -ENODEV;
159
160 msi->msi_regs = of_iomap(msi->msi_dev, 0);
161 if (!msi->msi_regs) {
162 dev_err(&dev->dev, "of_iomap failed\n");
163 return -ENOMEM;
164 }
165 dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
166 (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
167
168 msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
169 msi->msi_addr_hi = 0x0;
170 msi->msi_addr_lo = (u32) msi_phys;
171 dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo);
172
173 /* Program the interrupt handler termination address registers */
174 out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
175 out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
176
177 msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
178 if (!msi_data)
179 return -1;
180 msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
181 if (!msi_mask)
182 return -1;
183 /* Program MSI Expected data and Mask bits */
184 out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
185 out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
186
187 return 0;
188}
189
190static int ppc4xx_of_msi_remove(struct platform_device *dev)
191{
192 struct ppc4xx_msi *msi = dev->dev.platform_data;
193 int i;
194 int virq;
195
196 for (i = 0; i < NR_MSI_IRQS; i++) {
197 virq = msi->msi_virqs[i];
198 if (virq != NO_IRQ)
199 irq_dispose_mapping(virq);
200 }
201
202 if (msi->bitmap.bitmap)
203 msi_bitmap_free(&msi->bitmap);
204 iounmap(msi->msi_regs);
205 of_node_put(msi->msi_dev);
206 kfree(msi);
207
208 return 0;
209}
210
211static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
212{
213 struct ppc4xx_msi *msi;
214 struct resource res;
215 int err = 0;
216
217 msi = &ppc4xx_msi;/*keep the msi data for further use*/
218
219 dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
220
221 msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL);
222 if (!msi) {
223 dev_err(&dev->dev, "No memory for MSI structure\n");
224 return -ENOMEM;
225 }
226 dev->dev.platform_data = msi;
227
228 /* Get MSI ranges */
229 err = of_address_to_resource(dev->dev.of_node, 0, &res);
230 if (err) {
231 dev_err(&dev->dev, "%s resource error!\n",
232 dev->dev.of_node->full_name);
233 goto error_out;
234 }
235
236 if (ppc4xx_setup_pcieh_hw(dev, res, msi))
237 goto error_out;
238
239 err = ppc4xx_msi_init_allocator(dev, msi);
240 if (err) {
241 dev_err(&dev->dev, "Error allocating MSI bitmap\n");
242 goto error_out;
243 }
244
245 ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
246 ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
247 ppc_md.msi_check_device = ppc4xx_msi_check_device;
248 return err;
249
250error_out:
251 ppc4xx_of_msi_remove(dev);
252 return err;
253}
254static const struct of_device_id ppc4xx_msi_ids[] = {
255 {
256 .compatible = "amcc,ppc4xx-msi",
257 },
258 {}
259};
260static struct platform_driver ppc4xx_msi_driver = {
261 .probe = ppc4xx_msi_probe,
262 .remove = ppc4xx_of_msi_remove,
263 .driver = {
264 .name = "ppc4xx-msi",
265 .owner = THIS_MODULE,
266 .of_match_table = ppc4xx_msi_ids,
267 },
268
269};
270
271static __init int ppc4xx_msi_init(void)
272{
273 return platform_driver_register(&ppc4xx_msi_driver);
274}
275
276subsys_initcall(ppc4xx_msi_init);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff2d2371b2e9..9fab2aa9c2c8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -2,7 +2,7 @@ config MMU
2 def_bool y 2 def_bool y
3 3
4config ZONE_DMA 4config ZONE_DMA
5 def_bool y if 64BIT 5 def_bool y
6 6
7config LOCKDEP_SUPPORT 7config LOCKDEP_SUPPORT
8 def_bool y 8 def_bool y
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index e43fe7537031..f7d3dc555bdb 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -92,9 +92,7 @@ static void appldata_get_mem_data(void *data)
92 mem_data->pswpin = ev[PSWPIN]; 92 mem_data->pswpin = ev[PSWPIN];
93 mem_data->pswpout = ev[PSWPOUT]; 93 mem_data->pswpout = ev[PSWPOUT];
94 mem_data->pgalloc = ev[PGALLOC_NORMAL]; 94 mem_data->pgalloc = ev[PGALLOC_NORMAL];
95#ifdef CONFIG_ZONE_DMA
96 mem_data->pgalloc += ev[PGALLOC_DMA]; 95 mem_data->pgalloc += ev[PGALLOC_DMA];
97#endif
98 mem_data->pgfault = ev[PGFAULT]; 96 mem_data->pgfault = ev[PGFAULT];
99 mem_data->pgmajfault = ev[PGMAJFAULT]; 97 mem_data->pgmajfault = ev[PGMAJFAULT];
100 98
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index e1c8f3a49884..667c6e9f6a34 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -621,6 +621,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
621 bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes)); 621 bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
622 return (bits < size) ? bits : size; 622 return (bits < size) ? bits : size;
623} 623}
624#define find_first_zero_bit find_first_zero_bit
624 625
625/** 626/**
626 * find_first_bit - find the first set bit in a memory region 627 * find_first_bit - find the first set bit in a memory region
@@ -641,6 +642,7 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
641 bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes)); 642 bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
642 return (bits < size) ? bits : size; 643 return (bits < size) ? bits : size;
643} 644}
645#define find_first_bit find_first_bit
644 646
645/** 647/**
646 * find_next_zero_bit - find the first zero bit in a memory region 648 * find_next_zero_bit - find the first zero bit in a memory region
@@ -677,6 +679,7 @@ static inline int find_next_zero_bit (const unsigned long * addr,
677 } 679 }
678 return offset + find_first_zero_bit(p, size); 680 return offset + find_first_zero_bit(p, size);
679} 681}
682#define find_next_zero_bit find_next_zero_bit
680 683
681/** 684/**
682 * find_next_bit - find the first set bit in a memory region 685 * find_next_bit - find the first set bit in a memory region
@@ -713,6 +716,7 @@ static inline int find_next_bit (const unsigned long * addr,
713 } 716 }
714 return offset + find_first_bit(p, size); 717 return offset + find_first_bit(p, size);
715} 718}
719#define find_next_bit find_next_bit
716 720
717/* 721/*
718 * Every architecture must define this function. It's the fastest 722 * Every architecture must define this function. It's the fastest
@@ -742,41 +746,6 @@ static inline int sched_find_first_bit(unsigned long *b)
742 * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 746 * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
743 */ 747 */
744 748
745static inline void __set_bit_le(unsigned long nr, void *addr)
746{
747 __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
748}
749
750static inline void __clear_bit_le(unsigned long nr, void *addr)
751{
752 __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
753}
754
755static inline int __test_and_set_bit_le(unsigned long nr, void *addr)
756{
757 return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
758}
759
760static inline int test_and_set_bit_le(unsigned long nr, void *addr)
761{
762 return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
763}
764
765static inline int __test_and_clear_bit_le(unsigned long nr, void *addr)
766{
767 return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
768}
769
770static inline int test_and_clear_bit_le(unsigned long nr, void *addr)
771{
772 return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
773}
774
775static inline int test_bit_le(unsigned long nr, const void *addr)
776{
777 return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
778}
779
780static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) 749static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
781{ 750{
782 unsigned long bytes, bits; 751 unsigned long bytes, bits;
@@ -787,6 +756,7 @@ static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
787 bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes)); 756 bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
788 return (bits < size) ? bits : size; 757 return (bits < size) ? bits : size;
789} 758}
759#define find_first_zero_bit_le find_first_zero_bit_le
790 760
791static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, 761static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
792 unsigned long offset) 762 unsigned long offset)
@@ -816,6 +786,7 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
816 } 786 }
817 return offset + find_first_zero_bit_le(p, size); 787 return offset + find_first_zero_bit_le(p, size);
818} 788}
789#define find_next_zero_bit_le find_next_zero_bit_le
819 790
820static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) 791static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
821{ 792{
@@ -827,6 +798,7 @@ static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
827 bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes)); 798 bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
828 return (bits < size) ? bits : size; 799 return (bits < size) ? bits : size;
829} 800}
801#define find_first_bit_le find_first_bit_le
830 802
831static inline int find_next_bit_le(void *vaddr, unsigned long size, 803static inline int find_next_bit_le(void *vaddr, unsigned long size,
832 unsigned long offset) 804 unsigned long offset)
@@ -856,6 +828,9 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
856 } 828 }
857 return offset + find_first_bit_le(p, size); 829 return offset + find_first_bit_le(p, size);
858} 830}
831#define find_next_bit_le find_next_bit_le
832
833#include <asm-generic/bitops/le.h>
859 834
860#define ext2_set_bit_atomic(lock, nr, addr) \ 835#define ext2_set_bit_atomic(lock, nr, addr) \
861 test_and_set_bit_le(nr, addr) 836 test_and_set_bit_le(nr, addr)
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h
index 8a096b83f51f..0e3b35f96be1 100644
--- a/arch/s390/include/asm/delay.h
+++ b/arch/s390/include/asm/delay.h
@@ -14,10 +14,12 @@
14#ifndef _S390_DELAY_H 14#ifndef _S390_DELAY_H
15#define _S390_DELAY_H 15#define _S390_DELAY_H
16 16
17extern void __udelay(unsigned long long usecs); 17void __ndelay(unsigned long long nsecs);
18extern void udelay_simple(unsigned long long usecs); 18void __udelay(unsigned long long usecs);
19extern void __delay(unsigned long loops); 19void udelay_simple(unsigned long long usecs);
20void __delay(unsigned long loops);
20 21
22#define ndelay(n) __ndelay((unsigned long long) (n))
21#define udelay(n) __udelay((unsigned long long) (n)) 23#define udelay(n) __udelay((unsigned long long) (n))
22#define mdelay(n) __udelay((unsigned long long) (n) * 1000) 24#define mdelay(n) __udelay((unsigned long long) (n) * 1000)
23 25
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 1544b90bd6d6..ba7b01c726a3 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -2,6 +2,7 @@
2#define _ASM_IRQ_H 2#define _ASM_IRQ_H
3 3
4#include <linux/hardirq.h> 4#include <linux/hardirq.h>
5#include <linux/types.h>
5 6
6enum interruption_class { 7enum interruption_class {
7 EXTERNAL_INTERRUPT, 8 EXTERNAL_INTERRUPT,
@@ -31,4 +32,11 @@ enum interruption_class {
31 NR_IRQS, 32 NR_IRQS,
32}; 33};
33 34
35typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
36
37int register_external_interrupt(u16 code, ext_int_handler_t handler);
38int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
39void service_subclass_irq_register(void);
40void service_subclass_irq_unregister(void);
41
34#endif /* _ASM_IRQ_H */ 42#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
deleted file mode 100644
index 080876d5f196..000000000000
--- a/arch/s390/include/asm/s390_ext.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * Copyright IBM Corp. 1999,2010
3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 */
6
7#ifndef _S390_EXTINT_H
8#define _S390_EXTINT_H
9
10#include <linux/types.h>
11
12typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
13
14int register_external_interrupt(__u16 code, ext_int_handler_t handler);
15int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
16
17#endif /* _S390_EXTINT_H */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 9074a54c4d10..77eee5477a52 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -29,65 +29,77 @@
29#include <asm/smp.h> 29#include <asm/smp.h>
30#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
31 31
32#ifndef CONFIG_SMP
33#define TLB_NR_PTRS 1
34#else
35#define TLB_NR_PTRS 508
36#endif
37
38struct mmu_gather { 32struct mmu_gather {
39 struct mm_struct *mm; 33 struct mm_struct *mm;
40 unsigned int fullmm; 34 unsigned int fullmm;
41 unsigned int nr_ptes; 35 unsigned int nr_ptes;
42 unsigned int nr_pxds; 36 unsigned int nr_pxds;
43 void *array[TLB_NR_PTRS]; 37 unsigned int max;
38 void **array;
39 void *local[8];
44}; 40};
45 41
46DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 42static inline void __tlb_alloc_page(struct mmu_gather *tlb)
47
48static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
49 unsigned int full_mm_flush)
50{ 43{
51 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 44 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
52 45
46 if (addr) {
47 tlb->array = (void *) addr;
48 tlb->max = PAGE_SIZE / sizeof(void *);
49 }
50}
51
52static inline void tlb_gather_mmu(struct mmu_gather *tlb,
53 struct mm_struct *mm,
54 unsigned int full_mm_flush)
55{
53 tlb->mm = mm; 56 tlb->mm = mm;
57 tlb->max = ARRAY_SIZE(tlb->local);
58 tlb->array = tlb->local;
54 tlb->fullmm = full_mm_flush; 59 tlb->fullmm = full_mm_flush;
55 tlb->nr_ptes = 0;
56 tlb->nr_pxds = TLB_NR_PTRS;
57 if (tlb->fullmm) 60 if (tlb->fullmm)
58 __tlb_flush_mm(mm); 61 __tlb_flush_mm(mm);
59 return tlb; 62 else
63 __tlb_alloc_page(tlb);
64 tlb->nr_ptes = 0;
65 tlb->nr_pxds = tlb->max;
60} 66}
61 67
62static inline void tlb_flush_mmu(struct mmu_gather *tlb, 68static inline void tlb_flush_mmu(struct mmu_gather *tlb)
63 unsigned long start, unsigned long end)
64{ 69{
65 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS)) 70 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
66 __tlb_flush_mm(tlb->mm); 71 __tlb_flush_mm(tlb->mm);
67 while (tlb->nr_ptes > 0) 72 while (tlb->nr_ptes > 0)
68 page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]); 73 page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
69 while (tlb->nr_pxds < TLB_NR_PTRS) 74 while (tlb->nr_pxds < tlb->max)
70 crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]); 75 crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
71} 76}
72 77
73static inline void tlb_finish_mmu(struct mmu_gather *tlb, 78static inline void tlb_finish_mmu(struct mmu_gather *tlb,
74 unsigned long start, unsigned long end) 79 unsigned long start, unsigned long end)
75{ 80{
76 tlb_flush_mmu(tlb, start, end); 81 tlb_flush_mmu(tlb);
77 82
78 rcu_table_freelist_finish(); 83 rcu_table_freelist_finish();
79 84
80 /* keep the page table cache within bounds */ 85 /* keep the page table cache within bounds */
81 check_pgt_cache(); 86 check_pgt_cache();
82 87
83 put_cpu_var(mmu_gathers); 88 if (tlb->array != tlb->local)
89 free_pages((unsigned long) tlb->array, 0);
84} 90}
85 91
86/* 92/*
87 * Release the page cache reference for a pte removed by 93 * Release the page cache reference for a pte removed by
88 * tlb_ptep_clear_flush. In both flush modes the tlb fo a page cache page 94 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
89 * has already been freed, so just do free_page_and_swap_cache. 95 * has already been freed, so just do free_page_and_swap_cache.
90 */ 96 */
97static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
98{
99 free_page_and_swap_cache(page);
100 return 1; /* avoid calling tlb_flush_mmu */
101}
102
91static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 103static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
92{ 104{
93 free_page_and_swap_cache(page); 105 free_page_and_swap_cache(page);
@@ -103,7 +115,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
103 if (!tlb->fullmm) { 115 if (!tlb->fullmm) {
104 tlb->array[tlb->nr_ptes++] = pte; 116 tlb->array[tlb->nr_ptes++] = pte;
105 if (tlb->nr_ptes >= tlb->nr_pxds) 117 if (tlb->nr_ptes >= tlb->nr_pxds)
106 tlb_flush_mmu(tlb, 0, 0); 118 tlb_flush_mmu(tlb);
107 } else 119 } else
108 page_table_free(tlb->mm, (unsigned long *) pte); 120 page_table_free(tlb->mm, (unsigned long *) pte);
109} 121}
@@ -124,7 +136,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
124 if (!tlb->fullmm) { 136 if (!tlb->fullmm) {
125 tlb->array[--tlb->nr_pxds] = pmd; 137 tlb->array[--tlb->nr_pxds] = pmd;
126 if (tlb->nr_ptes >= tlb->nr_pxds) 138 if (tlb->nr_ptes >= tlb->nr_pxds)
127 tlb_flush_mmu(tlb, 0, 0); 139 tlb_flush_mmu(tlb);
128 } else 140 } else
129 crst_table_free(tlb->mm, (unsigned long *) pmd); 141 crst_table_free(tlb->mm, (unsigned long *) pmd);
130#endif 142#endif
@@ -146,7 +158,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
146 if (!tlb->fullmm) { 158 if (!tlb->fullmm) {
147 tlb->array[--tlb->nr_pxds] = pud; 159 tlb->array[--tlb->nr_pxds] = pud;
148 if (tlb->nr_ptes >= tlb->nr_pxds) 160 if (tlb->nr_ptes >= tlb->nr_pxds)
149 tlb_flush_mmu(tlb, 0, 0); 161 tlb_flush_mmu(tlb);
150 } else 162 } else
151 crst_table_free(tlb->mm, (unsigned long *) pud); 163 crst_table_free(tlb->mm, (unsigned long *) pud);
152#endif 164#endif
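The reworked s390 mmu_gather fills one pointer array from both ends: pte pages from the front (nr_ptes counts up) and crst tables from the back (nr_pxds counts down), flushing when the two cursors meet. A small userspace model of that bookkeeping, with invented names and a fixed 8-slot array standing in for tlb->local:

#include <stdio.h>

#define MAX_ENTRIES 8	/* stands in for tlb->max when tlb->local is in use */

struct gather {
	void *array[MAX_ENTRIES];
	unsigned int nr_ptes;	/* grows upward from 0 */
	unsigned int nr_pxds;	/* grows downward from MAX_ENTRIES */
};

static void flush(struct gather *g)
{
	printf("flush: %u pte pages, %u crst tables\n",
	       g->nr_ptes, MAX_ENTRIES - g->nr_pxds);
	g->nr_ptes = 0;
	g->nr_pxds = MAX_ENTRIES;
}

static void queue_pte(struct gather *g, void *p)
{
	g->array[g->nr_ptes++] = p;
	if (g->nr_ptes >= g->nr_pxds)	/* the two cursors met: array is full */
		flush(g);
}

static void queue_pxd(struct gather *g, void *p)
{
	g->array[--g->nr_pxds] = p;
	if (g->nr_ptes >= g->nr_pxds)
		flush(g);
}

int main(void)
{
	struct gather g = { .nr_ptes = 0, .nr_pxds = MAX_ENTRIES };
	static int dummy[12];
	int i;

	for (i = 0; i < 10; i++)
		queue_pte(&g, &dummy[i]);
	queue_pxd(&g, &dummy[10]);
	queue_pxd(&g, &dummy[11]);
	flush(&g);		/* final flush, like tlb_finish_mmu() */
	return 0;
}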
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c5338834ddbd..005d77d8ae2a 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -7,7 +7,7 @@
7extern unsigned char cpu_core_id[NR_CPUS]; 7extern unsigned char cpu_core_id[NR_CPUS];
8extern cpumask_t cpu_core_map[NR_CPUS]; 8extern cpumask_t cpu_core_map[NR_CPUS];
9 9
10static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu) 10static inline const struct cpumask *cpu_coregroup_mask(int cpu)
11{ 11{
12 return &cpu_core_map[cpu]; 12 return &cpu_core_map[cpu];
13} 13}
@@ -21,7 +21,7 @@ static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
21extern unsigned char cpu_book_id[NR_CPUS]; 21extern unsigned char cpu_book_id[NR_CPUS];
22extern cpumask_t cpu_book_map[NR_CPUS]; 22extern cpumask_t cpu_book_map[NR_CPUS];
23 23
24static inline const struct cpumask *cpu_book_mask(unsigned int cpu) 24static inline const struct cpumask *cpu_book_mask(int cpu)
25{ 25{
26 return &cpu_book_map[cpu]; 26 return &cpu_book_map[cpu];
27} 27}
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 2d9ea11f919a..2b23885e81e9 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -49,12 +49,13 @@
49 49
50#define segment_eq(a,b) ((a).ar4 == (b).ar4) 50#define segment_eq(a,b) ((a).ar4 == (b).ar4)
51 51
52#define __access_ok(addr, size) \
53({ \
54 __chk_user_ptr(addr); \
55 1; \
56})
52 57
53static inline int __access_ok(const void __user *addr, unsigned long size) 58#define access_ok(type, addr, size) __access_ok(addr, size)
54{
55 return 1;
56}
57#define access_ok(type,addr,size) __access_ok(addr,size)
58 59
59/* 60/*
60 * The exception table consists of pairs of addresses: the first is the 61 * The exception table consists of pairs of addresses: the first is the
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 5ff15dacb571..df3732249baa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -20,10 +20,10 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
20 20
21CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 21CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
22 22
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ 23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
25 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ 25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
26 vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o 26 sysinfo.o jump_label.o
27 27
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3d4a78fc1adc..1ca3d1d6a86c 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -30,9 +30,9 @@
30#include <asm/atomic.h> 30#include <asm/atomic.h>
31#include <asm/mathemu.h> 31#include <asm/mathemu.h>
32#include <asm/cpcmd.h> 32#include <asm/cpcmd.h>
33#include <asm/s390_ext.h>
34#include <asm/lowcore.h> 33#include <asm/lowcore.h>
35#include <asm/debug.h> 34#include <asm/debug.h>
35#include <asm/irq.h>
36 36
37#ifndef CONFIG_64BIT 37#ifndef CONFIG_64BIT
38#define ONELONG "%08lx: " 38#define ONELONG "%08lx: "
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e204f9597aaf..e3264f6a9720 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,19 +1,28 @@
1/* 1/*
2 * Copyright IBM Corp. 2004,2010 2 * Copyright IBM Corp. 2004,2011
3 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
4 * Thomas Spatzier (tspat@de.ibm.com) 4 * Holger Smolinski <Holger.Smolinski@de.ibm.com>,
5 * Thomas Spatzier <tspat@de.ibm.com>,
5 * 6 *
6 * This file contains interrupt related functions. 7 * This file contains interrupt related functions.
7 */ 8 */
8 9
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/kernel_stat.h> 10#include <linux/kernel_stat.h>
12#include <linux/interrupt.h> 11#include <linux/interrupt.h>
13#include <linux/seq_file.h> 12#include <linux/seq_file.h>
14#include <linux/cpu.h>
15#include <linux/proc_fs.h> 13#include <linux/proc_fs.h>
16#include <linux/profile.h> 14#include <linux/profile.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/ftrace.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/cpu.h>
21#include <asm/irq_regs.h>
22#include <asm/cputime.h>
23#include <asm/lowcore.h>
24#include <asm/irq.h>
25#include "entry.h"
17 26
18struct irq_class { 27struct irq_class {
19 char *name; 28 char *name;
@@ -82,8 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
82 * For compatibility only. S/390 specific setup of interrupts et al. is done 91 * For compatibility only. S/390 specific setup of interrupts et al. is done
83 * much later in init_channel_subsystem(). 92 * much later in init_channel_subsystem().
84 */ 93 */
85void __init 94void __init init_IRQ(void)
86init_IRQ(void)
87{ 95{
88 /* nothing... */ 96 /* nothing... */
89} 97}
@@ -134,3 +142,116 @@ void init_irq_proc(void)
134 create_prof_cpu_mask(root_irq_dir); 142 create_prof_cpu_mask(root_irq_dir);
135} 143}
136#endif 144#endif
145
146/*
147 * ext_int_hash[index] is the start of the list for all external interrupts
148 * that hash to this index. With the current set of external interrupts
149 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
150 * iucv and 0x2603 pfault) this is always the first element.
151 */
152
153struct ext_int_info {
154 struct ext_int_info *next;
155 ext_int_handler_t handler;
156 u16 code;
157};
158
159static struct ext_int_info *ext_int_hash[256];
160
161static inline int ext_hash(u16 code)
162{
163 return (code + (code >> 9)) & 0xff;
164}
165
166int register_external_interrupt(u16 code, ext_int_handler_t handler)
167{
168 struct ext_int_info *p;
169 int index;
170
171 p = kmalloc(sizeof(*p), GFP_ATOMIC);
172 if (!p)
173 return -ENOMEM;
174 p->code = code;
175 p->handler = handler;
176 index = ext_hash(code);
177 p->next = ext_int_hash[index];
178 ext_int_hash[index] = p;
179 return 0;
180}
181EXPORT_SYMBOL(register_external_interrupt);
182
183int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
184{
185 struct ext_int_info *p, *q;
186 int index;
187
188 index = ext_hash(code);
189 q = NULL;
190 p = ext_int_hash[index];
191 while (p) {
192 if (p->code == code && p->handler == handler)
193 break;
194 q = p;
195 p = p->next;
196 }
197 if (!p)
198 return -ENOENT;
199 if (q)
200 q->next = p->next;
201 else
202 ext_int_hash[index] = p->next;
203 kfree(p);
204 return 0;
205}
206EXPORT_SYMBOL(unregister_external_interrupt);
207
208void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
209 unsigned int param32, unsigned long param64)
210{
211 struct pt_regs *old_regs;
212 unsigned short code;
213 struct ext_int_info *p;
214 int index;
215
216 code = (unsigned short) ext_int_code;
217 old_regs = set_irq_regs(regs);
218 s390_idle_check(regs, S390_lowcore.int_clock,
219 S390_lowcore.async_enter_timer);
220 irq_enter();
221 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
222 /* Serve timer interrupts first. */
223 clock_comparator_work();
224 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
225 if (code != 0x1004)
226 __get_cpu_var(s390_idle).nohz_delay = 1;
227 index = ext_hash(code);
228 for (p = ext_int_hash[index]; p; p = p->next) {
229 if (likely(p->code == code))
230 p->handler(ext_int_code, param32, param64);
231 }
232 irq_exit();
233 set_irq_regs(old_regs);
234}
235
236static DEFINE_SPINLOCK(sc_irq_lock);
237static int sc_irq_refcount;
238
239void service_subclass_irq_register(void)
240{
241 spin_lock(&sc_irq_lock);
242 if (!sc_irq_refcount)
243 ctl_set_bit(0, 9);
244 sc_irq_refcount++;
245 spin_unlock(&sc_irq_lock);
246}
247EXPORT_SYMBOL(service_subclass_irq_register);
248
249void service_subclass_irq_unregister(void)
250{
251 spin_lock(&sc_irq_lock);
252 sc_irq_refcount--;
253 if (!sc_irq_refcount)
254 ctl_clear_bit(0, 9);
255 spin_unlock(&sc_irq_lock);
256}
257EXPORT_SYMBOL(service_subclass_irq_unregister);
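
The new arch/s390/kernel/irq.c above takes over the external-interrupt hash table that previously lived in s390_ext.c: handlers are registered into one of 256 chains selected by ext_hash(), and do_extint() walks the matching chain on each interrupt. The fragment below is a stand-alone user-space sketch of that hash-chain registration and dispatch pattern, not the kernel code itself; register_ext(), dispatch() and cpu_timer_handler() are illustrative names.

#include <stdio.h>
#include <stdlib.h>

typedef void (*ext_handler_t)(unsigned short code);

struct ext_node {
        struct ext_node *next;
        ext_handler_t handler;
        unsigned short code;
};

static struct ext_node *ext_table[256];

/* same mixing as the kernel's ext_hash(): fold the upper bits into the low byte */
static int ext_hash(unsigned short code)
{
        return (code + (code >> 9)) & 0xff;
}

static int register_ext(unsigned short code, ext_handler_t handler)
{
        struct ext_node *p = malloc(sizeof(*p));

        if (!p)
                return -1;
        p->code = code;
        p->handler = handler;
        p->next = ext_table[ext_hash(code)];
        ext_table[ext_hash(code)] = p;
        return 0;
}

static void dispatch(unsigned short code)
{
        struct ext_node *p;

        for (p = ext_table[ext_hash(code)]; p; p = p->next)
                if (p->code == code)
                        p->handler(code);
}

static void cpu_timer_handler(unsigned short code)
{
        printf("handled external interrupt 0x%04x\n", code);
}

int main(void)
{
        register_ext(0x1004, cpu_timer_handler);        /* 0x1004: cpu timer */
        dispatch(0x1004);
        return 0;
}

The service_subclass_irq_register()/unregister() pair added at the end of the file is the usual refcount-guarded enable/disable idiom: control-register bit CR0.9 is set for the first user and cleared only when the last user goes away, which is why pfault_init()/pfault_fini() no longer toggle that bit themselves (see the mm/fault.c hunk further down).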
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
deleted file mode 100644
index 185029919c4d..000000000000
--- a/arch/s390/kernel/s390_ext.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * Copyright IBM Corp. 1999,2010
3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 */
6
7#include <linux/kernel_stat.h>
8#include <linux/interrupt.h>
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/ftrace.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <asm/s390_ext.h>
15#include <asm/irq_regs.h>
16#include <asm/cputime.h>
17#include <asm/lowcore.h>
18#include <asm/irq.h>
19#include "entry.h"
20
21struct ext_int_info {
22 struct ext_int_info *next;
23 ext_int_handler_t handler;
24 __u16 code;
25};
26
27/*
28 * ext_int_hash[index] is the start of the list for all external interrupts
29 * that hash to this index. With the current set of external interrupts
30 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
31 * iucv and 0x2603 pfault) this is always the first element.
32 */
33static struct ext_int_info *ext_int_hash[256];
34
35static inline int ext_hash(__u16 code)
36{
37 return (code + (code >> 9)) & 0xff;
38}
39
40int register_external_interrupt(__u16 code, ext_int_handler_t handler)
41{
42 struct ext_int_info *p;
43 int index;
44
45 p = kmalloc(sizeof(*p), GFP_ATOMIC);
46 if (!p)
47 return -ENOMEM;
48 p->code = code;
49 p->handler = handler;
50 index = ext_hash(code);
51 p->next = ext_int_hash[index];
52 ext_int_hash[index] = p;
53 return 0;
54}
55EXPORT_SYMBOL(register_external_interrupt);
56
57int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
58{
59 struct ext_int_info *p, *q;
60 int index;
61
62 index = ext_hash(code);
63 q = NULL;
64 p = ext_int_hash[index];
65 while (p) {
66 if (p->code == code && p->handler == handler)
67 break;
68 q = p;
69 p = p->next;
70 }
71 if (!p)
72 return -ENOENT;
73 if (q)
74 q->next = p->next;
75 else
76 ext_int_hash[index] = p->next;
77 kfree(p);
78 return 0;
79}
80EXPORT_SYMBOL(unregister_external_interrupt);
81
82void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
83 unsigned int param32, unsigned long param64)
84{
85 struct pt_regs *old_regs;
86 unsigned short code;
87 struct ext_int_info *p;
88 int index;
89
90 code = (unsigned short) ext_int_code;
91 old_regs = set_irq_regs(regs);
92 s390_idle_check(regs, S390_lowcore.int_clock,
93 S390_lowcore.async_enter_timer);
94 irq_enter();
95 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
96 /* Serve timer interrupts first. */
97 clock_comparator_work();
98 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
99 if (code != 0x1004)
100 __get_cpu_var(s390_idle).nohz_delay = 1;
101 index = ext_hash(code);
102 for (p = ext_int_hash[index]; p; p = p->next) {
103 if (likely(p->code == code))
104 p->handler(ext_int_code, param32, param64);
105 }
106 irq_exit();
107 set_irq_regs(old_regs);
108}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f8e85ecbc459..52420d2785b3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,7 +44,6 @@
44#include <asm/sigp.h> 44#include <asm/sigp.h>
45#include <asm/pgalloc.h> 45#include <asm/pgalloc.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/s390_ext.h>
48#include <asm/cpcmd.h> 47#include <asm/cpcmd.h>
49#include <asm/tlbflush.h> 48#include <asm/tlbflush.h>
50#include <asm/timer.h> 49#include <asm/timer.h>
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a59557f1fb5f..dff933065ab6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -41,7 +41,6 @@
41#include <linux/kprobes.h> 41#include <linux/kprobes.h>
42#include <asm/uaccess.h> 42#include <asm/uaccess.h>
43#include <asm/delay.h> 43#include <asm/delay.h>
44#include <asm/s390_ext.h>
45#include <asm/div64.h> 44#include <asm/div64.h>
46#include <asm/vdso.h> 45#include <asm/vdso.h>
47#include <asm/irq.h> 46#include <asm/irq.h>
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 2eafb8c7a746..0cd340b72632 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -17,7 +17,6 @@
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/cpuset.h> 18#include <linux/cpuset.h>
19#include <asm/delay.h> 19#include <asm/delay.h>
20#include <asm/s390_ext.h>
21 20
22#define PTF_HORIZONTAL (0UL) 21#define PTF_HORIZONTAL (0UL)
23#define PTF_VERTICAL (1UL) 22#define PTF_VERTICAL (1UL)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index b5a4a739b477..a65d2e82f61d 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -39,7 +39,6 @@
39#include <asm/atomic.h> 39#include <asm/atomic.h>
40#include <asm/mathemu.h> 40#include <asm/mathemu.h>
41#include <asm/cpcmd.h> 41#include <asm/cpcmd.h>
42#include <asm/s390_ext.h>
43#include <asm/lowcore.h> 42#include <asm/lowcore.h>
44#include <asm/debug.h> 43#include <asm/debug.h>
45#include "entry.h" 44#include "entry.h"
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 5e8ead4b4aba..2d6228f60cd6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -22,10 +22,10 @@
22#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/kprobes.h> 23#include <linux/kprobes.h>
24 24
25#include <asm/s390_ext.h>
26#include <asm/timer.h> 25#include <asm/timer.h>
27#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
28#include <asm/cputime.h> 27#include <asm/cputime.h>
28#include <asm/irq.h>
29 29
30static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 30static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
31 31
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 0f53110e1d09..a65229d91c92 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/irqflags.h> 13#include <linux/irqflags.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <asm/div64.h>
15 16
16void __delay(unsigned long loops) 17void __delay(unsigned long loops)
17{ 18{
@@ -116,3 +117,17 @@ void udelay_simple(unsigned long long usecs)
116 while (get_clock() < end) 117 while (get_clock() < end)
117 cpu_relax(); 118 cpu_relax();
118} 119}
120
121void __ndelay(unsigned long long nsecs)
122{
123 u64 end;
124
125 nsecs <<= 9;
126 do_div(nsecs, 125);
127 end = get_clock() + nsecs;
128 if (nsecs & ~0xfffUL)
129 __udelay(nsecs >> 12);
130 while (get_clock() < end)
131 barrier();
132}
133EXPORT_SYMBOL(__ndelay);
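
The __ndelay() added above converts nanoseconds into s390 TOD clock units before spinning on get_clock(). One TOD unit is 2^-12 microseconds, so the conversion is ticks = ns * 4096 / 1000 = ns * 512 / 125, which is exactly what the shift by 9 followed by do_div(nsecs, 125) computes; the later nsecs >> 12 turns TOD units back into whole microseconds for __udelay(). A small user-space check of the arithmetic, with ns_to_tod() as an illustrative helper:

#include <stdio.h>
#include <stdint.h>

static uint64_t ns_to_tod(uint64_t ns)
{
        /* 4096 TOD units per microsecond => ns * 512 / 125 */
        return (ns << 9) / 125;
}

int main(void)
{
        printf("1000 ns    -> %llu TOD units (expect 4096)\n",
               (unsigned long long)ns_to_tod(1000));
        printf("2500000 ns -> %llu TOD units (expect 10240000)\n",
               (unsigned long long)ns_to_tod(2500000));
        return 0;
}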
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a0f9e730f26a..fe103e891e7a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,7 +34,7 @@
34#include <asm/asm-offsets.h> 34#include <asm/asm-offsets.h>
35#include <asm/system.h> 35#include <asm/system.h>
36#include <asm/pgtable.h> 36#include <asm/pgtable.h>
37#include <asm/s390_ext.h> 37#include <asm/irq.h>
38#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
39#include <asm/compat.h> 39#include <asm/compat.h>
40#include "../kernel/entry.h" 40#include "../kernel/entry.h"
@@ -245,9 +245,12 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
245 do_no_context(regs, int_code, trans_exc_code); 245 do_no_context(regs, int_code, trans_exc_code);
246 break; 246 break;
247 default: /* fault & VM_FAULT_ERROR */ 247 default: /* fault & VM_FAULT_ERROR */
248 if (fault & VM_FAULT_OOM) 248 if (fault & VM_FAULT_OOM) {
249 pagefault_out_of_memory(); 249 if (!(regs->psw.mask & PSW_MASK_PSTATE))
250 else if (fault & VM_FAULT_SIGBUS) { 250 do_no_context(regs, int_code, trans_exc_code);
251 else
252 pagefault_out_of_memory();
253 } else if (fault & VM_FAULT_SIGBUS) {
251 /* Kernel mode? Handle exceptions or die */ 254 /* Kernel mode? Handle exceptions or die */
252 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 255 if (!(regs->psw.mask & PSW_MASK_PSTATE))
253 do_no_context(regs, int_code, trans_exc_code); 256 do_no_context(regs, int_code, trans_exc_code);
@@ -277,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access,
277 struct mm_struct *mm; 280 struct mm_struct *mm;
278 struct vm_area_struct *vma; 281 struct vm_area_struct *vma;
279 unsigned long address; 282 unsigned long address;
280 int fault, write; 283 unsigned int flags;
284 int fault;
281 285
282 if (notify_page_fault(regs)) 286 if (notify_page_fault(regs))
283 return 0; 287 return 0;
@@ -296,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
296 300
297 address = trans_exc_code & __FAIL_ADDR_MASK; 301 address = trans_exc_code & __FAIL_ADDR_MASK;
298 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 302 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
303 flags = FAULT_FLAG_ALLOW_RETRY;
304 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
305 flags |= FAULT_FLAG_WRITE;
306retry:
299 down_read(&mm->mmap_sem); 307 down_read(&mm->mmap_sem);
300 308
301 fault = VM_FAULT_BADMAP; 309 fault = VM_FAULT_BADMAP;
@@ -325,21 +333,31 @@ static inline int do_exception(struct pt_regs *regs, int access,
325 * make sure we exit gracefully rather than endlessly redo 333 * make sure we exit gracefully rather than endlessly redo
326 * the fault. 334 * the fault.
327 */ 335 */
328 write = (access == VM_WRITE || 336 fault = handle_mm_fault(mm, vma, address, flags);
329 (trans_exc_code & store_indication) == 0x400) ?
330 FAULT_FLAG_WRITE : 0;
331 fault = handle_mm_fault(mm, vma, address, write);
332 if (unlikely(fault & VM_FAULT_ERROR)) 337 if (unlikely(fault & VM_FAULT_ERROR))
333 goto out_up; 338 goto out_up;
334 339
335 if (fault & VM_FAULT_MAJOR) { 340 /*
336 tsk->maj_flt++; 341 * Major/minor page fault accounting is only done on the
337 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 342 * initial attempt. If we go through a retry, it is extremely
338 regs, address); 343 * likely that the page will be found in page cache at that point.
339 } else { 344 */
340 tsk->min_flt++; 345 if (flags & FAULT_FLAG_ALLOW_RETRY) {
341 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 346 if (fault & VM_FAULT_MAJOR) {
342 regs, address); 347 tsk->maj_flt++;
348 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
349 regs, address);
350 } else {
351 tsk->min_flt++;
352 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
353 regs, address);
354 }
355 if (fault & VM_FAULT_RETRY) {
356 /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
357 * of starvation. */
358 flags &= ~FAULT_FLAG_ALLOW_RETRY;
359 goto retry;
360 }
343 } 361 }
344 /* 362 /*
345 * The instruction that caused the program check will 363 * The instruction that caused the program check will
@@ -429,10 +447,9 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
429 access = write ? VM_WRITE : VM_READ; 447 access = write ? VM_WRITE : VM_READ;
430 fault = do_exception(&regs, access, uaddr | 2); 448 fault = do_exception(&regs, access, uaddr | 2);
431 if (unlikely(fault)) { 449 if (unlikely(fault)) {
432 if (fault & VM_FAULT_OOM) { 450 if (fault & VM_FAULT_OOM)
433 pagefault_out_of_memory(); 451 return -EFAULT;
434 fault = 0; 452 else if (fault & VM_FAULT_SIGBUS)
435 } else if (fault & VM_FAULT_SIGBUS)
436 do_sigbus(&regs, pgm_int_code, uaddr); 453 do_sigbus(&regs, pgm_int_code, uaddr);
437 } 454 }
438 return fault ? -EFAULT : 0; 455 return fault ? -EFAULT : 0;
@@ -485,7 +502,6 @@ int pfault_init(void)
485 "2:\n" 502 "2:\n"
486 EX_TABLE(0b,1b) 503 EX_TABLE(0b,1b)
487 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); 504 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
488 __ctl_set_bit(0, 9);
489 return rc; 505 return rc;
490} 506}
491 507
@@ -500,7 +516,6 @@ void pfault_fini(void)
500 516
501 if (!MACHINE_IS_VM || pfault_disable) 517 if (!MACHINE_IS_VM || pfault_disable)
502 return; 518 return;
503 __ctl_clear_bit(0,9);
504 asm volatile( 519 asm volatile(
505 " diag %0,0,0x258\n" 520 " diag %0,0,0x258\n"
506 "0:\n" 521 "0:\n"
@@ -615,6 +630,7 @@ static int __init pfault_irq_init(void)
615 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; 630 rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
616 if (rc) 631 if (rc)
617 goto out_pfault; 632 goto out_pfault;
633 service_subclass_irq_register();
618 hotcpu_notifier(pfault_cpu_notify, 0); 634 hotcpu_notifier(pfault_cpu_notify, 0);
619 return 0; 635 return 0;
620 636
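
The do_exception() changes above move the s390 fault handler to the flags-based handle_mm_fault() interface: FAULT_FLAG_ALLOW_RETRY is set for the first attempt, major/minor fault accounting is only done on that first attempt, and if the mm layer asks for a retry the flag is cleared before jumping back to the retry label, so at most one retry can happen and the faulting task cannot be starved. The stand-alone sketch below models just that control flow; the flag values and fake_handle_fault() are made up for illustration and are not the kernel API.

#include <stdio.h>

#define F_ALLOW_RETRY   0x01
#define F_WRITE         0x02
#define FAULT_RETRY     0x04

static int fake_handle_fault(unsigned int flags, int *attempts)
{
        ++*attempts;
        /* pretend the first attempt had to drop the lock and asks for a retry */
        return (*attempts == 1 && (flags & F_ALLOW_RETRY)) ? FAULT_RETRY : 0;
}

int main(void)
{
        unsigned int flags = F_ALLOW_RETRY | F_WRITE;
        int attempts = 0, fault;

retry:
        fault = fake_handle_fault(flags, &attempts);
        if (fault & FAULT_RETRY) {
                flags &= ~F_ALLOW_RETRY;        /* at most one retry */
                goto retry;
        }
        printf("fault resolved after %d attempt(s)\n", attempts);
        return 0;
}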
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index dfefc2171691..59b663109d90 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -119,9 +119,7 @@ void __init paging_init(void)
119 sparse_memory_present_with_active_regions(MAX_NUMNODES); 119 sparse_memory_present_with_active_regions(MAX_NUMNODES);
120 sparse_init(); 120 sparse_init();
121 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 121 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
122#ifdef CONFIG_ZONE_DMA
123 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); 122 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
124#endif
125 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 123 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
126 free_area_init_nodes(max_zone_pfns); 124 free_area_init_nodes(max_zone_pfns);
127 fault_init(); 125 fault_init();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8d4330642512..14c6fae6fe6b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -36,7 +36,6 @@ struct rcu_table_freelist {
36 ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \ 36 ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
37 / sizeof(unsigned long)) 37 / sizeof(unsigned long))
38 38
39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist); 39static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
41 40
42static void __page_table_free(struct mm_struct *mm, unsigned long *table); 41static void __page_table_free(struct mm_struct *mm, unsigned long *table);
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 053caa0fd276..4552ce40c81a 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -19,7 +19,7 @@
19#include <linux/oprofile.h> 19#include <linux/oprofile.h>
20 20
21#include <asm/lowcore.h> 21#include <asm/lowcore.h>
22#include <asm/s390_ext.h> 22#include <asm/irq.h>
23 23
24#include "hwsampler.h" 24#include "hwsampler.h"
25 25
@@ -580,7 +580,7 @@ static int hws_cpu_callback(struct notifier_block *nfb,
580{ 580{
581 /* We do not have sampler space available for all possible CPUs. 581 /* We do not have sampler space available for all possible CPUs.
582 All CPUs should be online when hw sampling is activated. */ 582 All CPUs should be online when hw sampling is activated. */
583 return NOTIFY_BAD; 583 return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
584} 584}
585 585
586static struct notifier_block hws_cpu_notifier = { 586static struct notifier_block hws_cpu_notifier = {
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index e73bc781cc14..288add8d168f 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -43,9 +43,6 @@ config NO_DMA
43config RWSEM_GENERIC_SPINLOCK 43config RWSEM_GENERIC_SPINLOCK
44 def_bool y 44 def_bool y
45 45
46config GENERIC_FIND_NEXT_BIT
47 def_bool y
48
49config GENERIC_HWEIGHT 46config GENERIC_HWEIGHT
50 def_bool y 47 def_bool y
51 48
diff --git a/arch/score/Kconfig.debug b/arch/score/Kconfig.debug
index 451ed54ce646..a1f346df0a71 100644
--- a/arch/score/Kconfig.debug
+++ b/arch/score/Kconfig.debug
@@ -16,15 +16,6 @@ config CMDLINE
16 other cases you can specify kernel args so that you don't have 16 other cases you can specify kernel args so that you don't have
17 to set them up in board prom initialization routines. 17 to set them up in board prom initialization routines.
18 18
19config DEBUG_STACK_USAGE
20 bool "Enable stack utilization instrumentation"
21 depends on DEBUG_KERNEL
22 help
23 Enables the display of the minimum amount of free stack which each
24 task has ever had available in the sysrq-T and sysrq-P debug output.
25
26 This option will slow down process creation somewhat.
27
28config RUNTIME_DEBUG 19config RUNTIME_DEBUG
29 bool "Enable run-time debugging" 20 bool "Enable run-time debugging"
30 depends on DEBUG_KERNEL 21 depends on DEBUG_KERNEL
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 50fdec54c70a..cee6bce1e30c 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -38,8 +38,6 @@
38#include <asm/sections.h> 38#include <asm/sections.h>
39#include <asm/tlb.h> 39#include <asm/tlb.h>
40 40
41DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
42
43unsigned long empty_zero_page; 41unsigned long empty_zero_page;
44EXPORT_SYMBOL_GPL(empty_zero_page); 42EXPORT_SYMBOL_GPL(empty_zero_page);
45 43
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index b44e37753b9a..74495a5ea027 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -71,12 +71,6 @@ config GENERIC_CSUM
71 def_bool y 71 def_bool y
72 depends on SUPERH64 72 depends on SUPERH64
73 73
74config GENERIC_FIND_NEXT_BIT
75 def_bool y
76
77config GENERIC_FIND_BIT_LE
78 def_bool y
79
80config GENERIC_HWEIGHT 74config GENERIC_HWEIGHT
81 def_bool y 75 def_bool y
82 76
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 1553d56cf4e0..c1d5a820b1aa 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -28,15 +28,6 @@ config STACK_DEBUG
28 every function call and will therefore incur a major 28 every function call and will therefore incur a major
29 performance hit. Most users should say N. 29 performance hit. Most users should say N.
30 30
31config DEBUG_STACK_USAGE
32 bool "Stack utilization instrumentation"
33 depends on DEBUG_KERNEL
34 help
35 Enables the display of the minimum amount of free stack which each
36 task has ever had available in the sysrq-T and sysrq-P debug output.
37
38 This option will slow down process creation somewhat.
39
40config 4KSTACKS 31config 4KSTACKS
41 bool "Use 4Kb for kernel stacks instead of 8Kb" 32 bool "Use 4Kb for kernel stacks instead of 8Kb"
42 depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB 33 depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 77ec0e7b8ddf..e7583484cc07 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -7,7 +7,6 @@ CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y 7CONFIG_IKCONFIG_PROC=y
8CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
9CONFIG_CGROUPS=y 9CONFIG_CGROUPS=y
10CONFIG_CGROUP_NS=y
11CONFIG_CGROUP_FREEZER=y 10CONFIG_CGROUP_FREEZER=y
12CONFIG_CGROUP_DEVICE=y 11CONFIG_CGROUP_DEVICE=y
13CONFIG_CGROUP_CPUACCT=y 12CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index c41650572d79..8a7dd7b59c5c 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -12,7 +12,6 @@ CONFIG_IKCONFIG=y
12CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
13CONFIG_CGROUPS=y 13CONFIG_CGROUPS=y
14CONFIG_CGROUP_DEBUG=y 14CONFIG_CGROUP_DEBUG=y
15CONFIG_CGROUP_NS=y
16CONFIG_CGROUP_FREEZER=y 15CONFIG_CGROUP_FREEZER=y
17CONFIG_CGROUP_DEVICE=y 16CONFIG_CGROUP_DEVICE=y
18CONFIG_CPUSETS=y 17CONFIG_CPUSETS=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index a468ff227fc6..72c3fad7383f 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -8,7 +8,6 @@ CONFIG_RCU_TRACE=y
8CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
9CONFIG_CGROUPS=y 9CONFIG_CGROUPS=y
10CONFIG_CGROUP_DEBUG=y 10CONFIG_CGROUP_DEBUG=y
11CONFIG_CGROUP_NS=y
12CONFIG_CGROUP_DEVICE=y 11CONFIG_CGROUP_DEVICE=y
13CONFIG_CGROUP_CPUACCT=y 12CONFIG_CGROUP_CPUACCT=y
14CONFIG_RESOURCE_COUNTERS=y 13CONFIG_RESOURCE_COUNTERS=y
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index 3f92d37c6374..6bb413036892 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG=y
9CONFIG_IKCONFIG_PROC=y 9CONFIG_IKCONFIG_PROC=y
10CONFIG_LOG_BUF_SHIFT=14 10CONFIG_LOG_BUF_SHIFT=14
11CONFIG_CGROUPS=y 11CONFIG_CGROUPS=y
12CONFIG_CGROUP_NS=y
13CONFIG_CGROUP_FREEZER=y 12CONFIG_CGROUP_FREEZER=y
14CONFIG_CGROUP_DEVICE=y 13CONFIG_CGROUP_DEVICE=y
15CONFIG_CGROUP_CPUACCT=y 14CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index 7b3daec6fefe..8bfa4d056d7a 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
9CONFIG_LOG_BUF_SHIFT=14 9CONFIG_LOG_BUF_SHIFT=14
10CONFIG_CGROUPS=y 10CONFIG_CGROUPS=y
11CONFIG_CGROUP_DEBUG=y 11CONFIG_CGROUP_DEBUG=y
12CONFIG_CGROUP_NS=y
13CONFIG_CGROUP_FREEZER=y 12CONFIG_CGROUP_FREEZER=y
14CONFIG_CGROUP_DEVICE=y 13CONFIG_CGROUP_DEVICE=y
15CONFIG_CPUSETS=y 14CONFIG_CPUSETS=y
diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h
index 4235e228d921..f3613952d1ae 100644
--- a/arch/sh/include/asm/kgdb.h
+++ b/arch/sh/include/asm/kgdb.h
@@ -34,5 +34,6 @@ static inline void arch_kgdb_breakpoint(void)
34 34
35#define CACHE_FLUSH_IS_SAFE 1 35#define CACHE_FLUSH_IS_SAFE 1
36#define BREAK_INSTR_SIZE 2 36#define BREAK_INSTR_SIZE 2
37#define GDB_ADJUSTS_BREAK_OFFSET
37 38
38#endif /* __ASM_SH_KGDB_H */ 39#endif /* __ASM_SH_KGDB_H */
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index de167d3a1a80..40725b4a8018 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -40,9 +40,8 @@
40#include <asm/system.h> 40#include <asm/system.h>
41 41
42#define user_mode(regs) (((regs)->sr & 0x40000000)==0) 42#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
43#define user_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
44#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) 43#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
45#define instruction_pointer(regs) ((unsigned long)(regs)->pc) 44#define GET_USP(regs) ((regs)->regs[15])
46 45
47extern void show_regs(struct pt_regs *); 46extern void show_regs(struct pt_regs *);
48 47
@@ -139,6 +138,9 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
139 138
140 return pc; 139 return pc;
141} 140}
141#define profile_pc profile_pc
142
143#include <asm-generic/ptrace.h>
142#endif /* __KERNEL__ */ 144#endif /* __KERNEL__ */
143 145
144#endif /* __ASM_SH_PTRACE_H */ 146#endif /* __ASM_SH_PTRACE_H */
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 75abb38dffd5..6c308d8b9a50 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -23,8 +23,6 @@ struct mmu_gather {
23 unsigned long start, end; 23 unsigned long start, end;
24}; 24};
25 25
26DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
27
28static inline void init_tlb_gather(struct mmu_gather *tlb) 26static inline void init_tlb_gather(struct mmu_gather *tlb)
29{ 27{
30 tlb->start = TASK_SIZE; 28 tlb->start = TASK_SIZE;
@@ -36,17 +34,13 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
36 } 34 }
37} 35}
38 36
39static inline struct mmu_gather * 37static inline void
40tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 38tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
41{ 39{
42 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
43
44 tlb->mm = mm; 40 tlb->mm = mm;
45 tlb->fullmm = full_mm_flush; 41 tlb->fullmm = full_mm_flush;
46 42
47 init_tlb_gather(tlb); 43 init_tlb_gather(tlb);
48
49 return tlb;
50} 44}
51 45
52static inline void 46static inline void
@@ -57,8 +51,6 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
57 51
58 /* keep the page table cache within bounds */ 52 /* keep the page table cache within bounds */
59 check_pgt_cache(); 53 check_pgt_cache();
60
61 put_cpu_var(mmu_gathers);
62} 54}
63 55
64static inline void 56static inline void
@@ -91,7 +83,21 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
91 } 83 }
92} 84}
93 85
94#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) 86static inline void tlb_flush_mmu(struct mmu_gather *tlb)
87{
88}
89
90static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
91{
92 free_page_and_swap_cache(page);
93 return 1; /* avoid calling tlb_flush_mmu */
94}
95
96static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
97{
98 __tlb_remove_page(tlb, page);
99}
100
95#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) 101#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
96#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) 102#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
97#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) 103#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
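
With the per-CPU mmu_gathers variable gone, the sh tlb.h above follows the generic mmu_gather contract: tlb_gather_mmu() initializes a caller-provided structure, and __tlb_remove_page() returns non-zero as long as it can keep accepting pages, so the caller only invokes tlb_flush_mmu() when the batch reports it is full. The sh variant frees each page immediately and always returns 1, so that flush never happens. Below is a user-space model of the caller-side contract under those assumptions; gather, remove_page() and flush() are illustrative names, not the kernel API.

#include <stdio.h>

struct gather {
        int batched;
        int capacity;
};

/* returns non-zero while there is still room, 0 when the caller must flush */
static int remove_page(struct gather *g, int page)
{
        printf("queue page %d\n", page);
        return ++g->batched < g->capacity;
}

static void flush(struct gather *g)
{
        printf("flush %d page(s)\n", g->batched);
        g->batched = 0;
}

int main(void)
{
        struct gather g = { 0, 3 };
        int page;

        for (page = 0; page < 7; page++)
                if (!remove_page(&g, page))
                        flush(&g);
        if (g.batched)
                flush(&g);
        return 0;
}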
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 0d3f912e3334..58a93fb3d965 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -28,7 +28,6 @@
28#include <asm/cache.h> 28#include <asm/cache.h>
29#include <asm/sizes.h> 29#include <asm/sizes.h>
30 30
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32pgd_t swapper_pg_dir[PTRS_PER_PGD]; 31pgd_t swapper_pg_dir[PTRS_PER_PGD];
33 32
34void __init generic_mem_init(void) 33void __init generic_mem_init(void)
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 63a027c9ada5..af32e17fa170 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -190,14 +190,6 @@ config RWSEM_XCHGADD_ALGORITHM
190 bool 190 bool
191 default y if SPARC64 191 default y if SPARC64
192 192
193config GENERIC_FIND_NEXT_BIT
194 bool
195 default y
196
197config GENERIC_FIND_BIT_LE
198 bool
199 default y
200
201config GENERIC_HWEIGHT 193config GENERIC_HWEIGHT
202 bool 194 bool
203 default y if !ULTRA_HAS_POPULATION_COUNT 195 default y if !ULTRA_HAS_POPULATION_COUNT
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index d9a795efbc04..6db35fba79fd 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -6,15 +6,6 @@ config TRACE_IRQFLAGS_SUPPORT
6 6
7source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
8 8
9config DEBUG_STACK_USAGE
10 bool "Enable stack utilization instrumentation"
11 depends on DEBUG_KERNEL
12 help
13 Enables the display of the minimum amount of free stack which each
14 task has ever had available in the sysrq-T and sysrq-P debug output.
15
16 This option will slow down process creation somewhat.
17
18config DEBUG_DCFLUSH 9config DEBUG_DCFLUSH
19 bool "D-cache flush debugging" 10 bool "D-cache flush debugging"
20 depends on SPARC64 && DEBUG_KERNEL 11 depends on SPARC64 && DEBUG_KERNEL
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 5bdfa2c6e400..4e5e0878144f 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -78,4 +78,7 @@ static inline void check_pgt_cache(void)
78 quicklist_trim(0, NULL, 25, 16); 78 quicklist_trim(0, NULL, 25, 16);
79} 79}
80 80
81#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
82#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
83
81#endif /* _SPARC64_PGALLOC_H */ 84#endif /* _SPARC64_PGALLOC_H */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b77128c80524..1e03c5a6b4f7 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -655,9 +655,11 @@ static inline int pte_special(pte_t pte)
655#define pte_unmap(pte) do { } while (0) 655#define pte_unmap(pte) do { } while (0)
656 656
657/* Actual page table PTE updates. */ 657/* Actual page table PTE updates. */
658extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig); 658extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
659 pte_t *ptep, pte_t orig, int fullmm);
659 660
660static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) 661static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
662 pte_t *ptep, pte_t pte, int fullmm)
661{ 663{
662 pte_t orig = *ptep; 664 pte_t orig = *ptep;
663 665
@@ -670,12 +672,19 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
670 * and SUN4V pte layout, so this inline test is fine. 672 * and SUN4V pte layout, so this inline test is fine.
671 */ 673 */
672 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) 674 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
673 tlb_batch_add(mm, addr, ptep, orig); 675 tlb_batch_add(mm, addr, ptep, orig, fullmm);
674} 676}
675 677
678#define set_pte_at(mm,addr,ptep,pte) \
679 __set_pte_at((mm), (addr), (ptep), (pte), 0)
680
676#define pte_clear(mm,addr,ptep) \ 681#define pte_clear(mm,addr,ptep) \
677 set_pte_at((mm), (addr), (ptep), __pte(0UL)) 682 set_pte_at((mm), (addr), (ptep), __pte(0UL))
678 683
684#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
685#define pte_clear_not_present_full(mm,addr,ptep,fullmm) \
686 __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
687
679#ifdef DCACHE_ALIASING_POSSIBLE 688#ifdef DCACHE_ALIASING_POSSIBLE
680#define __HAVE_ARCH_MOVE_PTE 689#define __HAVE_ARCH_MOVE_PTE
681#define move_pte(pte, prot, old_addr, new_addr) \ 690#define move_pte(pte, prot, old_addr, new_addr) \
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index dca406b9b6fc..190e18913cc6 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -7,66 +7,11 @@
7#include <asm/tlbflush.h> 7#include <asm/tlbflush.h>
8#include <asm/mmu_context.h> 8#include <asm/mmu_context.h>
9 9
10#define TLB_BATCH_NR 192
11
12/*
13 * For UP we don't need to worry about TLB flush
14 * and page free order so much..
15 */
16#ifdef CONFIG_SMP
17 #define FREE_PTE_NR 506
18 #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
19#else
20 #define FREE_PTE_NR 1
21 #define tlb_fast_mode(bp) 1
22#endif
23
24struct mmu_gather {
25 struct mm_struct *mm;
26 unsigned int pages_nr;
27 unsigned int need_flush;
28 unsigned int fullmm;
29 unsigned int tlb_nr;
30 unsigned long vaddrs[TLB_BATCH_NR];
31 struct page *pages[FREE_PTE_NR];
32};
33
34DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36#ifdef CONFIG_SMP 10#ifdef CONFIG_SMP
37extern void smp_flush_tlb_pending(struct mm_struct *, 11extern void smp_flush_tlb_pending(struct mm_struct *,
38 unsigned long, unsigned long *); 12 unsigned long, unsigned long *);
39#endif 13#endif
40 14
41extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
42extern void flush_tlb_pending(void);
43
44static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
45{
46 struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
47
48 BUG_ON(mp->tlb_nr);
49
50 mp->mm = mm;
51 mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
52 mp->fullmm = full_mm_flush;
53
54 return mp;
55}
56
57
58static inline void tlb_flush_mmu(struct mmu_gather *mp)
59{
60 if (!mp->fullmm)
61 flush_tlb_pending();
62 if (mp->need_flush) {
63 free_pages_and_swap_cache(mp->pages, mp->pages_nr);
64 mp->pages_nr = 0;
65 mp->need_flush = 0;
66 }
67
68}
69
70#ifdef CONFIG_SMP 15#ifdef CONFIG_SMP
71extern void smp_flush_tlb_mm(struct mm_struct *mm); 16extern void smp_flush_tlb_mm(struct mm_struct *mm);
72#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm) 17#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
@@ -74,38 +19,14 @@ extern void smp_flush_tlb_mm(struct mm_struct *mm);
74#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT) 19#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
75#endif 20#endif
76 21
77static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end) 22extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
78{ 23extern void flush_tlb_pending(void);
79 tlb_flush_mmu(mp);
80
81 if (mp->fullmm)
82 mp->fullmm = 0;
83
84 /* keep the page table cache within bounds */
85 check_pgt_cache();
86
87 put_cpu_var(mmu_gathers);
88}
89
90static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
91{
92 if (tlb_fast_mode(mp)) {
93 free_page_and_swap_cache(page);
94 return;
95 }
96 mp->need_flush = 1;
97 mp->pages[mp->pages_nr++] = page;
98 if (mp->pages_nr >= FREE_PTE_NR)
99 tlb_flush_mmu(mp);
100}
101
102#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
103#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
104#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
105#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
106 24
107#define tlb_migrate_finish(mm) do { } while (0)
108#define tlb_start_vma(tlb, vma) do { } while (0) 25#define tlb_start_vma(tlb, vma) do { } while (0)
109#define tlb_end_vma(tlb, vma) do { } while (0) 26#define tlb_end_vma(tlb, vma) do { } while (0)
27#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
28#define tlb_flush(tlb) flush_tlb_pending()
29
30#include <asm-generic/tlb.h>
110 31
111#endif /* _SPARC64_TLB_H */ 32#endif /* _SPARC64_TLB_H */
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index fbb675dbe0c9..2ef463494153 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -5,9 +5,17 @@
5#include <asm/mmu_context.h> 5#include <asm/mmu_context.h>
6 6
7/* TSB flush operations. */ 7/* TSB flush operations. */
8struct mmu_gather; 8
9#define TLB_BATCH_NR 192
10
11struct tlb_batch {
12 struct mm_struct *mm;
13 unsigned long tlb_nr;
14 unsigned long vaddrs[TLB_BATCH_NR];
15};
16
9extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); 17extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
10extern void flush_tsb_user(struct mmu_gather *mp); 18extern void flush_tsb_user(struct tlb_batch *tb);
11 19
12/* TLB flush operations. */ 20/* TLB flush operations. */
13 21
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 3609bdee9ed2..3249d3f3234d 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -82,7 +82,7 @@ static void prom_sync_me(void)
82 "nop\n\t" : : "r" (&trapbase)); 82 "nop\n\t" : : "r" (&trapbase));
83 83
84 prom_printf("PROM SYNC COMMAND...\n"); 84 prom_printf("PROM SYNC COMMAND...\n");
85 show_free_areas(); 85 show_free_areas(0);
86 if(current->pid != 0) { 86 if(current->pid != 0) {
87 local_irq_enable(); 87 local_irq_enable();
88 sys_sync(); 88 sys_sync();
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 4c31e2b6e71b..ca217327e8d2 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -37,8 +37,6 @@
37#include <asm/prom.h> 37#include <asm/prom.h>
38#include <asm/leon.h> 38#include <asm/leon.h>
39 39
40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
41
42unsigned long *sparc_valid_addr_bitmap; 40unsigned long *sparc_valid_addr_bitmap;
43EXPORT_SYMBOL(sparc_valid_addr_bitmap); 41EXPORT_SYMBOL(sparc_valid_addr_bitmap);
44 42
@@ -78,7 +76,7 @@ void __init kmap_init(void)
78void show_mem(unsigned int filter) 76void show_mem(unsigned int filter)
79{ 77{
80 printk("Mem-info:\n"); 78 printk("Mem-info:\n");
81 show_free_areas(); 79 show_free_areas(filter);
82 printk("Free swap: %6ldkB\n", 80 printk("Free swap: %6ldkB\n",
83 nr_swap_pages << (PAGE_SHIFT-10)); 81 nr_swap_pages << (PAGE_SHIFT-10));
84 printk("%ld pages of RAM\n", totalram_pages); 82 printk("%ld pages of RAM\n", totalram_pages);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e24a82f..b1f279cd00bf 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
19 19
20/* Heavily inspired by the ppc64 code. */ 20/* Heavily inspired by the ppc64 code. */
21 21
22DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 22static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
23 23
24void flush_tlb_pending(void) 24void flush_tlb_pending(void)
25{ 25{
26 struct mmu_gather *mp = &get_cpu_var(mmu_gathers); 26 struct tlb_batch *tb = &get_cpu_var(tlb_batch);
27 27
28 if (mp->tlb_nr) { 28 if (tb->tlb_nr) {
29 flush_tsb_user(mp); 29 flush_tsb_user(tb);
30 30
31 if (CTX_VALID(mp->mm->context)) { 31 if (CTX_VALID(tb->mm->context)) {
32#ifdef CONFIG_SMP 32#ifdef CONFIG_SMP
33 smp_flush_tlb_pending(mp->mm, mp->tlb_nr, 33 smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
34 &mp->vaddrs[0]); 34 &tb->vaddrs[0]);
35#else 35#else
36 __flush_tlb_pending(CTX_HWBITS(mp->mm->context), 36 __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
37 mp->tlb_nr, &mp->vaddrs[0]); 37 tb->tlb_nr, &tb->vaddrs[0]);
38#endif 38#endif
39 } 39 }
40 mp->tlb_nr = 0; 40 tb->tlb_nr = 0;
41 } 41 }
42 42
43 put_cpu_var(mmu_gathers); 43 put_cpu_var(tlb_batch);
44} 44}
45 45
46void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig) 46void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
47 pte_t *ptep, pte_t orig, int fullmm)
47{ 48{
48 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); 49 struct tlb_batch *tb = &get_cpu_var(tlb_batch);
49 unsigned long nr; 50 unsigned long nr;
50 51
51 vaddr &= PAGE_MASK; 52 vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
77 78
78no_cache_flush: 79no_cache_flush:
79 80
80 if (mp->fullmm) 81 if (fullmm) {
82 put_cpu_var(tlb_batch);
81 return; 83 return;
84 }
82 85
83 nr = mp->tlb_nr; 86 nr = tb->tlb_nr;
84 87
85 if (unlikely(nr != 0 && mm != mp->mm)) { 88 if (unlikely(nr != 0 && mm != tb->mm)) {
86 flush_tlb_pending(); 89 flush_tlb_pending();
87 nr = 0; 90 nr = 0;
88 } 91 }
89 92
90 if (nr == 0) 93 if (nr == 0)
91 mp->mm = mm; 94 tb->mm = mm;
92 95
93 mp->vaddrs[nr] = vaddr; 96 tb->vaddrs[nr] = vaddr;
94 mp->tlb_nr = ++nr; 97 tb->tlb_nr = ++nr;
95 if (nr >= TLB_BATCH_NR) 98 if (nr >= TLB_BATCH_NR)
96 flush_tlb_pending(); 99 flush_tlb_pending();
100
101 put_cpu_var(tlb_batch);
97} 102}
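
The sparc tlb.c rework above replaces the old private mmu_gather with a small per-CPU struct tlb_batch: tlb_batch_add() collects up to TLB_BATCH_NR virtual addresses for one mm, flush_tlb_pending() is forced whenever the batch fills or the mm changes, and the new fullmm argument lets a full address-space teardown skip the batching entirely. Each entry point now brackets its access with get_cpu_var()/put_cpu_var(), so preemption is disabled only while the per-CPU batch is actually touched. The fragment below is a stand-alone user-space model of those batching rules, not the kernel code; batch_add() and flush_pending() are illustrative names.

#include <stdio.h>

#define BATCH_NR 4

struct batch {
        int mm;
        int nr;
        unsigned long vaddrs[BATCH_NR];
};

static void flush_pending(struct batch *b)
{
        if (!b->nr)
                return;
        printf("flush %d vaddr(s) for mm %d\n", b->nr, b->mm);
        b->nr = 0;
}

static void batch_add(struct batch *b, int mm, unsigned long vaddr, int fullmm)
{
        if (fullmm)                     /* whole mm goes away: nothing to batch */
                return;
        if (b->nr && b->mm != mm)       /* switching mm forces a flush */
                flush_pending(b);
        if (b->nr == 0)
                b->mm = mm;
        b->vaddrs[b->nr++] = vaddr;
        if (b->nr >= BATCH_NR)          /* full batch forces a flush */
                flush_pending(b);
}

int main(void)
{
        struct batch b = { 0, 0, { 0 } };
        unsigned long v;

        for (v = 0; v < 6; v++)
                batch_add(&b, 1, 0x1000 * v, 0);
        batch_add(&b, 2, 0xd000000, 0); /* new mm flushes the remainder */
        flush_pending(&b);
        return 0;
}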
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 101d7c82870b..948461513499 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
47 } 47 }
48} 48}
49 49
50static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries) 50static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
51 unsigned long tsb, unsigned long nentries)
51{ 52{
52 unsigned long i; 53 unsigned long i;
53 54
54 for (i = 0; i < mp->tlb_nr; i++) { 55 for (i = 0; i < tb->tlb_nr; i++) {
55 unsigned long v = mp->vaddrs[i]; 56 unsigned long v = tb->vaddrs[i];
56 unsigned long tag, ent, hash; 57 unsigned long tag, ent, hash;
57 58
58 v &= ~0x1UL; 59 v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, uns
65 } 66 }
66} 67}
67 68
68void flush_tsb_user(struct mmu_gather *mp) 69void flush_tsb_user(struct tlb_batch *tb)
69{ 70{
70 struct mm_struct *mm = mp->mm; 71 struct mm_struct *mm = tb->mm;
71 unsigned long nentries, base, flags; 72 unsigned long nentries, base, flags;
72 73
73 spin_lock_irqsave(&mm->context.lock, flags); 74 spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *mp)
76 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; 77 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
77 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 78 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
78 base = __pa(base); 79 base = __pa(base);
79 __flush_tsb_one(mp, PAGE_SHIFT, base, nentries); 80 __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
80 81
81#ifdef CONFIG_HUGETLB_PAGE 82#ifdef CONFIG_HUGETLB_PAGE
82 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { 83 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *mp)
84 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; 85 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
85 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 86 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
86 base = __pa(base); 87 base = __pa(base);
87 __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries); 88 __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
88 } 89 }
89#endif 90#endif
90 spin_unlock_irqrestore(&mm->context.lock, flags); 91 spin_unlock_irqrestore(&mm->context.lock, flags);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index e32b0c23c4c8..e1e50101b3bb 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -5,7 +5,6 @@ config TILE
5 def_bool y 5 def_bool y
6 select HAVE_KVM if !TILEGX 6 select HAVE_KVM if !TILEGX
7 select GENERIC_FIND_FIRST_BIT 7 select GENERIC_FIND_FIRST_BIT
8 select GENERIC_FIND_NEXT_BIT
9 select USE_GENERIC_SMP_HELPERS 8 select USE_GENERIC_SMP_HELPERS
10 select CC_OPTIMIZE_FOR_SIZE 9 select CC_OPTIMIZE_FOR_SIZE
11 select HAVE_GENERIC_HARDIRQS 10 select HAVE_GENERIC_HARDIRQS
@@ -339,6 +338,14 @@ config NO_IOPORT
339 338
340source "drivers/pci/Kconfig" 339source "drivers/pci/Kconfig"
341 340
341config HOTPLUG
342 bool "Support for hot-pluggable devices"
343 ---help---
344 Say Y here if you want to plug devices into your computer while
345 the system is running, and be able to use them quickly. In many
346 cases, the devices can likewise be unplugged at any time too.
347 One well-known example of this is USB.
348
342source "drivers/pci/hotplug/Kconfig" 349source "drivers/pci/hotplug/Kconfig"
343 350
344endmenu 351endmenu
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
index 9bc161a02c71..ddbfc3322d7f 100644
--- a/arch/tile/Kconfig.debug
+++ b/arch/tile/Kconfig.debug
@@ -21,15 +21,6 @@ config DEBUG_STACKOVERFLOW
21 This option will cause messages to be printed if free stack space 21 This option will cause messages to be printed if free stack space
22 drops below a certain limit. 22 drops below a certain limit.
23 23
24config DEBUG_STACK_USAGE
25 bool "Stack utilization instrumentation"
26 depends on DEBUG_KERNEL
27 help
28 Enables the display of the minimum amount of free stack which each
29 task has ever had available in the sysrq-T and sysrq-P debug output.
30
31 This option will slow down process creation somewhat.
32
33config DEBUG_EXTRA_FLAGS 24config DEBUG_EXTRA_FLAGS
34 string "Additional compiler arguments when building with '-g'" 25 string "Additional compiler arguments when building with '-g'"
35 depends on DEBUG_INFO 26 depends on DEBUG_INFO
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig
deleted file mode 100644
index 0fe54445fda5..000000000000
--- a/arch/tile/configs/tile_defconfig
+++ /dev/null
@@ -1,71 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y
4CONFIG_BLK_DEV_INITRD=y
5CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
6CONFIG_EXPERT=y
7# CONFIG_COMPAT_BRK is not set
8CONFIG_PROFILING=y
9CONFIG_MODULES=y
10CONFIG_MODULE_UNLOAD=y
11# CONFIG_BLK_DEV_BSG is not set
12# CONFIG_IOSCHED_DEADLINE is not set
13# CONFIG_IOSCHED_CFQ is not set
14CONFIG_NO_HZ=y
15CONFIG_HIGH_RES_TIMERS=y
16CONFIG_HZ_100=y
17CONFIG_NET=y
18CONFIG_PACKET=y
19CONFIG_UNIX=y
20CONFIG_INET=y
21CONFIG_IP_MULTICAST=y
22# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
23# CONFIG_INET_XFRM_MODE_TUNNEL is not set
24# CONFIG_INET_LRO is not set
25# CONFIG_INET_DIAG is not set
26CONFIG_IPV6=y
27# CONFIG_WIRELESS is not set
28CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
29CONFIG_SCSI=y
30CONFIG_BLK_DEV_SD=y
31CONFIG_SCSI_CONSTANTS=y
32CONFIG_SCSI_LOGGING=y
33CONFIG_NETDEVICES=y
34CONFIG_TUN=y
35# CONFIG_NETDEV_10000 is not set
36# CONFIG_WLAN is not set
37# CONFIG_INPUT_MOUSEDEV is not set
38# CONFIG_INPUT_KEYBOARD is not set
39# CONFIG_INPUT_MOUSE is not set
40# CONFIG_SERIO is not set
41# CONFIG_VT is not set
42# CONFIG_LEGACY_PTYS is not set
43# CONFIG_HW_RANDOM is not set
44CONFIG_WATCHDOG=y
45CONFIG_WATCHDOG_NOWAYOUT=y
46# CONFIG_HID_SUPPORT is not set
47CONFIG_RTC_CLASS=y
48# CONFIG_RTC_INTF_SYSFS is not set
49# CONFIG_RTC_INTF_PROC is not set
50CONFIG_EXT2_FS=y
51CONFIG_EXT3_FS=y
52# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
53CONFIG_FUSE_FS=y
54CONFIG_MSDOS_FS=y
55CONFIG_VFAT_FS=m
56CONFIG_TMPFS=y
57CONFIG_HUGETLBFS=y
58CONFIG_NFS_FS=m
59CONFIG_NFS_V3=y
60CONFIG_NLS_CODEPAGE_437=y
61CONFIG_NLS_ISO8859_1=y
62CONFIG_FRAME_WARN=2048
63CONFIG_MAGIC_SYSRQ=y
64CONFIG_DEBUG_KERNEL=y
65CONFIG_DETECT_HUNG_TASK=y
66CONFIG_DEBUG_SPINLOCK_SLEEP=y
67CONFIG_DEBUG_INFO=y
68CONFIG_DEBUG_VM=y
69# CONFIG_RCU_CPU_STALL_DETECTOR is not set
70CONFIG_DEBUG_STACKOVERFLOW=y
71CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly"
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
new file mode 100644
index 000000000000..09f1c7fad8bf
--- /dev/null
+++ b/arch/tile/configs/tilegx_defconfig
@@ -0,0 +1,1833 @@
1#
2# Automatically generated make config: don't edit
3# Linux/tilegx 2.6.39-rc5 Kernel Configuration
4# Wed May 4 11:08:04 2011
5#
6CONFIG_TILE=y
7CONFIG_MMU=y
8CONFIG_GENERIC_CSUM=y
9CONFIG_SEMAPHORE_SLEEPERS=y
10CONFIG_HAVE_ARCH_ALLOC_REMAP=y
11CONFIG_HAVE_SETUP_PER_CPU_AREA=y
12CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
13CONFIG_SYS_SUPPORTS_HUGETLBFS=y
14CONFIG_GENERIC_TIME=y
15CONFIG_GENERIC_CLOCKEVENTS=y
16CONFIG_RWSEM_GENERIC_SPINLOCK=y
17CONFIG_DEFAULT_MIGRATION_COST=10000000
18CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
19CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
20CONFIG_ARCH_DMA_ADDR_T_64BIT=y
21CONFIG_LOCKDEP_SUPPORT=y
22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
24CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
25CONFIG_TRACE_IRQFLAGS_SUPPORT=y
26CONFIG_STRICT_DEVMEM=y
27CONFIG_SMP=y
28# CONFIG_DEBUG_COPY_FROM_USER is not set
29CONFIG_HVC_TILE=y
30CONFIG_TILEGX=y
31CONFIG_64BIT=y
32CONFIG_ARCH_DEFCONFIG="arch/tile/configs/tilegx_defconfig"
33CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
34CONFIG_CONSTRUCTORS=y
35
36#
37# General setup
38#
39CONFIG_EXPERIMENTAL=y
40CONFIG_INIT_ENV_ARG_LIMIT=32
41CONFIG_CROSS_COMPILE=""
42CONFIG_LOCALVERSION=""
43# CONFIG_LOCALVERSION_AUTO is not set
44CONFIG_SWAP=y
45CONFIG_SYSVIPC=y
46CONFIG_SYSVIPC_SYSCTL=y
47CONFIG_POSIX_MQUEUE=y
48CONFIG_POSIX_MQUEUE_SYSCTL=y
49CONFIG_BSD_PROCESS_ACCT=y
50CONFIG_BSD_PROCESS_ACCT_V3=y
51# CONFIG_FHANDLE is not set
52CONFIG_TASKSTATS=y
53CONFIG_TASK_DELAY_ACCT=y
54CONFIG_TASK_XACCT=y
55CONFIG_TASK_IO_ACCOUNTING=y
56CONFIG_AUDIT=y
57CONFIG_HAVE_GENERIC_HARDIRQS=y
58
59#
60# IRQ subsystem
61#
62CONFIG_GENERIC_HARDIRQS=y
63CONFIG_GENERIC_IRQ_PROBE=y
64CONFIG_GENERIC_IRQ_SHOW=y
65CONFIG_GENERIC_PENDING_IRQ=y
66
67#
68# RCU Subsystem
69#
70CONFIG_TREE_RCU=y
71# CONFIG_PREEMPT_RCU is not set
72# CONFIG_RCU_TRACE is not set
73CONFIG_RCU_FANOUT=64
74# CONFIG_RCU_FANOUT_EXACT is not set
75# CONFIG_RCU_FAST_NO_HZ is not set
76# CONFIG_TREE_RCU_TRACE is not set
77# CONFIG_IKCONFIG is not set
78CONFIG_LOG_BUF_SHIFT=19
79CONFIG_CGROUPS=y
80CONFIG_CGROUP_DEBUG=y
81CONFIG_CGROUP_NS=y
82# CONFIG_CGROUP_FREEZER is not set
83CONFIG_CGROUP_DEVICE=y
84CONFIG_CPUSETS=y
85CONFIG_PROC_PID_CPUSET=y
86CONFIG_CGROUP_CPUACCT=y
87CONFIG_RESOURCE_COUNTERS=y
88CONFIG_CGROUP_MEM_RES_CTLR=y
89CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
90CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED=y
91CONFIG_CGROUP_SCHED=y
92CONFIG_FAIR_GROUP_SCHED=y
93CONFIG_RT_GROUP_SCHED=y
94CONFIG_BLK_CGROUP=y
95# CONFIG_DEBUG_BLK_CGROUP is not set
96CONFIG_NAMESPACES=y
97CONFIG_UTS_NS=y
98CONFIG_IPC_NS=y
99CONFIG_USER_NS=y
100CONFIG_PID_NS=y
101CONFIG_NET_NS=y
102# CONFIG_SCHED_AUTOGROUP is not set
103CONFIG_MM_OWNER=y
104# CONFIG_SYSFS_DEPRECATED is not set
105CONFIG_RELAY=y
106CONFIG_BLK_DEV_INITRD=y
107CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
108CONFIG_INITRAMFS_ROOT_UID=0
109CONFIG_INITRAMFS_ROOT_GID=0
110CONFIG_RD_GZIP=y
111# CONFIG_RD_BZIP2 is not set
112# CONFIG_RD_LZMA is not set
113# CONFIG_RD_XZ is not set
114# CONFIG_RD_LZO is not set
115CONFIG_INITRAMFS_COMPRESSION_NONE=y
116# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
117CONFIG_CC_OPTIMIZE_FOR_SIZE=y
118CONFIG_SYSCTL=y
119CONFIG_ANON_INODES=y
120CONFIG_EXPERT=y
121CONFIG_SYSCTL_SYSCALL=y
122CONFIG_KALLSYMS=y
123# CONFIG_KALLSYMS_ALL is not set
124# CONFIG_KALLSYMS_EXTRA_PASS is not set
125CONFIG_HOTPLUG=y
126CONFIG_PRINTK=y
127CONFIG_BUG=y
128CONFIG_ELF_CORE=y
129CONFIG_BASE_FULL=y
130CONFIG_FUTEX=y
131CONFIG_EPOLL=y
132CONFIG_SIGNALFD=y
133CONFIG_TIMERFD=y
134CONFIG_EVENTFD=y
135CONFIG_SHMEM=y
136CONFIG_AIO=y
137CONFIG_EMBEDDED=y
138
139#
140# Kernel Performance Events And Counters
141#
142CONFIG_VM_EVENT_COUNTERS=y
143CONFIG_PCI_QUIRKS=y
144CONFIG_SLUB_DEBUG=y
145# CONFIG_COMPAT_BRK is not set
146# CONFIG_SLAB is not set
147CONFIG_SLUB=y
148# CONFIG_SLOB is not set
149CONFIG_PROFILING=y
150CONFIG_USE_GENERIC_SMP_HELPERS=y
151
152#
153# GCOV-based kernel profiling
154#
155# CONFIG_GCOV_KERNEL is not set
156# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
157CONFIG_SLABINFO=y
158CONFIG_RT_MUTEXES=y
159CONFIG_BASE_SMALL=0
160CONFIG_MODULES=y
161CONFIG_MODULE_FORCE_LOAD=y
162CONFIG_MODULE_UNLOAD=y
163# CONFIG_MODULE_FORCE_UNLOAD is not set
164# CONFIG_MODVERSIONS is not set
165# CONFIG_MODULE_SRCVERSION_ALL is not set
166CONFIG_STOP_MACHINE=y
167CONFIG_BLOCK=y
168CONFIG_BLK_DEV_BSG=y
169CONFIG_BLK_DEV_INTEGRITY=y
170# CONFIG_BLK_DEV_THROTTLING is not set
171CONFIG_BLOCK_COMPAT=y
172
173#
174# IO Schedulers
175#
176CONFIG_IOSCHED_NOOP=y
177CONFIG_IOSCHED_DEADLINE=y
178CONFIG_IOSCHED_CFQ=y
179CONFIG_CFQ_GROUP_IOSCHED=y
180# CONFIG_DEFAULT_DEADLINE is not set
181CONFIG_DEFAULT_CFQ=y
182# CONFIG_DEFAULT_NOOP is not set
183CONFIG_DEFAULT_IOSCHED="cfq"
184CONFIG_PADATA=y
185# CONFIG_INLINE_SPIN_TRYLOCK is not set
186# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
187# CONFIG_INLINE_SPIN_LOCK is not set
188# CONFIG_INLINE_SPIN_LOCK_BH is not set
189# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
190# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
191CONFIG_INLINE_SPIN_UNLOCK=y
192# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
193CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
194# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
195# CONFIG_INLINE_READ_TRYLOCK is not set
196# CONFIG_INLINE_READ_LOCK is not set
197# CONFIG_INLINE_READ_LOCK_BH is not set
198# CONFIG_INLINE_READ_LOCK_IRQ is not set
199# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
200CONFIG_INLINE_READ_UNLOCK=y
201# CONFIG_INLINE_READ_UNLOCK_BH is not set
202CONFIG_INLINE_READ_UNLOCK_IRQ=y
203# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
204# CONFIG_INLINE_WRITE_TRYLOCK is not set
205# CONFIG_INLINE_WRITE_LOCK is not set
206# CONFIG_INLINE_WRITE_LOCK_BH is not set
207# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
208# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
209CONFIG_INLINE_WRITE_UNLOCK=y
210# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
211CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
212# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
213CONFIG_MUTEX_SPIN_ON_OWNER=y
214
215#
216# Tilera-specific configuration
217#
218CONFIG_NR_CPUS=100
219CONFIG_TICK_ONESHOT=y
220CONFIG_NO_HZ=y
221CONFIG_HIGH_RES_TIMERS=y
222CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
223CONFIG_HZ_100=y
224# CONFIG_HZ_250 is not set
225# CONFIG_HZ_300 is not set
226# CONFIG_HZ_1000 is not set
227CONFIG_HZ=100
228CONFIG_SCHED_HRTICK=y
229# CONFIG_KEXEC is not set
230CONFIG_COMPAT=y
231CONFIG_SYSVIPC_COMPAT=y
232# CONFIG_HIGHMEM is not set
233CONFIG_NUMA=y
234CONFIG_NODES_SHIFT=2
235CONFIG_PAGE_OFFSET=0xC0000000
236CONFIG_SELECT_MEMORY_MODEL=y
237CONFIG_DISCONTIGMEM_MANUAL=y
238CONFIG_DISCONTIGMEM=y
239CONFIG_FLAT_NODE_MEM_MAP=y
240CONFIG_NEED_MULTIPLE_NODES=y
241CONFIG_PAGEFLAGS_EXTENDED=y
242CONFIG_SPLIT_PTLOCK_CPUS=4
243# CONFIG_COMPACTION is not set
244CONFIG_MIGRATION=y
245CONFIG_PHYS_ADDR_T_64BIT=y
246CONFIG_ZONE_DMA_FLAG=0
247CONFIG_VIRT_TO_BUS=y
248# CONFIG_KSM is not set
249CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
250# CONFIG_CMDLINE_BOOL is not set
251CONFIG_VMALLOC_RESERVE=0x1000000
252CONFIG_HARDWALL=y
253CONFIG_KERNEL_PL=1
254
255#
256# Bus options
257#
258CONFIG_PCI=y
259CONFIG_PCI_DOMAINS=y
260# CONFIG_NO_IOMEM is not set
261# CONFIG_NO_IOPORT is not set
262# CONFIG_ARCH_SUPPORTS_MSI is not set
263CONFIG_PCI_DEBUG=y
264# CONFIG_PCI_STUB is not set
265# CONFIG_PCI_IOV is not set
266# CONFIG_HOTPLUG_PCI is not set
267
268#
269# Executable file formats
270#
271CONFIG_KCORE_ELF=y
272CONFIG_BINFMT_ELF=y
273CONFIG_COMPAT_BINFMT_ELF=y
274# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
275# CONFIG_HAVE_AOUT is not set
276CONFIG_BINFMT_MISC=y
277CONFIG_NET=y
278
279#
280# Networking options
281#
282CONFIG_PACKET=y
283CONFIG_UNIX=y
284CONFIG_XFRM=y
285CONFIG_XFRM_USER=y
286CONFIG_XFRM_SUB_POLICY=y
287CONFIG_XFRM_MIGRATE=y
288CONFIG_XFRM_STATISTICS=y
289CONFIG_XFRM_IPCOMP=m
290CONFIG_NET_KEY=m
291CONFIG_NET_KEY_MIGRATE=y
292CONFIG_INET=y
293CONFIG_IP_MULTICAST=y
294CONFIG_IP_ADVANCED_ROUTER=y
295# CONFIG_IP_FIB_TRIE_STATS is not set
296CONFIG_IP_MULTIPLE_TABLES=y
297CONFIG_IP_ROUTE_MULTIPATH=y
298CONFIG_IP_ROUTE_VERBOSE=y
299CONFIG_IP_ROUTE_CLASSID=y
300# CONFIG_IP_PNP is not set
301CONFIG_NET_IPIP=m
302# CONFIG_NET_IPGRE_DEMUX is not set
303CONFIG_IP_MROUTE=y
304# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
305CONFIG_IP_PIMSM_V1=y
306CONFIG_IP_PIMSM_V2=y
307# CONFIG_ARPD is not set
308CONFIG_SYN_COOKIES=y
309CONFIG_INET_AH=m
310CONFIG_INET_ESP=m
311CONFIG_INET_IPCOMP=m
312CONFIG_INET_XFRM_TUNNEL=m
313CONFIG_INET_TUNNEL=m
314CONFIG_INET_XFRM_MODE_TRANSPORT=m
315CONFIG_INET_XFRM_MODE_TUNNEL=m
316CONFIG_INET_XFRM_MODE_BEET=m
317CONFIG_INET_LRO=y
318CONFIG_INET_DIAG=m
319CONFIG_INET_TCP_DIAG=m
320CONFIG_TCP_CONG_ADVANCED=y
321CONFIG_TCP_CONG_BIC=m
322CONFIG_TCP_CONG_CUBIC=y
323CONFIG_TCP_CONG_WESTWOOD=m
324CONFIG_TCP_CONG_HTCP=m
325CONFIG_TCP_CONG_HSTCP=m
326CONFIG_TCP_CONG_HYBLA=m
327CONFIG_TCP_CONG_VEGAS=m
328CONFIG_TCP_CONG_SCALABLE=m
329CONFIG_TCP_CONG_LP=m
330CONFIG_TCP_CONG_VENO=m
331CONFIG_TCP_CONG_YEAH=m
332CONFIG_TCP_CONG_ILLINOIS=m
333CONFIG_DEFAULT_CUBIC=y
334# CONFIG_DEFAULT_RENO is not set
335CONFIG_DEFAULT_TCP_CONG="cubic"
336CONFIG_TCP_MD5SIG=y
337CONFIG_IPV6=y
338CONFIG_IPV6_PRIVACY=y
339CONFIG_IPV6_ROUTER_PREF=y
340CONFIG_IPV6_ROUTE_INFO=y
341CONFIG_IPV6_OPTIMISTIC_DAD=y
342CONFIG_INET6_AH=m
343CONFIG_INET6_ESP=m
344CONFIG_INET6_IPCOMP=m
345CONFIG_IPV6_MIP6=m
346CONFIG_INET6_XFRM_TUNNEL=m
347CONFIG_INET6_TUNNEL=m
348CONFIG_INET6_XFRM_MODE_TRANSPORT=m
349CONFIG_INET6_XFRM_MODE_TUNNEL=m
350CONFIG_INET6_XFRM_MODE_BEET=m
351CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
352CONFIG_IPV6_SIT=m
353# CONFIG_IPV6_SIT_6RD is not set
354CONFIG_IPV6_NDISC_NODETYPE=y
355CONFIG_IPV6_TUNNEL=m
356CONFIG_IPV6_MULTIPLE_TABLES=y
357# CONFIG_IPV6_SUBTREES is not set
358CONFIG_IPV6_MROUTE=y
359# CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is not set
360CONFIG_IPV6_PIMSM_V2=y
361CONFIG_NETLABEL=y
362CONFIG_NETWORK_SECMARK=y
363# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
364CONFIG_NETFILTER=y
365# CONFIG_NETFILTER_DEBUG is not set
366CONFIG_NETFILTER_ADVANCED=y
367CONFIG_BRIDGE_NETFILTER=y
368
369#
370# Core Netfilter Configuration
371#
372CONFIG_NETFILTER_NETLINK=m
373CONFIG_NETFILTER_NETLINK_QUEUE=m
374CONFIG_NETFILTER_NETLINK_LOG=m
375CONFIG_NF_CONNTRACK=y
376CONFIG_NF_CONNTRACK_MARK=y
377CONFIG_NF_CONNTRACK_SECMARK=y
378CONFIG_NF_CONNTRACK_ZONES=y
379CONFIG_NF_CONNTRACK_EVENTS=y
380# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
381CONFIG_NF_CT_PROTO_DCCP=m
382CONFIG_NF_CT_PROTO_GRE=m
383CONFIG_NF_CT_PROTO_SCTP=m
384CONFIG_NF_CT_PROTO_UDPLITE=m
385CONFIG_NF_CONNTRACK_AMANDA=m
386CONFIG_NF_CONNTRACK_FTP=m
387CONFIG_NF_CONNTRACK_H323=m
388CONFIG_NF_CONNTRACK_IRC=m
389CONFIG_NF_CONNTRACK_BROADCAST=m
390CONFIG_NF_CONNTRACK_NETBIOS_NS=m
391# CONFIG_NF_CONNTRACK_SNMP is not set
392CONFIG_NF_CONNTRACK_PPTP=m
393CONFIG_NF_CONNTRACK_SANE=m
394CONFIG_NF_CONNTRACK_SIP=m
395CONFIG_NF_CONNTRACK_TFTP=m
396# CONFIG_NF_CT_NETLINK is not set
397CONFIG_NETFILTER_TPROXY=m
398CONFIG_NETFILTER_XTABLES=y
399
400#
401# Xtables combined modules
402#
403CONFIG_NETFILTER_XT_MARK=m
404CONFIG_NETFILTER_XT_CONNMARK=m
405
406#
407# Xtables targets
408#
409# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
410# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
411CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
412CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
413CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
414CONFIG_NETFILTER_XT_TARGET_CT=m
415CONFIG_NETFILTER_XT_TARGET_DSCP=m
416CONFIG_NETFILTER_XT_TARGET_HL=m
417CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
418CONFIG_NETFILTER_XT_TARGET_MARK=m
419CONFIG_NETFILTER_XT_TARGET_NFLOG=m
420CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
421CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
422CONFIG_NETFILTER_XT_TARGET_RATEEST=m
423CONFIG_NETFILTER_XT_TARGET_TEE=m
424CONFIG_NETFILTER_XT_TARGET_TPROXY=m
425CONFIG_NETFILTER_XT_TARGET_TRACE=m
426CONFIG_NETFILTER_XT_TARGET_SECMARK=m
427CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
428CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
429
430#
431# Xtables matches
432#
433# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
434CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
435CONFIG_NETFILTER_XT_MATCH_COMMENT=m
436CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
437CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
438CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
439CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
440# CONFIG_NETFILTER_XT_MATCH_CPU is not set
441CONFIG_NETFILTER_XT_MATCH_DCCP=m
442# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
443CONFIG_NETFILTER_XT_MATCH_DSCP=m
444CONFIG_NETFILTER_XT_MATCH_ESP=m
445CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
446CONFIG_NETFILTER_XT_MATCH_HELPER=m
447CONFIG_NETFILTER_XT_MATCH_HL=m
448CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
449CONFIG_NETFILTER_XT_MATCH_IPVS=m
450CONFIG_NETFILTER_XT_MATCH_LENGTH=m
451CONFIG_NETFILTER_XT_MATCH_LIMIT=m
452CONFIG_NETFILTER_XT_MATCH_MAC=m
453CONFIG_NETFILTER_XT_MATCH_MARK=m
454CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
455CONFIG_NETFILTER_XT_MATCH_OSF=m
456CONFIG_NETFILTER_XT_MATCH_OWNER=m
457CONFIG_NETFILTER_XT_MATCH_POLICY=m
458CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
459CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
460CONFIG_NETFILTER_XT_MATCH_QUOTA=m
461CONFIG_NETFILTER_XT_MATCH_RATEEST=m
462CONFIG_NETFILTER_XT_MATCH_REALM=m
463CONFIG_NETFILTER_XT_MATCH_RECENT=m
464CONFIG_NETFILTER_XT_MATCH_SCTP=m
465CONFIG_NETFILTER_XT_MATCH_SOCKET=m
466CONFIG_NETFILTER_XT_MATCH_STATE=y
467CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
468CONFIG_NETFILTER_XT_MATCH_STRING=m
469CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
470CONFIG_NETFILTER_XT_MATCH_TIME=m
471CONFIG_NETFILTER_XT_MATCH_U32=m
472# CONFIG_IP_SET is not set
473CONFIG_IP_VS=m
474CONFIG_IP_VS_IPV6=y
475# CONFIG_IP_VS_DEBUG is not set
476CONFIG_IP_VS_TAB_BITS=12
477
478#
479# IPVS transport protocol load balancing support
480#
481CONFIG_IP_VS_PROTO_TCP=y
482CONFIG_IP_VS_PROTO_UDP=y
483CONFIG_IP_VS_PROTO_AH_ESP=y
484CONFIG_IP_VS_PROTO_ESP=y
485CONFIG_IP_VS_PROTO_AH=y
486CONFIG_IP_VS_PROTO_SCTP=y
487
488#
489# IPVS scheduler
490#
491CONFIG_IP_VS_RR=m
492CONFIG_IP_VS_WRR=m
493CONFIG_IP_VS_LC=m
494CONFIG_IP_VS_WLC=m
495CONFIG_IP_VS_LBLC=m
496CONFIG_IP_VS_LBLCR=m
497# CONFIG_IP_VS_DH is not set
498# CONFIG_IP_VS_SH is not set
499CONFIG_IP_VS_SED=m
500CONFIG_IP_VS_NQ=m
501
502#
503# IPVS application helper
504#
505# CONFIG_IP_VS_NFCT is not set
506# CONFIG_IP_VS_PE_SIP is not set
507
508#
509# IP: Netfilter Configuration
510#
511CONFIG_NF_DEFRAG_IPV4=y
512CONFIG_NF_CONNTRACK_IPV4=y
513# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
514CONFIG_IP_NF_QUEUE=m
515CONFIG_IP_NF_IPTABLES=y
516CONFIG_IP_NF_MATCH_AH=m
517CONFIG_IP_NF_MATCH_ECN=m
518CONFIG_IP_NF_MATCH_TTL=m
519CONFIG_IP_NF_FILTER=y
520CONFIG_IP_NF_TARGET_REJECT=y
521CONFIG_IP_NF_TARGET_LOG=m
522CONFIG_IP_NF_TARGET_ULOG=m
523# CONFIG_NF_NAT is not set
524CONFIG_IP_NF_MANGLE=m
525# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
526CONFIG_IP_NF_TARGET_ECN=m
527CONFIG_IP_NF_TARGET_TTL=m
528CONFIG_IP_NF_RAW=m
529CONFIG_IP_NF_SECURITY=m
530CONFIG_IP_NF_ARPTABLES=m
531CONFIG_IP_NF_ARPFILTER=m
532CONFIG_IP_NF_ARP_MANGLE=m
533
534#
535# IPv6: Netfilter Configuration
536#
537CONFIG_NF_DEFRAG_IPV6=m
538CONFIG_NF_CONNTRACK_IPV6=m
539CONFIG_IP6_NF_QUEUE=m
540CONFIG_IP6_NF_IPTABLES=m
541CONFIG_IP6_NF_MATCH_AH=m
542CONFIG_IP6_NF_MATCH_EUI64=m
543CONFIG_IP6_NF_MATCH_FRAG=m
544CONFIG_IP6_NF_MATCH_OPTS=m
545CONFIG_IP6_NF_MATCH_HL=m
546CONFIG_IP6_NF_MATCH_IPV6HEADER=m
547CONFIG_IP6_NF_MATCH_MH=m
548CONFIG_IP6_NF_MATCH_RT=m
549CONFIG_IP6_NF_TARGET_HL=m
550CONFIG_IP6_NF_TARGET_LOG=m
551CONFIG_IP6_NF_FILTER=m
552CONFIG_IP6_NF_TARGET_REJECT=m
553CONFIG_IP6_NF_MANGLE=m
554CONFIG_IP6_NF_RAW=m
555CONFIG_IP6_NF_SECURITY=m
556CONFIG_BRIDGE_NF_EBTABLES=m
557CONFIG_BRIDGE_EBT_BROUTE=m
558CONFIG_BRIDGE_EBT_T_FILTER=m
559CONFIG_BRIDGE_EBT_T_NAT=m
560CONFIG_BRIDGE_EBT_802_3=m
561CONFIG_BRIDGE_EBT_AMONG=m
562CONFIG_BRIDGE_EBT_ARP=m
563CONFIG_BRIDGE_EBT_IP=m
564CONFIG_BRIDGE_EBT_IP6=m
565CONFIG_BRIDGE_EBT_LIMIT=m
566CONFIG_BRIDGE_EBT_MARK=m
567CONFIG_BRIDGE_EBT_PKTTYPE=m
568CONFIG_BRIDGE_EBT_STP=m
569CONFIG_BRIDGE_EBT_VLAN=m
570CONFIG_BRIDGE_EBT_ARPREPLY=m
571CONFIG_BRIDGE_EBT_DNAT=m
572CONFIG_BRIDGE_EBT_MARK_T=m
573CONFIG_BRIDGE_EBT_REDIRECT=m
574CONFIG_BRIDGE_EBT_SNAT=m
575CONFIG_BRIDGE_EBT_LOG=m
576CONFIG_BRIDGE_EBT_ULOG=m
577CONFIG_BRIDGE_EBT_NFLOG=m
578# CONFIG_IP_DCCP is not set
579CONFIG_IP_SCTP=m
580# CONFIG_SCTP_DBG_MSG is not set
581# CONFIG_SCTP_DBG_OBJCNT is not set
582# CONFIG_SCTP_HMAC_NONE is not set
583# CONFIG_SCTP_HMAC_SHA1 is not set
584CONFIG_SCTP_HMAC_MD5=y
585CONFIG_RDS=m
586CONFIG_RDS_TCP=m
587# CONFIG_RDS_DEBUG is not set
588# CONFIG_TIPC is not set
589# CONFIG_ATM is not set
590# CONFIG_L2TP is not set
591CONFIG_STP=m
592CONFIG_GARP=m
593CONFIG_BRIDGE=m
594CONFIG_BRIDGE_IGMP_SNOOPING=y
595CONFIG_NET_DSA=y
596CONFIG_NET_DSA_TAG_DSA=y
597CONFIG_NET_DSA_TAG_EDSA=y
598CONFIG_NET_DSA_TAG_TRAILER=y
599CONFIG_NET_DSA_MV88E6XXX=y
600CONFIG_NET_DSA_MV88E6060=y
601CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y
602CONFIG_NET_DSA_MV88E6131=y
603CONFIG_NET_DSA_MV88E6123_61_65=y
604CONFIG_VLAN_8021Q=m
605CONFIG_VLAN_8021Q_GVRP=y
606# CONFIG_DECNET is not set
607CONFIG_LLC=m
608# CONFIG_LLC2 is not set
609# CONFIG_IPX is not set
610# CONFIG_ATALK is not set
611# CONFIG_X25 is not set
612# CONFIG_LAPB is not set
613# CONFIG_ECONET is not set
614# CONFIG_WAN_ROUTER is not set
615CONFIG_PHONET=m
616# CONFIG_IEEE802154 is not set
617CONFIG_NET_SCHED=y
618
619#
620# Queueing/Scheduling
621#
622CONFIG_NET_SCH_CBQ=m
623CONFIG_NET_SCH_HTB=m
624CONFIG_NET_SCH_HFSC=m
625CONFIG_NET_SCH_PRIO=m
626CONFIG_NET_SCH_MULTIQ=m
627CONFIG_NET_SCH_RED=m
628# CONFIG_NET_SCH_SFB is not set
629CONFIG_NET_SCH_SFQ=m
630CONFIG_NET_SCH_TEQL=m
631CONFIG_NET_SCH_TBF=m
632CONFIG_NET_SCH_GRED=m
633CONFIG_NET_SCH_DSMARK=m
634CONFIG_NET_SCH_NETEM=m
635CONFIG_NET_SCH_DRR=m
636# CONFIG_NET_SCH_MQPRIO is not set
637# CONFIG_NET_SCH_CHOKE is not set
638CONFIG_NET_SCH_INGRESS=m
639
640#
641# Classification
642#
643CONFIG_NET_CLS=y
644CONFIG_NET_CLS_BASIC=m
645CONFIG_NET_CLS_TCINDEX=m
646CONFIG_NET_CLS_ROUTE4=m
647CONFIG_NET_CLS_FW=m
648CONFIG_NET_CLS_U32=m
649CONFIG_CLS_U32_PERF=y
650CONFIG_CLS_U32_MARK=y
651CONFIG_NET_CLS_RSVP=m
652CONFIG_NET_CLS_RSVP6=m
653CONFIG_NET_CLS_FLOW=m
654CONFIG_NET_CLS_CGROUP=y
655CONFIG_NET_EMATCH=y
656CONFIG_NET_EMATCH_STACK=32
657CONFIG_NET_EMATCH_CMP=m
658CONFIG_NET_EMATCH_NBYTE=m
659CONFIG_NET_EMATCH_U32=m
660CONFIG_NET_EMATCH_META=m
661CONFIG_NET_EMATCH_TEXT=m
662CONFIG_NET_CLS_ACT=y
663CONFIG_NET_ACT_POLICE=m
664CONFIG_NET_ACT_GACT=m
665CONFIG_GACT_PROB=y
666CONFIG_NET_ACT_MIRRED=m
667CONFIG_NET_ACT_IPT=m
668CONFIG_NET_ACT_NAT=m
669CONFIG_NET_ACT_PEDIT=m
670CONFIG_NET_ACT_SIMP=m
671CONFIG_NET_ACT_SKBEDIT=m
672# CONFIG_NET_ACT_CSUM is not set
673CONFIG_NET_CLS_IND=y
674CONFIG_NET_SCH_FIFO=y
675CONFIG_DCB=y
676CONFIG_DNS_RESOLVER=y
677# CONFIG_BATMAN_ADV is not set
678CONFIG_RPS=y
679CONFIG_RFS_ACCEL=y
680CONFIG_XPS=y
681
682#
683# Network testing
684#
685# CONFIG_NET_PKTGEN is not set
686# CONFIG_HAMRADIO is not set
687# CONFIG_CAN is not set
688# CONFIG_IRDA is not set
689# CONFIG_BT is not set
690# CONFIG_AF_RXRPC is not set
691CONFIG_FIB_RULES=y
692# CONFIG_WIRELESS is not set
693# CONFIG_WIMAX is not set
694# CONFIG_RFKILL is not set
695# CONFIG_NET_9P is not set
696# CONFIG_CAIF is not set
697# CONFIG_CEPH_LIB is not set
698
699#
700# Device Drivers
701#
702
703#
704# Generic Driver Options
705#
706CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
707CONFIG_DEVTMPFS=y
708CONFIG_DEVTMPFS_MOUNT=y
709CONFIG_STANDALONE=y
710CONFIG_PREVENT_FIRMWARE_BUILD=y
711CONFIG_FW_LOADER=y
712# CONFIG_FIRMWARE_IN_KERNEL is not set
713CONFIG_EXTRA_FIRMWARE=""
714# CONFIG_DEBUG_DRIVER is not set
715# CONFIG_DEBUG_DEVRES is not set
716# CONFIG_SYS_HYPERVISOR is not set
717CONFIG_CONNECTOR=y
718CONFIG_PROC_EVENTS=y
719# CONFIG_MTD is not set
720# CONFIG_PARPORT is not set
721CONFIG_BLK_DEV=y
722# CONFIG_BLK_CPQ_DA is not set
723# CONFIG_BLK_CPQ_CISS_DA is not set
724# CONFIG_BLK_DEV_DAC960 is not set
725# CONFIG_BLK_DEV_UMEM is not set
726# CONFIG_BLK_DEV_COW_COMMON is not set
727CONFIG_BLK_DEV_LOOP=y
728CONFIG_BLK_DEV_CRYPTOLOOP=m
729# CONFIG_BLK_DEV_DRBD is not set
730# CONFIG_BLK_DEV_NBD is not set
731CONFIG_BLK_DEV_SX8=m
732CONFIG_BLK_DEV_RAM=y
733CONFIG_BLK_DEV_RAM_COUNT=16
734CONFIG_BLK_DEV_RAM_SIZE=16384
735# CONFIG_BLK_DEV_XIP is not set
736# CONFIG_CDROM_PKTCDVD is not set
737CONFIG_ATA_OVER_ETH=y
738# CONFIG_BLK_DEV_RBD is not set
739# CONFIG_SENSORS_LIS3LV02D is not set
740CONFIG_MISC_DEVICES=y
741# CONFIG_AD525X_DPOT is not set
742# CONFIG_PHANTOM is not set
743# CONFIG_SGI_IOC4 is not set
744# CONFIG_TIFM_CORE is not set
745# CONFIG_ICS932S401 is not set
746# CONFIG_ENCLOSURE_SERVICES is not set
747# CONFIG_HP_ILO is not set
748# CONFIG_APDS9802ALS is not set
749# CONFIG_ISL29003 is not set
750# CONFIG_ISL29020 is not set
751# CONFIG_SENSORS_TSL2550 is not set
752# CONFIG_SENSORS_BH1780 is not set
753# CONFIG_SENSORS_BH1770 is not set
754# CONFIG_SENSORS_APDS990X is not set
755# CONFIG_HMC6352 is not set
756# CONFIG_DS1682 is not set
757# CONFIG_BMP085 is not set
758# CONFIG_PCH_PHUB is not set
759# CONFIG_C2PORT is not set
760
761#
762# EEPROM support
763#
764# CONFIG_EEPROM_AT24 is not set
765# CONFIG_EEPROM_LEGACY is not set
766# CONFIG_EEPROM_MAX6875 is not set
767# CONFIG_EEPROM_93CX6 is not set
768# CONFIG_CB710_CORE is not set
769
770#
771# Texas Instruments shared transport line discipline
772#
773# CONFIG_SENSORS_LIS3_I2C is not set
774
775#
776# SCSI device support
777#
778CONFIG_SCSI_MOD=m
779CONFIG_RAID_ATTRS=m
780CONFIG_SCSI=m
781CONFIG_SCSI_DMA=y
782CONFIG_SCSI_TGT=m
783# CONFIG_SCSI_NETLINK is not set
784CONFIG_SCSI_PROC_FS=y
785
786#
787# SCSI support type (disk, tape, CD-ROM)
788#
789CONFIG_BLK_DEV_SD=m
790# CONFIG_CHR_DEV_ST is not set
791# CONFIG_CHR_DEV_OSST is not set
792# CONFIG_BLK_DEV_SR is not set
793# CONFIG_CHR_DEV_SG is not set
794# CONFIG_CHR_DEV_SCH is not set
795# CONFIG_SCSI_MULTI_LUN is not set
796CONFIG_SCSI_CONSTANTS=y
797CONFIG_SCSI_LOGGING=y
798# CONFIG_SCSI_SCAN_ASYNC is not set
799CONFIG_SCSI_WAIT_SCAN=m
800
801#
802# SCSI Transports
803#
804# CONFIG_SCSI_SPI_ATTRS is not set
805# CONFIG_SCSI_FC_ATTRS is not set
806# CONFIG_SCSI_ISCSI_ATTRS is not set
807CONFIG_SCSI_SAS_ATTRS=m
808# CONFIG_SCSI_SAS_LIBSAS is not set
809# CONFIG_SCSI_SRP_ATTRS is not set
810CONFIG_SCSI_LOWLEVEL=y
811# CONFIG_ISCSI_TCP is not set
812# CONFIG_ISCSI_BOOT_SYSFS is not set
813# CONFIG_SCSI_CXGB3_ISCSI is not set
814# CONFIG_SCSI_CXGB4_ISCSI is not set
815# CONFIG_SCSI_BNX2_ISCSI is not set
816# CONFIG_SCSI_BNX2X_FCOE is not set
817# CONFIG_BE2ISCSI is not set
818# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
819# CONFIG_SCSI_HPSA is not set
820# CONFIG_SCSI_3W_9XXX is not set
821# CONFIG_SCSI_3W_SAS is not set
822# CONFIG_SCSI_ACARD is not set
823# CONFIG_SCSI_AACRAID is not set
824# CONFIG_SCSI_AIC7XXX is not set
825# CONFIG_SCSI_AIC7XXX_OLD is not set
826# CONFIG_SCSI_AIC79XX is not set
827# CONFIG_SCSI_AIC94XX is not set
828# CONFIG_SCSI_MVSAS is not set
829# CONFIG_SCSI_DPT_I2O is not set
830# CONFIG_SCSI_ADVANSYS is not set
831# CONFIG_SCSI_ARCMSR is not set
832# CONFIG_MEGARAID_NEWGEN is not set
833# CONFIG_MEGARAID_LEGACY is not set
834# CONFIG_MEGARAID_SAS is not set
835# CONFIG_SCSI_MPT2SAS is not set
836# CONFIG_SCSI_HPTIOP is not set
837# CONFIG_LIBFC is not set
838# CONFIG_LIBFCOE is not set
839# CONFIG_FCOE is not set
840# CONFIG_SCSI_DMX3191D is not set
841# CONFIG_SCSI_FUTURE_DOMAIN is not set
842# CONFIG_SCSI_IPS is not set
843# CONFIG_SCSI_INITIO is not set
844# CONFIG_SCSI_INIA100 is not set
845# CONFIG_SCSI_STEX is not set
846# CONFIG_SCSI_SYM53C8XX_2 is not set
847# CONFIG_SCSI_IPR is not set
848# CONFIG_SCSI_QLOGIC_1280 is not set
849# CONFIG_SCSI_QLA_FC is not set
850# CONFIG_SCSI_QLA_ISCSI is not set
851# CONFIG_SCSI_LPFC is not set
852# CONFIG_SCSI_DC395x is not set
853# CONFIG_SCSI_DC390T is not set
854# CONFIG_SCSI_DEBUG is not set
855# CONFIG_SCSI_PMCRAID is not set
856# CONFIG_SCSI_PM8001 is not set
857# CONFIG_SCSI_SRP is not set
858# CONFIG_SCSI_BFA_FC is not set
859# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
860# CONFIG_SCSI_DH is not set
861# CONFIG_SCSI_OSD_INITIATOR is not set
862CONFIG_ATA=m
863# CONFIG_ATA_NONSTANDARD is not set
864CONFIG_ATA_VERBOSE_ERROR=y
865CONFIG_SATA_PMP=y
866
867#
868# Controllers with non-SFF native interface
869#
870# CONFIG_SATA_AHCI is not set
871# CONFIG_SATA_AHCI_PLATFORM is not set
872# CONFIG_SATA_INIC162X is not set
873# CONFIG_SATA_ACARD_AHCI is not set
874CONFIG_SATA_SIL24=m
875CONFIG_ATA_SFF=y
876
877#
878# SFF controllers with custom DMA interface
879#
880# CONFIG_PDC_ADMA is not set
881# CONFIG_SATA_QSTOR is not set
882# CONFIG_SATA_SX4 is not set
883CONFIG_ATA_BMDMA=y
884
885#
886# SATA SFF controllers with BMDMA
887#
888# CONFIG_ATA_PIIX is not set
889# CONFIG_SATA_MV is not set
890# CONFIG_SATA_NV is not set
891# CONFIG_SATA_PROMISE is not set
892# CONFIG_SATA_SIL is not set
893# CONFIG_SATA_SIS is not set
894# CONFIG_SATA_SVW is not set
895# CONFIG_SATA_ULI is not set
896# CONFIG_SATA_VIA is not set
897# CONFIG_SATA_VITESSE is not set
898
899#
900# PATA SFF controllers with BMDMA
901#
902# CONFIG_PATA_ALI is not set
903# CONFIG_PATA_AMD is not set
904# CONFIG_PATA_ARASAN_CF is not set
905# CONFIG_PATA_ARTOP is not set
906# CONFIG_PATA_ATIIXP is not set
907# CONFIG_PATA_ATP867X is not set
908# CONFIG_PATA_CMD64X is not set
909# CONFIG_PATA_CS5520 is not set
910# CONFIG_PATA_CS5530 is not set
911# CONFIG_PATA_CS5536 is not set
912# CONFIG_PATA_CYPRESS is not set
913# CONFIG_PATA_EFAR is not set
914# CONFIG_PATA_HPT366 is not set
915# CONFIG_PATA_HPT37X is not set
916# CONFIG_PATA_HPT3X2N is not set
917# CONFIG_PATA_HPT3X3 is not set
918# CONFIG_PATA_IT8213 is not set
919# CONFIG_PATA_IT821X is not set
920# CONFIG_PATA_JMICRON is not set
921# CONFIG_PATA_MARVELL is not set
922# CONFIG_PATA_NETCELL is not set
923# CONFIG_PATA_NINJA32 is not set
924# CONFIG_PATA_NS87415 is not set
925# CONFIG_PATA_OLDPIIX is not set
926# CONFIG_PATA_OPTIDMA is not set
927# CONFIG_PATA_PDC2027X is not set
928# CONFIG_PATA_PDC_OLD is not set
929# CONFIG_PATA_RADISYS is not set
930# CONFIG_PATA_RDC is not set
931# CONFIG_PATA_SC1200 is not set
932# CONFIG_PATA_SCH is not set
933# CONFIG_PATA_SERVERWORKS is not set
934# CONFIG_PATA_SIL680 is not set
935# CONFIG_PATA_SIS is not set
936# CONFIG_PATA_TOSHIBA is not set
937# CONFIG_PATA_TRIFLEX is not set
938# CONFIG_PATA_VIA is not set
939# CONFIG_PATA_WINBOND is not set
940
941#
942# PIO-only SFF controllers
943#
944# CONFIG_PATA_CMD640_PCI is not set
945# CONFIG_PATA_MPIIX is not set
946# CONFIG_PATA_NS87410 is not set
947# CONFIG_PATA_OPTI is not set
948# CONFIG_PATA_PLATFORM is not set
949# CONFIG_PATA_RZ1000 is not set
950
951#
952# Generic fallback / legacy drivers
953#
954# CONFIG_ATA_GENERIC is not set
955# CONFIG_PATA_LEGACY is not set
956CONFIG_MD=y
957CONFIG_BLK_DEV_MD=y
958CONFIG_MD_AUTODETECT=y
959CONFIG_MD_LINEAR=m
960CONFIG_MD_RAID0=m
961CONFIG_MD_RAID1=m
962CONFIG_MD_RAID10=m
963CONFIG_MD_RAID456=m
964CONFIG_MULTICORE_RAID456=y
965# CONFIG_MD_MULTIPATH is not set
966CONFIG_MD_FAULTY=m
967CONFIG_BLK_DEV_DM=m
968CONFIG_DM_DEBUG=y
969CONFIG_DM_CRYPT=m
970CONFIG_DM_SNAPSHOT=m
971CONFIG_DM_MIRROR=m
972# CONFIG_DM_RAID is not set
973CONFIG_DM_LOG_USERSPACE=m
974CONFIG_DM_ZERO=m
975CONFIG_DM_MULTIPATH=m
976CONFIG_DM_MULTIPATH_QL=m
977CONFIG_DM_MULTIPATH_ST=m
978CONFIG_DM_DELAY=m
979CONFIG_DM_UEVENT=y
980# CONFIG_DM_FLAKEY is not set
981# CONFIG_TARGET_CORE is not set
982# CONFIG_FUSION is not set
983
984#
985# IEEE 1394 (FireWire) support
986#
987# CONFIG_FIREWIRE is not set
988# CONFIG_FIREWIRE_NOSY is not set
989# CONFIG_I2O is not set
990CONFIG_NETDEVICES=y
991CONFIG_IFB=m
992CONFIG_DUMMY=m
993CONFIG_BONDING=m
994CONFIG_MACVLAN=m
995CONFIG_MACVTAP=m
996# CONFIG_EQUALIZER is not set
997CONFIG_TUN=y
998CONFIG_VETH=m
999# CONFIG_ARCNET is not set
1000# CONFIG_MII is not set
1001CONFIG_PHYLIB=y
1002
1003#
1004# MII PHY device drivers
1005#
1006# CONFIG_MARVELL_PHY is not set
1007# CONFIG_DAVICOM_PHY is not set
1008# CONFIG_QSEMI_PHY is not set
1009# CONFIG_LXT_PHY is not set
1010# CONFIG_CICADA_PHY is not set
1011# CONFIG_VITESSE_PHY is not set
1012# CONFIG_SMSC_PHY is not set
1013# CONFIG_BROADCOM_PHY is not set
1014# CONFIG_BCM63XX_PHY is not set
1015# CONFIG_ICPLUS_PHY is not set
1016# CONFIG_REALTEK_PHY is not set
1017# CONFIG_NATIONAL_PHY is not set
1018# CONFIG_STE10XP is not set
1019# CONFIG_LSI_ET1011C_PHY is not set
1020# CONFIG_MICREL_PHY is not set
1021# CONFIG_FIXED_PHY is not set
1022# CONFIG_MDIO_BITBANG is not set
1023# CONFIG_NET_ETHERNET is not set
1024CONFIG_NETDEV_1000=y
1025# CONFIG_ACENIC is not set
1026# CONFIG_DL2K is not set
1027# CONFIG_E1000 is not set
1028CONFIG_E1000E=m
1029# CONFIG_IP1000 is not set
1030# CONFIG_IGB is not set
1031# CONFIG_IGBVF is not set
1032# CONFIG_NS83820 is not set
1033# CONFIG_HAMACHI is not set
1034# CONFIG_YELLOWFIN is not set
1035# CONFIG_R8169 is not set
1036# CONFIG_SIS190 is not set
1037# CONFIG_SKGE is not set
1038# CONFIG_SKY2 is not set
1039# CONFIG_VIA_VELOCITY is not set
1040# CONFIG_TIGON3 is not set
1041# CONFIG_BNX2 is not set
1042# CONFIG_CNIC is not set
1043# CONFIG_QLA3XXX is not set
1044# CONFIG_ATL1 is not set
1045# CONFIG_ATL1E is not set
1046# CONFIG_ATL1C is not set
1047# CONFIG_JME is not set
1048# CONFIG_STMMAC_ETH is not set
1049# CONFIG_PCH_GBE is not set
1050# CONFIG_NETDEV_10000 is not set
1051# CONFIG_TR is not set
1052# CONFIG_WLAN is not set
1053
1054#
1055# Enable WiMAX (Networking options) to see the WiMAX drivers
1056#
1057# CONFIG_WAN is not set
1058
1059#
1060# CAIF transport drivers
1061#
1062# CONFIG_TILE_NET is not set
1063# CONFIG_FDDI is not set
1064# CONFIG_HIPPI is not set
1065# CONFIG_PPP is not set
1066# CONFIG_SLIP is not set
1067# CONFIG_NET_FC is not set
1068# CONFIG_NETCONSOLE is not set
1069# CONFIG_NETPOLL is not set
1070# CONFIG_NET_POLL_CONTROLLER is not set
1071# CONFIG_VMXNET3 is not set
1072# CONFIG_ISDN is not set
1073# CONFIG_PHONE is not set
1074
1075#
1076# Input device support
1077#
1078CONFIG_INPUT=y
1079# CONFIG_INPUT_FF_MEMLESS is not set
1080# CONFIG_INPUT_POLLDEV is not set
1081# CONFIG_INPUT_SPARSEKMAP is not set
1082
1083#
1084# Userland interfaces
1085#
1086# CONFIG_INPUT_MOUSEDEV is not set
1087# CONFIG_INPUT_JOYDEV is not set
1088# CONFIG_INPUT_EVDEV is not set
1089# CONFIG_INPUT_EVBUG is not set
1090
1091#
1092# Input Device Drivers
1093#
1094# CONFIG_INPUT_KEYBOARD is not set
1095# CONFIG_INPUT_MOUSE is not set
1096# CONFIG_INPUT_JOYSTICK is not set
1097# CONFIG_INPUT_TABLET is not set
1098# CONFIG_INPUT_TOUCHSCREEN is not set
1099# CONFIG_INPUT_MISC is not set
1100
1101#
1102# Hardware I/O ports
1103#
1104# CONFIG_SERIO is not set
1105# CONFIG_GAMEPORT is not set
1106
1107#
1108# Character devices
1109#
1110# CONFIG_VT is not set
1111CONFIG_UNIX98_PTYS=y
1112# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1113# CONFIG_LEGACY_PTYS is not set
1114# CONFIG_SERIAL_NONSTANDARD is not set
1115# CONFIG_NOZOMI is not set
1116# CONFIG_N_GSM is not set
1117CONFIG_DEVKMEM=y
1118
1119#
1120# Serial drivers
1121#
1122# CONFIG_SERIAL_8250 is not set
1123
1124#
1125# Non-8250 serial port support
1126#
1127# CONFIG_SERIAL_MFD_HSU is not set
1128# CONFIG_SERIAL_JSM is not set
1129# CONFIG_SERIAL_TIMBERDALE is not set
1130# CONFIG_SERIAL_ALTERA_JTAGUART is not set
1131# CONFIG_SERIAL_ALTERA_UART is not set
1132# CONFIG_SERIAL_PCH_UART is not set
1133# CONFIG_TTY_PRINTK is not set
1134CONFIG_HVC_DRIVER=y
1135# CONFIG_IPMI_HANDLER is not set
1136CONFIG_HW_RANDOM=y
1137CONFIG_HW_RANDOM_TIMERIOMEM=m
1138# CONFIG_R3964 is not set
1139# CONFIG_APPLICOM is not set
1140
1141#
1142# PCMCIA character devices
1143#
1144# CONFIG_RAW_DRIVER is not set
1145# CONFIG_TCG_TPM is not set
1146CONFIG_DEVPORT=y
1147# CONFIG_RAMOOPS is not set
1148CONFIG_I2C=y
1149CONFIG_I2C_BOARDINFO=y
1150CONFIG_I2C_COMPAT=y
1151CONFIG_I2C_CHARDEV=y
1152# CONFIG_I2C_MUX is not set
1153CONFIG_I2C_HELPER_AUTO=y
1154
1155#
1156# I2C Hardware Bus support
1157#
1158
1159#
1160# PC SMBus host controller drivers
1161#
1162# CONFIG_I2C_ALI1535 is not set
1163# CONFIG_I2C_ALI1563 is not set
1164# CONFIG_I2C_ALI15X3 is not set
1165# CONFIG_I2C_AMD756 is not set
1166# CONFIG_I2C_AMD8111 is not set
1167# CONFIG_I2C_I801 is not set
1168# CONFIG_I2C_ISCH is not set
1169# CONFIG_I2C_PIIX4 is not set
1170# CONFIG_I2C_NFORCE2 is not set
1171# CONFIG_I2C_SIS5595 is not set
1172# CONFIG_I2C_SIS630 is not set
1173# CONFIG_I2C_SIS96X is not set
1174# CONFIG_I2C_VIA is not set
1175# CONFIG_I2C_VIAPRO is not set
1176
1177#
1178# I2C system bus drivers (mostly embedded / system-on-chip)
1179#
1180# CONFIG_I2C_INTEL_MID is not set
1181# CONFIG_I2C_OCORES is not set
1182# CONFIG_I2C_PCA_PLATFORM is not set
1183# CONFIG_I2C_PXA_PCI is not set
1184# CONFIG_I2C_SIMTEC is not set
1185# CONFIG_I2C_XILINX is not set
1186# CONFIG_I2C_EG20T is not set
1187
1188#
1189# External I2C/SMBus adapter drivers
1190#
1191# CONFIG_I2C_PARPORT_LIGHT is not set
1192# CONFIG_I2C_TAOS_EVM is not set
1193
1194#
1195# Other I2C/SMBus bus drivers
1196#
1197# CONFIG_I2C_STUB is not set
1198# CONFIG_I2C_DEBUG_CORE is not set
1199# CONFIG_I2C_DEBUG_ALGO is not set
1200# CONFIG_I2C_DEBUG_BUS is not set
1201# CONFIG_SPI is not set
1202
1203#
1204# PPS support
1205#
1206# CONFIG_PPS is not set
1207
1208#
1209# PPS generators support
1210#
1211# CONFIG_W1 is not set
1212# CONFIG_POWER_SUPPLY is not set
1213# CONFIG_HWMON is not set
1214# CONFIG_THERMAL is not set
1215# CONFIG_WATCHDOG is not set
1216CONFIG_SSB_POSSIBLE=y
1217
1218#
1219# Sonics Silicon Backplane
1220#
1221# CONFIG_SSB is not set
1222CONFIG_MFD_SUPPORT=y
1223# CONFIG_MFD_CORE is not set
1224# CONFIG_MFD_88PM860X is not set
1225# CONFIG_MFD_SM501 is not set
1226# CONFIG_HTC_PASIC3 is not set
1227# CONFIG_TPS6105X is not set
1228# CONFIG_TPS6507X is not set
1229# CONFIG_TWL4030_CORE is not set
1230# CONFIG_MFD_STMPE is not set
1231# CONFIG_MFD_TC3589X is not set
1232# CONFIG_MFD_TMIO is not set
1233# CONFIG_PMIC_DA903X is not set
1234# CONFIG_PMIC_ADP5520 is not set
1235# CONFIG_MFD_MAX8925 is not set
1236# CONFIG_MFD_MAX8997 is not set
1237# CONFIG_MFD_MAX8998 is not set
1238# CONFIG_MFD_WM8400 is not set
1239# CONFIG_MFD_WM831X_I2C is not set
1240# CONFIG_MFD_WM8350_I2C is not set
1241# CONFIG_MFD_WM8994 is not set
1242# CONFIG_MFD_PCF50633 is not set
1243# CONFIG_ABX500_CORE is not set
1244# CONFIG_LPC_SCH is not set
1245# CONFIG_MFD_RDC321X is not set
1246# CONFIG_MFD_JANZ_CMODIO is not set
1247# CONFIG_MFD_VX855 is not set
1248# CONFIG_MFD_WL1273_CORE is not set
1249# CONFIG_REGULATOR is not set
1250# CONFIG_MEDIA_SUPPORT is not set
1251
1252#
1253# Graphics support
1254#
1255# CONFIG_VGA_ARB is not set
1256# CONFIG_DRM is not set
1257# CONFIG_STUB_POULSBO is not set
1258# CONFIG_VGASTATE is not set
1259# CONFIG_VIDEO_OUTPUT_CONTROL is not set
1260# CONFIG_FB is not set
1261# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
1262
1263#
1264# Display device support
1265#
1266# CONFIG_DISPLAY_SUPPORT is not set
1267# CONFIG_SOUND is not set
1268# CONFIG_HID_SUPPORT is not set
1269# CONFIG_USB_SUPPORT is not set
1270# CONFIG_UWB is not set
1271# CONFIG_MMC is not set
1272# CONFIG_MEMSTICK is not set
1273# CONFIG_NEW_LEDS is not set
1274# CONFIG_NFC_DEVICES is not set
1275# CONFIG_ACCESSIBILITY is not set
1276# CONFIG_INFINIBAND is not set
1277# CONFIG_EDAC is not set
1278CONFIG_RTC_LIB=y
1279CONFIG_RTC_CLASS=y
1280CONFIG_RTC_HCTOSYS=y
1281CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1282# CONFIG_RTC_DEBUG is not set
1283
1284#
1285# RTC interfaces
1286#
1287CONFIG_RTC_INTF_SYSFS=y
1288CONFIG_RTC_INTF_PROC=y
1289CONFIG_RTC_INTF_DEV=y
1290# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1291# CONFIG_RTC_DRV_TEST is not set
1292
1293#
1294# I2C RTC drivers
1295#
1296# CONFIG_RTC_DRV_DS1307 is not set
1297# CONFIG_RTC_DRV_DS1374 is not set
1298# CONFIG_RTC_DRV_DS1672 is not set
1299# CONFIG_RTC_DRV_DS3232 is not set
1300# CONFIG_RTC_DRV_MAX6900 is not set
1301# CONFIG_RTC_DRV_RS5C372 is not set
1302# CONFIG_RTC_DRV_ISL1208 is not set
1303# CONFIG_RTC_DRV_ISL12022 is not set
1304# CONFIG_RTC_DRV_X1205 is not set
1305# CONFIG_RTC_DRV_PCF8563 is not set
1306# CONFIG_RTC_DRV_PCF8583 is not set
1307# CONFIG_RTC_DRV_M41T80 is not set
1308# CONFIG_RTC_DRV_BQ32K is not set
1309# CONFIG_RTC_DRV_S35390A is not set
1310# CONFIG_RTC_DRV_FM3130 is not set
1311# CONFIG_RTC_DRV_RX8581 is not set
1312# CONFIG_RTC_DRV_RX8025 is not set
1313
1314#
1315# SPI RTC drivers
1316#
1317
1318#
1319# Platform RTC drivers
1320#
1321# CONFIG_RTC_DRV_DS1286 is not set
1322# CONFIG_RTC_DRV_DS1511 is not set
1323# CONFIG_RTC_DRV_DS1553 is not set
1324# CONFIG_RTC_DRV_DS1742 is not set
1325# CONFIG_RTC_DRV_STK17TA8 is not set
1326# CONFIG_RTC_DRV_M48T86 is not set
1327# CONFIG_RTC_DRV_M48T35 is not set
1328# CONFIG_RTC_DRV_M48T59 is not set
1329# CONFIG_RTC_DRV_MSM6242 is not set
1330# CONFIG_RTC_DRV_BQ4802 is not set
1331# CONFIG_RTC_DRV_RP5C01 is not set
1332# CONFIG_RTC_DRV_V3020 is not set
1333
1334#
1335# on-CPU RTC drivers
1336#
1337CONFIG_RTC_DRV_TILE=y
1338# CONFIG_DMADEVICES is not set
1339# CONFIG_AUXDISPLAY is not set
1340# CONFIG_UIO is not set
1341# CONFIG_STAGING is not set
1342
1343#
1344# File systems
1345#
1346CONFIG_EXT2_FS=y
1347CONFIG_EXT2_FS_XATTR=y
1348CONFIG_EXT2_FS_POSIX_ACL=y
1349CONFIG_EXT2_FS_SECURITY=y
1350CONFIG_EXT2_FS_XIP=y
1351CONFIG_EXT3_FS=y
1352CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
1353CONFIG_EXT3_FS_XATTR=y
1354CONFIG_EXT3_FS_POSIX_ACL=y
1355CONFIG_EXT3_FS_SECURITY=y
1356CONFIG_EXT4_FS=y
1357CONFIG_EXT4_FS_XATTR=y
1358CONFIG_EXT4_FS_POSIX_ACL=y
1359CONFIG_EXT4_FS_SECURITY=y
1360# CONFIG_EXT4_DEBUG is not set
1361CONFIG_FS_XIP=y
1362CONFIG_JBD=y
1363# CONFIG_JBD_DEBUG is not set
1364CONFIG_JBD2=y
1365CONFIG_JBD2_DEBUG=y
1366CONFIG_FS_MBCACHE=y
1367# CONFIG_REISERFS_FS is not set
1368# CONFIG_JFS_FS is not set
1369CONFIG_XFS_FS=m
1370CONFIG_XFS_QUOTA=y
1371CONFIG_XFS_POSIX_ACL=y
1372# CONFIG_XFS_RT is not set
1373# CONFIG_XFS_DEBUG is not set
1374CONFIG_GFS2_FS=m
1375CONFIG_GFS2_FS_LOCKING_DLM=y
1376# CONFIG_OCFS2_FS is not set
1377CONFIG_BTRFS_FS=m
1378CONFIG_BTRFS_FS_POSIX_ACL=y
1379# CONFIG_NILFS2_FS is not set
1380CONFIG_FS_POSIX_ACL=y
1381CONFIG_EXPORTFS=y
1382CONFIG_FILE_LOCKING=y
1383CONFIG_FSNOTIFY=y
1384CONFIG_DNOTIFY=y
1385CONFIG_INOTIFY_USER=y
1386# CONFIG_FANOTIFY is not set
1387CONFIG_QUOTA=y
1388CONFIG_QUOTA_NETLINK_INTERFACE=y
1389# CONFIG_PRINT_QUOTA_WARNING is not set
1390# CONFIG_QUOTA_DEBUG is not set
1391CONFIG_QUOTA_TREE=y
1392# CONFIG_QFMT_V1 is not set
1393CONFIG_QFMT_V2=y
1394CONFIG_QUOTACTL=y
1395# CONFIG_AUTOFS4_FS is not set
1396CONFIG_FUSE_FS=y
1397CONFIG_CUSE=m
1398CONFIG_GENERIC_ACL=y
1399
1400#
1401# Caches
1402#
1403CONFIG_FSCACHE=m
1404CONFIG_FSCACHE_STATS=y
1405# CONFIG_FSCACHE_HISTOGRAM is not set
1406# CONFIG_FSCACHE_DEBUG is not set
1407# CONFIG_FSCACHE_OBJECT_LIST is not set
1408CONFIG_CACHEFILES=m
1409# CONFIG_CACHEFILES_DEBUG is not set
1410# CONFIG_CACHEFILES_HISTOGRAM is not set
1411
1412#
1413# CD-ROM/DVD Filesystems
1414#
1415CONFIG_ISO9660_FS=m
1416CONFIG_JOLIET=y
1417CONFIG_ZISOFS=y
1418CONFIG_UDF_FS=m
1419CONFIG_UDF_NLS=y
1420
1421#
1422# DOS/FAT/NT Filesystems
1423#
1424CONFIG_FAT_FS=m
1425CONFIG_MSDOS_FS=m
1426CONFIG_VFAT_FS=m
1427CONFIG_FAT_DEFAULT_CODEPAGE=437
1428CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
1429# CONFIG_NTFS_FS is not set
1430
1431#
1432# Pseudo filesystems
1433#
1434CONFIG_PROC_FS=y
1435CONFIG_PROC_KCORE=y
1436CONFIG_PROC_SYSCTL=y
1437CONFIG_PROC_PAGE_MONITOR=y
1438CONFIG_SYSFS=y
1439CONFIG_TMPFS=y
1440CONFIG_TMPFS_POSIX_ACL=y
1441CONFIG_HUGETLBFS=y
1442CONFIG_HUGETLB_PAGE=y
1443CONFIG_CONFIGFS_FS=m
1444CONFIG_MISC_FILESYSTEMS=y
1445# CONFIG_ADFS_FS is not set
1446# CONFIG_AFFS_FS is not set
1447CONFIG_ECRYPT_FS=m
1448# CONFIG_HFS_FS is not set
1449# CONFIG_HFSPLUS_FS is not set
1450# CONFIG_BEFS_FS is not set
1451# CONFIG_BFS_FS is not set
1452# CONFIG_EFS_FS is not set
1453# CONFIG_LOGFS is not set
1454CONFIG_CRAMFS=m
1455CONFIG_SQUASHFS=m
1456# CONFIG_SQUASHFS_XATTR is not set
1457# CONFIG_SQUASHFS_LZO is not set
1458# CONFIG_SQUASHFS_XZ is not set
1459# CONFIG_SQUASHFS_EMBEDDED is not set
1460CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
1461# CONFIG_VXFS_FS is not set
1462# CONFIG_MINIX_FS is not set
1463# CONFIG_OMFS_FS is not set
1464# CONFIG_HPFS_FS is not set
1465# CONFIG_QNX4FS_FS is not set
1466# CONFIG_ROMFS_FS is not set
1467# CONFIG_PSTORE is not set
1468# CONFIG_SYSV_FS is not set
1469# CONFIG_UFS_FS is not set
1470CONFIG_NETWORK_FILESYSTEMS=y
1471CONFIG_NFS_FS=m
1472CONFIG_NFS_V3=y
1473CONFIG_NFS_V3_ACL=y
1474CONFIG_NFS_V4=y
1475CONFIG_NFS_V4_1=y
1476CONFIG_PNFS_FILE_LAYOUT=m
1477CONFIG_NFS_FSCACHE=y
1478# CONFIG_NFS_USE_LEGACY_DNS is not set
1479CONFIG_NFS_USE_KERNEL_DNS=y
1480# CONFIG_NFS_USE_NEW_IDMAPPER is not set
1481CONFIG_NFSD=m
1482CONFIG_NFSD_DEPRECATED=y
1483CONFIG_NFSD_V2_ACL=y
1484CONFIG_NFSD_V3=y
1485CONFIG_NFSD_V3_ACL=y
1486CONFIG_NFSD_V4=y
1487CONFIG_LOCKD=m
1488CONFIG_LOCKD_V4=y
1489CONFIG_NFS_ACL_SUPPORT=m
1490CONFIG_NFS_COMMON=y
1491CONFIG_SUNRPC=m
1492CONFIG_SUNRPC_GSS=m
1493CONFIG_RPCSEC_GSS_KRB5=m
1494# CONFIG_CEPH_FS is not set
1495CONFIG_CIFS=m
1496CONFIG_CIFS_STATS=y
1497# CONFIG_CIFS_STATS2 is not set
1498CONFIG_CIFS_WEAK_PW_HASH=y
1499CONFIG_CIFS_UPCALL=y
1500CONFIG_CIFS_XATTR=y
1501CONFIG_CIFS_POSIX=y
1502# CONFIG_CIFS_DEBUG2 is not set
1503CONFIG_CIFS_DFS_UPCALL=y
1504CONFIG_CIFS_FSCACHE=y
1505# CONFIG_CIFS_ACL is not set
1506CONFIG_CIFS_EXPERIMENTAL=y
1507# CONFIG_NCP_FS is not set
1508# CONFIG_CODA_FS is not set
1509# CONFIG_AFS_FS is not set
1510
1511#
1512# Partition Types
1513#
1514CONFIG_PARTITION_ADVANCED=y
1515# CONFIG_ACORN_PARTITION is not set
1516CONFIG_OSF_PARTITION=y
1517CONFIG_AMIGA_PARTITION=y
1518# CONFIG_ATARI_PARTITION is not set
1519CONFIG_MAC_PARTITION=y
1520CONFIG_MSDOS_PARTITION=y
1521CONFIG_BSD_DISKLABEL=y
1522CONFIG_MINIX_SUBPARTITION=y
1523CONFIG_SOLARIS_X86_PARTITION=y
1524CONFIG_UNIXWARE_DISKLABEL=y
1525# CONFIG_LDM_PARTITION is not set
1526CONFIG_SGI_PARTITION=y
1527# CONFIG_ULTRIX_PARTITION is not set
1528CONFIG_SUN_PARTITION=y
1529CONFIG_KARMA_PARTITION=y
1530CONFIG_EFI_PARTITION=y
1531# CONFIG_SYSV68_PARTITION is not set
1532CONFIG_NLS=y
1533CONFIG_NLS_DEFAULT="utf8"
1534CONFIG_NLS_CODEPAGE_437=y
1535CONFIG_NLS_CODEPAGE_737=m
1536CONFIG_NLS_CODEPAGE_775=m
1537CONFIG_NLS_CODEPAGE_850=m
1538CONFIG_NLS_CODEPAGE_852=m
1539CONFIG_NLS_CODEPAGE_855=m
1540CONFIG_NLS_CODEPAGE_857=m
1541CONFIG_NLS_CODEPAGE_860=m
1542CONFIG_NLS_CODEPAGE_861=m
1543CONFIG_NLS_CODEPAGE_862=m
1544CONFIG_NLS_CODEPAGE_863=m
1545CONFIG_NLS_CODEPAGE_864=m
1546CONFIG_NLS_CODEPAGE_865=m
1547CONFIG_NLS_CODEPAGE_866=m
1548CONFIG_NLS_CODEPAGE_869=m
1549CONFIG_NLS_CODEPAGE_936=m
1550CONFIG_NLS_CODEPAGE_950=m
1551CONFIG_NLS_CODEPAGE_932=m
1552CONFIG_NLS_CODEPAGE_949=m
1553CONFIG_NLS_CODEPAGE_874=m
1554CONFIG_NLS_ISO8859_8=m
1555CONFIG_NLS_CODEPAGE_1250=m
1556CONFIG_NLS_CODEPAGE_1251=m
1557CONFIG_NLS_ASCII=y
1558CONFIG_NLS_ISO8859_1=m
1559CONFIG_NLS_ISO8859_2=m
1560CONFIG_NLS_ISO8859_3=m
1561CONFIG_NLS_ISO8859_4=m
1562CONFIG_NLS_ISO8859_5=m
1563CONFIG_NLS_ISO8859_6=m
1564CONFIG_NLS_ISO8859_7=m
1565CONFIG_NLS_ISO8859_9=m
1566CONFIG_NLS_ISO8859_13=m
1567CONFIG_NLS_ISO8859_14=m
1568CONFIG_NLS_ISO8859_15=m
1569CONFIG_NLS_KOI8_R=m
1570CONFIG_NLS_KOI8_U=m
1571CONFIG_NLS_UTF8=m
1572CONFIG_DLM=m
1573CONFIG_DLM_DEBUG=y
1574
1575#
1576# Kernel hacking
1577#
1578# CONFIG_PRINTK_TIME is not set
1579CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
1580# CONFIG_ENABLE_WARN_DEPRECATED is not set
1581CONFIG_ENABLE_MUST_CHECK=y
1582CONFIG_FRAME_WARN=2048
1583CONFIG_MAGIC_SYSRQ=y
1584CONFIG_STRIP_ASM_SYMS=y
1585# CONFIG_UNUSED_SYMBOLS is not set
1586CONFIG_DEBUG_FS=y
1587CONFIG_HEADERS_CHECK=y
1588# CONFIG_DEBUG_SECTION_MISMATCH is not set
1589CONFIG_DEBUG_KERNEL=y
1590CONFIG_DEBUG_SHIRQ=y
1591CONFIG_LOCKUP_DETECTOR=y
1592# CONFIG_HARDLOCKUP_DETECTOR is not set
1593# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
1594CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
1595# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1596CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1597CONFIG_DETECT_HUNG_TASK=y
1598# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1599CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1600CONFIG_SCHED_DEBUG=y
1601CONFIG_SCHEDSTATS=y
1602CONFIG_TIMER_STATS=y
1603# CONFIG_DEBUG_OBJECTS is not set
1604# CONFIG_SLUB_DEBUG_ON is not set
1605# CONFIG_SLUB_STATS is not set
1606# CONFIG_DEBUG_KMEMLEAK is not set
1607# CONFIG_DEBUG_RT_MUTEXES is not set
1608# CONFIG_RT_MUTEX_TESTER is not set
1609# CONFIG_DEBUG_SPINLOCK is not set
1610# CONFIG_DEBUG_MUTEXES is not set
1611# CONFIG_DEBUG_LOCK_ALLOC is not set
1612# CONFIG_PROVE_LOCKING is not set
1613# CONFIG_SPARSE_RCU_POINTER is not set
1614# CONFIG_LOCK_STAT is not set
1615CONFIG_DEBUG_SPINLOCK_SLEEP=y
1616# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1617CONFIG_STACKTRACE=y
1618# CONFIG_DEBUG_KOBJECT is not set
1619CONFIG_DEBUG_INFO=y
1620CONFIG_DEBUG_INFO_REDUCED=y
1621CONFIG_DEBUG_VM=y
1622# CONFIG_DEBUG_WRITECOUNT is not set
1623CONFIG_DEBUG_MEMORY_INIT=y
1624CONFIG_DEBUG_LIST=y
1625# CONFIG_TEST_LIST_SORT is not set
1626# CONFIG_DEBUG_SG is not set
1627# CONFIG_DEBUG_NOTIFIERS is not set
1628CONFIG_DEBUG_CREDENTIALS=y
1629# CONFIG_RCU_TORTURE_TEST is not set
1630# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1631# CONFIG_BACKTRACE_SELF_TEST is not set
1632# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1633CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
1634# CONFIG_LKDTM is not set
1635# CONFIG_FAULT_INJECTION is not set
1636# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1637# CONFIG_DEBUG_PAGEALLOC is not set
1638CONFIG_TRACING_SUPPORT=y
1639CONFIG_FTRACE=y
1640# CONFIG_IRQSOFF_TRACER is not set
1641# CONFIG_SCHED_TRACER is not set
1642# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1643CONFIG_BRANCH_PROFILE_NONE=y
1644# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1645# CONFIG_PROFILE_ALL_BRANCHES is not set
1646# CONFIG_BLK_DEV_IO_TRACE is not set
1647# CONFIG_BUILD_DOCSRC is not set
1648CONFIG_DYNAMIC_DEBUG=y
1649# CONFIG_ATOMIC64_SELFTEST is not set
1650CONFIG_ASYNC_RAID6_TEST=m
1651# CONFIG_SAMPLES is not set
1652# CONFIG_TEST_KSTRTOX is not set
1653CONFIG_EARLY_PRINTK=y
1654CONFIG_DEBUG_STACKOVERFLOW=y
1655# CONFIG_DEBUG_STACK_USAGE is not set
1656CONFIG_DEBUG_EXTRA_FLAGS=""
1657
1658#
1659# Security options
1660#
1661CONFIG_KEYS=y
1662CONFIG_KEYS_DEBUG_PROC_KEYS=y
1663# CONFIG_SECURITY_DMESG_RESTRICT is not set
1664CONFIG_SECURITY=y
1665CONFIG_SECURITYFS=y
1666CONFIG_SECURITY_NETWORK=y
1667CONFIG_SECURITY_NETWORK_XFRM=y
1668# CONFIG_SECURITY_PATH is not set
1669CONFIG_LSM_MMAP_MIN_ADDR=65536
1670CONFIG_SECURITY_SELINUX=y
1671CONFIG_SECURITY_SELINUX_BOOTPARAM=y
1672CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
1673CONFIG_SECURITY_SELINUX_DISABLE=y
1674CONFIG_SECURITY_SELINUX_DEVELOP=y
1675CONFIG_SECURITY_SELINUX_AVC_STATS=y
1676CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
1677# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
1678# CONFIG_SECURITY_SMACK is not set
1679# CONFIG_SECURITY_TOMOYO is not set
1680# CONFIG_SECURITY_APPARMOR is not set
1681# CONFIG_IMA is not set
1682CONFIG_DEFAULT_SECURITY_SELINUX=y
1683# CONFIG_DEFAULT_SECURITY_DAC is not set
1684CONFIG_DEFAULT_SECURITY="selinux"
1685CONFIG_XOR_BLOCKS=m
1686CONFIG_ASYNC_CORE=m
1687CONFIG_ASYNC_MEMCPY=m
1688CONFIG_ASYNC_XOR=m
1689CONFIG_ASYNC_PQ=m
1690CONFIG_ASYNC_RAID6_RECOV=m
1691CONFIG_CRYPTO=y
1692
1693#
1694# Crypto core or helper
1695#
1696CONFIG_CRYPTO_ALGAPI=y
1697CONFIG_CRYPTO_ALGAPI2=y
1698CONFIG_CRYPTO_AEAD=m
1699CONFIG_CRYPTO_AEAD2=y
1700CONFIG_CRYPTO_BLKCIPHER=m
1701CONFIG_CRYPTO_BLKCIPHER2=y
1702CONFIG_CRYPTO_HASH=y
1703CONFIG_CRYPTO_HASH2=y
1704CONFIG_CRYPTO_RNG=m
1705CONFIG_CRYPTO_RNG2=y
1706CONFIG_CRYPTO_PCOMP=m
1707CONFIG_CRYPTO_PCOMP2=y
1708CONFIG_CRYPTO_MANAGER=y
1709CONFIG_CRYPTO_MANAGER2=y
1710CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
1711CONFIG_CRYPTO_GF128MUL=m
1712CONFIG_CRYPTO_NULL=m
1713CONFIG_CRYPTO_PCRYPT=m
1714CONFIG_CRYPTO_WORKQUEUE=y
1715CONFIG_CRYPTO_CRYPTD=m
1716CONFIG_CRYPTO_AUTHENC=m
1717CONFIG_CRYPTO_TEST=m
1718
1719#
1720# Authenticated Encryption with Associated Data
1721#
1722CONFIG_CRYPTO_CCM=m
1723CONFIG_CRYPTO_GCM=m
1724CONFIG_CRYPTO_SEQIV=m
1725
1726#
1727# Block modes
1728#
1729CONFIG_CRYPTO_CBC=m
1730CONFIG_CRYPTO_CTR=m
1731CONFIG_CRYPTO_CTS=m
1732CONFIG_CRYPTO_ECB=m
1733CONFIG_CRYPTO_LRW=m
1734CONFIG_CRYPTO_PCBC=m
1735CONFIG_CRYPTO_XTS=m
1736
1737#
1738# Hash modes
1739#
1740CONFIG_CRYPTO_HMAC=y
1741CONFIG_CRYPTO_XCBC=m
1742CONFIG_CRYPTO_VMAC=m
1743
1744#
1745# Digest
1746#
1747CONFIG_CRYPTO_CRC32C=y
1748CONFIG_CRYPTO_GHASH=m
1749CONFIG_CRYPTO_MD4=m
1750CONFIG_CRYPTO_MD5=y
1751CONFIG_CRYPTO_MICHAEL_MIC=m
1752CONFIG_CRYPTO_RMD128=m
1753CONFIG_CRYPTO_RMD160=m
1754CONFIG_CRYPTO_RMD256=m
1755CONFIG_CRYPTO_RMD320=m
1756CONFIG_CRYPTO_SHA1=y
1757CONFIG_CRYPTO_SHA256=m
1758CONFIG_CRYPTO_SHA512=m
1759CONFIG_CRYPTO_TGR192=m
1760CONFIG_CRYPTO_WP512=m
1761
1762#
1763# Ciphers
1764#
1765CONFIG_CRYPTO_AES=m
1766CONFIG_CRYPTO_ANUBIS=m
1767CONFIG_CRYPTO_ARC4=m
1768CONFIG_CRYPTO_BLOWFISH=m
1769CONFIG_CRYPTO_CAMELLIA=m
1770CONFIG_CRYPTO_CAST5=m
1771CONFIG_CRYPTO_CAST6=m
1772CONFIG_CRYPTO_DES=m
1773CONFIG_CRYPTO_FCRYPT=m
1774CONFIG_CRYPTO_KHAZAD=m
1775# CONFIG_CRYPTO_SALSA20 is not set
1776CONFIG_CRYPTO_SEED=m
1777CONFIG_CRYPTO_SERPENT=m
1778CONFIG_CRYPTO_TEA=m
1779CONFIG_CRYPTO_TWOFISH=m
1780CONFIG_CRYPTO_TWOFISH_COMMON=m
1781
1782#
1783# Compression
1784#
1785CONFIG_CRYPTO_DEFLATE=m
1786CONFIG_CRYPTO_ZLIB=m
1787CONFIG_CRYPTO_LZO=m
1788
1789#
1790# Random Number Generation
1791#
1792CONFIG_CRYPTO_ANSI_CPRNG=m
1793# CONFIG_CRYPTO_USER_API_HASH is not set
1794# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
1795CONFIG_CRYPTO_HW=y
1796CONFIG_CRYPTO_DEV_HIFN_795X=m
1797CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
1798# CONFIG_BINARY_PRINTF is not set
1799
1800#
1801# Library routines
1802#
1803CONFIG_RAID6_PQ=m
1804CONFIG_BITREVERSE=y
1805CONFIG_GENERIC_FIND_FIRST_BIT=y
1806CONFIG_GENERIC_FIND_NEXT_BIT=y
1807CONFIG_GENERIC_FIND_LAST_BIT=y
1808# CONFIG_CRC_CCITT is not set
1809CONFIG_CRC16=y
1810CONFIG_CRC_T10DIF=y
1811CONFIG_CRC_ITU_T=m
1812CONFIG_CRC32=y
1813# CONFIG_CRC7 is not set
1814CONFIG_LIBCRC32C=m
1815CONFIG_AUDIT_GENERIC=y
1816CONFIG_ZLIB_INFLATE=y
1817CONFIG_ZLIB_DEFLATE=m
1818CONFIG_LZO_COMPRESS=m
1819CONFIG_LZO_DECOMPRESS=m
1820# CONFIG_XZ_DEC is not set
1821# CONFIG_XZ_DEC_BCJ is not set
1822CONFIG_DECOMPRESS_GZIP=y
1823CONFIG_TEXTSEARCH=y
1824CONFIG_TEXTSEARCH_KMP=m
1825CONFIG_TEXTSEARCH_BM=m
1826CONFIG_TEXTSEARCH_FSM=m
1827CONFIG_HAS_IOMEM=y
1828CONFIG_HAS_IOPORT=y
1829CONFIG_HAS_DMA=y
1830CONFIG_CPU_RMAP=y
1831CONFIG_NLATTR=y
1832# CONFIG_AVERAGE is not set
1833# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
new file mode 100644
index 000000000000..f58dc362b944
--- /dev/null
+++ b/arch/tile/configs/tilepro_defconfig
@@ -0,0 +1,1163 @@
1#
2# Automatically generated make config: don't edit
3# Linux/tile 2.6.39-rc5 Kernel Configuration
4# Tue May 3 09:15:02 2011
5#
6CONFIG_TILE=y
7CONFIG_MMU=y
8CONFIG_GENERIC_CSUM=y
9CONFIG_SEMAPHORE_SLEEPERS=y
10CONFIG_HAVE_ARCH_ALLOC_REMAP=y
11CONFIG_HAVE_SETUP_PER_CPU_AREA=y
12CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
13CONFIG_SYS_SUPPORTS_HUGETLBFS=y
14CONFIG_GENERIC_TIME=y
15CONFIG_GENERIC_CLOCKEVENTS=y
16CONFIG_RWSEM_GENERIC_SPINLOCK=y
17CONFIG_DEFAULT_MIGRATION_COST=10000000
18CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
19CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
20CONFIG_ARCH_DMA_ADDR_T_64BIT=y
21CONFIG_LOCKDEP_SUPPORT=y
22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
24CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
25CONFIG_TRACE_IRQFLAGS_SUPPORT=y
26CONFIG_STRICT_DEVMEM=y
27CONFIG_SMP=y
28# CONFIG_DEBUG_COPY_FROM_USER is not set
29CONFIG_HVC_TILE=y
30# CONFIG_TILEGX is not set
31CONFIG_ARCH_DEFCONFIG="arch/tile/configs/tile_defconfig"
32CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
33CONFIG_CONSTRUCTORS=y
34
35#
36# General setup
37#
38CONFIG_EXPERIMENTAL=y
39CONFIG_INIT_ENV_ARG_LIMIT=32
40CONFIG_CROSS_COMPILE=""
41CONFIG_LOCALVERSION=""
42CONFIG_LOCALVERSION_AUTO=y
43# CONFIG_SWAP is not set
44CONFIG_SYSVIPC=y
45CONFIG_SYSVIPC_SYSCTL=y
46# CONFIG_POSIX_MQUEUE is not set
47# CONFIG_BSD_PROCESS_ACCT is not set
48CONFIG_FHANDLE=y
49# CONFIG_TASKSTATS is not set
50# CONFIG_AUDIT is not set
51CONFIG_HAVE_GENERIC_HARDIRQS=y
52
53#
54# IRQ subsystem
55#
56CONFIG_GENERIC_HARDIRQS=y
57CONFIG_GENERIC_IRQ_PROBE=y
58CONFIG_GENERIC_IRQ_SHOW=y
59CONFIG_GENERIC_PENDING_IRQ=y
60
61#
62# RCU Subsystem
63#
64CONFIG_TREE_RCU=y
65# CONFIG_PREEMPT_RCU is not set
66# CONFIG_RCU_TRACE is not set
67CONFIG_RCU_FANOUT=32
68# CONFIG_RCU_FANOUT_EXACT is not set
69# CONFIG_RCU_FAST_NO_HZ is not set
70# CONFIG_TREE_RCU_TRACE is not set
71# CONFIG_IKCONFIG is not set
72CONFIG_LOG_BUF_SHIFT=17
73# CONFIG_CGROUPS is not set
74# CONFIG_NAMESPACES is not set
75# CONFIG_SCHED_AUTOGROUP is not set
76# CONFIG_SYSFS_DEPRECATED is not set
77# CONFIG_RELAY is not set
78CONFIG_BLK_DEV_INITRD=y
79CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
80CONFIG_INITRAMFS_ROOT_UID=0
81CONFIG_INITRAMFS_ROOT_GID=0
82CONFIG_RD_GZIP=y
83# CONFIG_RD_BZIP2 is not set
84# CONFIG_RD_LZMA is not set
85# CONFIG_RD_XZ is not set
86# CONFIG_RD_LZO is not set
87CONFIG_INITRAMFS_COMPRESSION_NONE=y
88# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
89CONFIG_CC_OPTIMIZE_FOR_SIZE=y
90CONFIG_SYSCTL=y
91CONFIG_ANON_INODES=y
92CONFIG_EXPERT=y
93CONFIG_SYSCTL_SYSCALL=y
94CONFIG_KALLSYMS=y
95# CONFIG_KALLSYMS_ALL is not set
96# CONFIG_KALLSYMS_EXTRA_PASS is not set
97CONFIG_HOTPLUG=y
98CONFIG_PRINTK=y
99CONFIG_BUG=y
100CONFIG_ELF_CORE=y
101CONFIG_BASE_FULL=y
102CONFIG_FUTEX=y
103CONFIG_EPOLL=y
104CONFIG_SIGNALFD=y
105CONFIG_TIMERFD=y
106CONFIG_EVENTFD=y
107CONFIG_SHMEM=y
108CONFIG_AIO=y
109CONFIG_EMBEDDED=y
110
111#
112# Kernel Performance Events And Counters
113#
114CONFIG_VM_EVENT_COUNTERS=y
115CONFIG_PCI_QUIRKS=y
116CONFIG_SLUB_DEBUG=y
117# CONFIG_COMPAT_BRK is not set
118# CONFIG_SLAB is not set
119CONFIG_SLUB=y
120# CONFIG_SLOB is not set
121CONFIG_PROFILING=y
122CONFIG_USE_GENERIC_SMP_HELPERS=y
123
124#
125# GCOV-based kernel profiling
126#
127# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
128CONFIG_SLABINFO=y
129CONFIG_RT_MUTEXES=y
130CONFIG_BASE_SMALL=0
131CONFIG_MODULES=y
132# CONFIG_MODULE_FORCE_LOAD is not set
133CONFIG_MODULE_UNLOAD=y
134# CONFIG_MODULE_FORCE_UNLOAD is not set
135# CONFIG_MODVERSIONS is not set
136# CONFIG_MODULE_SRCVERSION_ALL is not set
137CONFIG_STOP_MACHINE=y
138CONFIG_BLOCK=y
139CONFIG_LBDAF=y
140# CONFIG_BLK_DEV_BSG is not set
141# CONFIG_BLK_DEV_INTEGRITY is not set
142
143#
144# IO Schedulers
145#
146CONFIG_IOSCHED_NOOP=y
147# CONFIG_IOSCHED_DEADLINE is not set
148# CONFIG_IOSCHED_CFQ is not set
149CONFIG_DEFAULT_NOOP=y
150CONFIG_DEFAULT_IOSCHED="noop"
151# CONFIG_INLINE_SPIN_TRYLOCK is not set
152# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
153# CONFIG_INLINE_SPIN_LOCK is not set
154# CONFIG_INLINE_SPIN_LOCK_BH is not set
155# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
156# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
157CONFIG_INLINE_SPIN_UNLOCK=y
158# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
159CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
160# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
161# CONFIG_INLINE_READ_TRYLOCK is not set
162# CONFIG_INLINE_READ_LOCK is not set
163# CONFIG_INLINE_READ_LOCK_BH is not set
164# CONFIG_INLINE_READ_LOCK_IRQ is not set
165# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
166CONFIG_INLINE_READ_UNLOCK=y
167# CONFIG_INLINE_READ_UNLOCK_BH is not set
168CONFIG_INLINE_READ_UNLOCK_IRQ=y
169# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
170# CONFIG_INLINE_WRITE_TRYLOCK is not set
171# CONFIG_INLINE_WRITE_LOCK is not set
172# CONFIG_INLINE_WRITE_LOCK_BH is not set
173# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
174# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
175CONFIG_INLINE_WRITE_UNLOCK=y
176# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
177CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
178# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
179CONFIG_MUTEX_SPIN_ON_OWNER=y
180
181#
182# Tilera-specific configuration
183#
184CONFIG_NR_CPUS=64
185CONFIG_TICK_ONESHOT=y
186CONFIG_NO_HZ=y
187CONFIG_HIGH_RES_TIMERS=y
188CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
189CONFIG_HZ_100=y
190# CONFIG_HZ_250 is not set
191# CONFIG_HZ_300 is not set
192# CONFIG_HZ_1000 is not set
193CONFIG_HZ=100
194CONFIG_SCHED_HRTICK=y
195# CONFIG_KEXEC is not set
196CONFIG_HIGHMEM=y
197CONFIG_NUMA=y
198CONFIG_NODES_SHIFT=2
199# CONFIG_VMSPLIT_3_75G is not set
200# CONFIG_VMSPLIT_3_5G is not set
201CONFIG_VMSPLIT_3G=y
202# CONFIG_VMSPLIT_2_75G is not set
203# CONFIG_VMSPLIT_2_5G is not set
204# CONFIG_VMSPLIT_2_25G is not set
205# CONFIG_VMSPLIT_2G is not set
206# CONFIG_VMSPLIT_1G is not set
207CONFIG_PAGE_OFFSET=0xC0000000
208CONFIG_SELECT_MEMORY_MODEL=y
209CONFIG_DISCONTIGMEM_MANUAL=y
210CONFIG_DISCONTIGMEM=y
211CONFIG_FLAT_NODE_MEM_MAP=y
212CONFIG_NEED_MULTIPLE_NODES=y
213CONFIG_PAGEFLAGS_EXTENDED=y
214CONFIG_SPLIT_PTLOCK_CPUS=4
215# CONFIG_COMPACTION is not set
216CONFIG_MIGRATION=y
217CONFIG_PHYS_ADDR_T_64BIT=y
218CONFIG_ZONE_DMA_FLAG=0
219CONFIG_BOUNCE=y
220CONFIG_VIRT_TO_BUS=y
221# CONFIG_KSM is not set
222CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
223# CONFIG_CMDLINE_BOOL is not set
224CONFIG_VMALLOC_RESERVE=0x1000000
225CONFIG_HARDWALL=y
226CONFIG_KERNEL_PL=1
227
228#
229# Bus options
230#
231CONFIG_PCI=y
232CONFIG_PCI_DOMAINS=y
233# CONFIG_NO_IOMEM is not set
234# CONFIG_NO_IOPORT is not set
235# CONFIG_ARCH_SUPPORTS_MSI is not set
236# CONFIG_PCI_DEBUG is not set
237# CONFIG_PCI_STUB is not set
238# CONFIG_PCI_IOV is not set
239# CONFIG_HOTPLUG_PCI is not set
240
241#
242# Executable file formats
243#
244CONFIG_KCORE_ELF=y
245CONFIG_BINFMT_ELF=y
246# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
247# CONFIG_HAVE_AOUT is not set
248# CONFIG_BINFMT_MISC is not set
249CONFIG_NET=y
250
251#
252# Networking options
253#
254CONFIG_PACKET=y
255CONFIG_UNIX=y
256CONFIG_XFRM=y
257# CONFIG_XFRM_USER is not set
258# CONFIG_XFRM_SUB_POLICY is not set
259# CONFIG_XFRM_MIGRATE is not set
260# CONFIG_XFRM_STATISTICS is not set
261# CONFIG_NET_KEY is not set
262CONFIG_INET=y
263CONFIG_IP_MULTICAST=y
264# CONFIG_IP_ADVANCED_ROUTER is not set
265# CONFIG_IP_PNP is not set
266# CONFIG_NET_IPIP is not set
267# CONFIG_NET_IPGRE_DEMUX is not set
268# CONFIG_IP_MROUTE is not set
269# CONFIG_ARPD is not set
270# CONFIG_SYN_COOKIES is not set
271# CONFIG_INET_AH is not set
272# CONFIG_INET_ESP is not set
273# CONFIG_INET_IPCOMP is not set
274# CONFIG_INET_XFRM_TUNNEL is not set
275CONFIG_INET_TUNNEL=y
276# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
277# CONFIG_INET_XFRM_MODE_TUNNEL is not set
278CONFIG_INET_XFRM_MODE_BEET=y
279# CONFIG_INET_LRO is not set
280# CONFIG_INET_DIAG is not set
281# CONFIG_TCP_CONG_ADVANCED is not set
282CONFIG_TCP_CONG_CUBIC=y
283CONFIG_DEFAULT_TCP_CONG="cubic"
284# CONFIG_TCP_MD5SIG is not set
285CONFIG_IPV6=y
286# CONFIG_IPV6_PRIVACY is not set
287# CONFIG_IPV6_ROUTER_PREF is not set
288# CONFIG_IPV6_OPTIMISTIC_DAD is not set
289# CONFIG_INET6_AH is not set
290# CONFIG_INET6_ESP is not set
291# CONFIG_INET6_IPCOMP is not set
292# CONFIG_IPV6_MIP6 is not set
293# CONFIG_INET6_XFRM_TUNNEL is not set
294# CONFIG_INET6_TUNNEL is not set
295CONFIG_INET6_XFRM_MODE_TRANSPORT=y
296CONFIG_INET6_XFRM_MODE_TUNNEL=y
297CONFIG_INET6_XFRM_MODE_BEET=y
298# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
299CONFIG_IPV6_SIT=y
300# CONFIG_IPV6_SIT_6RD is not set
301CONFIG_IPV6_NDISC_NODETYPE=y
302# CONFIG_IPV6_TUNNEL is not set
303# CONFIG_IPV6_MULTIPLE_TABLES is not set
304# CONFIG_IPV6_MROUTE is not set
305# CONFIG_NETWORK_SECMARK is not set
306# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
307# CONFIG_NETFILTER is not set
308# CONFIG_IP_DCCP is not set
309# CONFIG_IP_SCTP is not set
310# CONFIG_RDS is not set
311# CONFIG_TIPC is not set
312# CONFIG_ATM is not set
313# CONFIG_L2TP is not set
314# CONFIG_BRIDGE is not set
315# CONFIG_NET_DSA is not set
316# CONFIG_VLAN_8021Q is not set
317# CONFIG_DECNET is not set
318# CONFIG_LLC2 is not set
319# CONFIG_IPX is not set
320# CONFIG_ATALK is not set
321# CONFIG_X25 is not set
322# CONFIG_LAPB is not set
323# CONFIG_ECONET is not set
324# CONFIG_WAN_ROUTER is not set
325# CONFIG_PHONET is not set
326# CONFIG_IEEE802154 is not set
327# CONFIG_NET_SCHED is not set
328# CONFIG_DCB is not set
329# CONFIG_BATMAN_ADV is not set
330CONFIG_RPS=y
331CONFIG_RFS_ACCEL=y
332CONFIG_XPS=y
333
334#
335# Network testing
336#
337# CONFIG_NET_PKTGEN is not set
338# CONFIG_HAMRADIO is not set
339# CONFIG_CAN is not set
340# CONFIG_IRDA is not set
341# CONFIG_BT is not set
342# CONFIG_AF_RXRPC is not set
343# CONFIG_WIRELESS is not set
344# CONFIG_WIMAX is not set
345# CONFIG_RFKILL is not set
346# CONFIG_NET_9P is not set
347# CONFIG_CAIF is not set
348# CONFIG_CEPH_LIB is not set
349
350#
351# Device Drivers
352#
353
354#
355# Generic Driver Options
356#
357CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
358# CONFIG_DEVTMPFS is not set
359CONFIG_STANDALONE=y
360CONFIG_PREVENT_FIRMWARE_BUILD=y
361CONFIG_FW_LOADER=y
362CONFIG_FIRMWARE_IN_KERNEL=y
363CONFIG_EXTRA_FIRMWARE=""
364# CONFIG_DEBUG_DRIVER is not set
365# CONFIG_DEBUG_DEVRES is not set
366# CONFIG_SYS_HYPERVISOR is not set
367# CONFIG_CONNECTOR is not set
368# CONFIG_MTD is not set
369# CONFIG_PARPORT is not set
370CONFIG_BLK_DEV=y
371# CONFIG_BLK_CPQ_DA is not set
372# CONFIG_BLK_CPQ_CISS_DA is not set
373# CONFIG_BLK_DEV_DAC960 is not set
374# CONFIG_BLK_DEV_UMEM is not set
375# CONFIG_BLK_DEV_COW_COMMON is not set
376# CONFIG_BLK_DEV_LOOP is not set
377
378#
379# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
380#
381# CONFIG_BLK_DEV_NBD is not set
382# CONFIG_BLK_DEV_SX8 is not set
383# CONFIG_BLK_DEV_RAM is not set
384# CONFIG_CDROM_PKTCDVD is not set
385# CONFIG_ATA_OVER_ETH is not set
386# CONFIG_BLK_DEV_RBD is not set
387# CONFIG_SENSORS_LIS3LV02D is not set
388CONFIG_MISC_DEVICES=y
389# CONFIG_PHANTOM is not set
390# CONFIG_SGI_IOC4 is not set
391# CONFIG_TIFM_CORE is not set
392# CONFIG_ENCLOSURE_SERVICES is not set
393# CONFIG_HP_ILO is not set
394# CONFIG_PCH_PHUB is not set
395# CONFIG_C2PORT is not set
396
397#
398# EEPROM support
399#
400# CONFIG_EEPROM_93CX6 is not set
401# CONFIG_CB710_CORE is not set
402
403#
404# Texas Instruments shared transport line discipline
405#
406
407#
408# SCSI device support
409#
410CONFIG_SCSI_MOD=y
411# CONFIG_RAID_ATTRS is not set
412CONFIG_SCSI=y
413CONFIG_SCSI_DMA=y
414# CONFIG_SCSI_TGT is not set
415# CONFIG_SCSI_NETLINK is not set
416CONFIG_SCSI_PROC_FS=y
417
418#
419# SCSI support type (disk, tape, CD-ROM)
420#
421CONFIG_BLK_DEV_SD=y
422# CONFIG_CHR_DEV_ST is not set
423# CONFIG_CHR_DEV_OSST is not set
424# CONFIG_BLK_DEV_SR is not set
425# CONFIG_CHR_DEV_SG is not set
426# CONFIG_CHR_DEV_SCH is not set
427# CONFIG_SCSI_MULTI_LUN is not set
428CONFIG_SCSI_CONSTANTS=y
429CONFIG_SCSI_LOGGING=y
430# CONFIG_SCSI_SCAN_ASYNC is not set
431CONFIG_SCSI_WAIT_SCAN=m
432
433#
434# SCSI Transports
435#
436# CONFIG_SCSI_SPI_ATTRS is not set
437# CONFIG_SCSI_FC_ATTRS is not set
438# CONFIG_SCSI_ISCSI_ATTRS is not set
439# CONFIG_SCSI_SAS_ATTRS is not set
440# CONFIG_SCSI_SAS_LIBSAS is not set
441# CONFIG_SCSI_SRP_ATTRS is not set
442CONFIG_SCSI_LOWLEVEL=y
443# CONFIG_ISCSI_TCP is not set
444# CONFIG_ISCSI_BOOT_SYSFS is not set
445# CONFIG_SCSI_CXGB3_ISCSI is not set
446# CONFIG_SCSI_CXGB4_ISCSI is not set
447# CONFIG_SCSI_BNX2_ISCSI is not set
448# CONFIG_SCSI_BNX2X_FCOE is not set
449# CONFIG_BE2ISCSI is not set
450# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
451# CONFIG_SCSI_HPSA is not set
452# CONFIG_SCSI_3W_9XXX is not set
453# CONFIG_SCSI_3W_SAS is not set
454# CONFIG_SCSI_ACARD is not set
455# CONFIG_SCSI_AACRAID is not set
456# CONFIG_SCSI_AIC7XXX is not set
457# CONFIG_SCSI_AIC7XXX_OLD is not set
458# CONFIG_SCSI_AIC79XX is not set
459# CONFIG_SCSI_AIC94XX is not set
460# CONFIG_SCSI_MVSAS is not set
461# CONFIG_SCSI_DPT_I2O is not set
462# CONFIG_SCSI_ADVANSYS is not set
463# CONFIG_SCSI_ARCMSR is not set
464# CONFIG_MEGARAID_NEWGEN is not set
465# CONFIG_MEGARAID_LEGACY is not set
466# CONFIG_MEGARAID_SAS is not set
467# CONFIG_SCSI_MPT2SAS is not set
468# CONFIG_SCSI_HPTIOP is not set
469# CONFIG_LIBFC is not set
470# CONFIG_LIBFCOE is not set
471# CONFIG_FCOE is not set
472# CONFIG_SCSI_DMX3191D is not set
473# CONFIG_SCSI_FUTURE_DOMAIN is not set
474# CONFIG_SCSI_IPS is not set
475# CONFIG_SCSI_INITIO is not set
476# CONFIG_SCSI_INIA100 is not set
477# CONFIG_SCSI_STEX is not set
478# CONFIG_SCSI_SYM53C8XX_2 is not set
479# CONFIG_SCSI_QLOGIC_1280 is not set
480# CONFIG_SCSI_QLA_FC is not set
481# CONFIG_SCSI_QLA_ISCSI is not set
482# CONFIG_SCSI_LPFC is not set
483# CONFIG_SCSI_DC395x is not set
484# CONFIG_SCSI_DC390T is not set
485# CONFIG_SCSI_NSP32 is not set
486# CONFIG_SCSI_DEBUG is not set
487# CONFIG_SCSI_PMCRAID is not set
488# CONFIG_SCSI_PM8001 is not set
489# CONFIG_SCSI_SRP is not set
490# CONFIG_SCSI_BFA_FC is not set
491# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
492# CONFIG_SCSI_DH is not set
493# CONFIG_SCSI_OSD_INITIATOR is not set
494# CONFIG_ATA is not set
495# CONFIG_MD is not set
496# CONFIG_TARGET_CORE is not set
497# CONFIG_FUSION is not set
498
499#
500# IEEE 1394 (FireWire) support
501#
502# CONFIG_FIREWIRE is not set
503# CONFIG_FIREWIRE_NOSY is not set
504# CONFIG_I2O is not set
505CONFIG_NETDEVICES=y
506# CONFIG_DUMMY is not set
507# CONFIG_BONDING is not set
508# CONFIG_MACVLAN is not set
509# CONFIG_EQUALIZER is not set
510CONFIG_TUN=y
511# CONFIG_VETH is not set
512# CONFIG_ARCNET is not set
513# CONFIG_MII is not set
514# CONFIG_PHYLIB is not set
515# CONFIG_NET_ETHERNET is not set
516CONFIG_NETDEV_1000=y
517# CONFIG_ACENIC is not set
518# CONFIG_DL2K is not set
519# CONFIG_E1000 is not set
520# CONFIG_E1000E is not set
521# CONFIG_IP1000 is not set
522# CONFIG_IGB is not set
523# CONFIG_IGBVF is not set
524# CONFIG_NS83820 is not set
525# CONFIG_HAMACHI is not set
526# CONFIG_YELLOWFIN is not set
527# CONFIG_R8169 is not set
528# CONFIG_SIS190 is not set
529# CONFIG_SKGE is not set
530# CONFIG_SKY2 is not set
531# CONFIG_VIA_VELOCITY is not set
532# CONFIG_TIGON3 is not set
533# CONFIG_BNX2 is not set
534# CONFIG_CNIC is not set
535# CONFIG_QLA3XXX is not set
536# CONFIG_ATL1 is not set
537# CONFIG_ATL1E is not set
538# CONFIG_ATL1C is not set
539# CONFIG_JME is not set
540# CONFIG_STMMAC_ETH is not set
541# CONFIG_PCH_GBE is not set
542# CONFIG_NETDEV_10000 is not set
543# CONFIG_TR is not set
544# CONFIG_WLAN is not set
545
546#
547# Enable WiMAX (Networking options) to see the WiMAX drivers
548#
549# CONFIG_WAN is not set
550
551#
552# CAIF transport drivers
553#
554CONFIG_TILE_NET=y
555# CONFIG_FDDI is not set
556# CONFIG_HIPPI is not set
557# CONFIG_PPP is not set
558# CONFIG_SLIP is not set
559# CONFIG_NET_FC is not set
560# CONFIG_NETCONSOLE is not set
561# CONFIG_NETPOLL is not set
562# CONFIG_NET_POLL_CONTROLLER is not set
563# CONFIG_VMXNET3 is not set
564# CONFIG_ISDN is not set
565# CONFIG_PHONE is not set
566
567#
568# Input device support
569#
570CONFIG_INPUT=y
571# CONFIG_INPUT_FF_MEMLESS is not set
572# CONFIG_INPUT_POLLDEV is not set
573# CONFIG_INPUT_SPARSEKMAP is not set
574
575#
576# Userland interfaces
577#
578# CONFIG_INPUT_MOUSEDEV is not set
579# CONFIG_INPUT_JOYDEV is not set
580# CONFIG_INPUT_EVDEV is not set
581# CONFIG_INPUT_EVBUG is not set
582
583#
584# Input Device Drivers
585#
586# CONFIG_INPUT_KEYBOARD is not set
587# CONFIG_INPUT_MOUSE is not set
588# CONFIG_INPUT_JOYSTICK is not set
589# CONFIG_INPUT_TABLET is not set
590# CONFIG_INPUT_TOUCHSCREEN is not set
591# CONFIG_INPUT_MISC is not set
592
593#
594# Hardware I/O ports
595#
596# CONFIG_SERIO is not set
597# CONFIG_GAMEPORT is not set
598
599#
600# Character devices
601#
602# CONFIG_VT is not set
603CONFIG_UNIX98_PTYS=y
604# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
605# CONFIG_LEGACY_PTYS is not set
606# CONFIG_SERIAL_NONSTANDARD is not set
607# CONFIG_NOZOMI is not set
608# CONFIG_N_GSM is not set
609CONFIG_DEVKMEM=y
610
611#
612# Serial drivers
613#
614# CONFIG_SERIAL_8250 is not set
615
616#
617# Non-8250 serial port support
618#
619# CONFIG_SERIAL_MFD_HSU is not set
620# CONFIG_SERIAL_JSM is not set
621# CONFIG_SERIAL_TIMBERDALE is not set
622# CONFIG_SERIAL_ALTERA_JTAGUART is not set
623# CONFIG_SERIAL_ALTERA_UART is not set
624# CONFIG_SERIAL_PCH_UART is not set
625# CONFIG_TTY_PRINTK is not set
626CONFIG_HVC_DRIVER=y
627# CONFIG_IPMI_HANDLER is not set
628# CONFIG_HW_RANDOM is not set
629# CONFIG_R3964 is not set
630# CONFIG_APPLICOM is not set
631
632#
633# PCMCIA character devices
634#
635# CONFIG_RAW_DRIVER is not set
636# CONFIG_TCG_TPM is not set
637CONFIG_DEVPORT=y
638# CONFIG_RAMOOPS is not set
639# CONFIG_I2C is not set
640# CONFIG_SPI is not set
641
642#
643# PPS support
644#
645# CONFIG_PPS is not set
646
647#
648# PPS generators support
649#
650# CONFIG_W1 is not set
651# CONFIG_POWER_SUPPLY is not set
652CONFIG_HWMON=y
653# CONFIG_HWMON_VID is not set
654# CONFIG_HWMON_DEBUG_CHIP is not set
655
656#
657# Native drivers
658#
659# CONFIG_SENSORS_I5K_AMB is not set
660# CONFIG_SENSORS_F71805F is not set
661# CONFIG_SENSORS_F71882FG is not set
662# CONFIG_SENSORS_IT87 is not set
663# CONFIG_SENSORS_PC87360 is not set
664# CONFIG_SENSORS_PC87427 is not set
665# CONFIG_SENSORS_SIS5595 is not set
666# CONFIG_SENSORS_SMSC47M1 is not set
667# CONFIG_SENSORS_SMSC47B397 is not set
668# CONFIG_SENSORS_SCH5627 is not set
669# CONFIG_SENSORS_VIA686A is not set
670# CONFIG_SENSORS_VT1211 is not set
671# CONFIG_SENSORS_VT8231 is not set
672# CONFIG_SENSORS_W83627HF is not set
673# CONFIG_SENSORS_W83627EHF is not set
674# CONFIG_THERMAL is not set
675CONFIG_WATCHDOG=y
676CONFIG_WATCHDOG_NOWAYOUT=y
677
678#
679# Watchdog Device Drivers
680#
681# CONFIG_SOFT_WATCHDOG is not set
682# CONFIG_ALIM7101_WDT is not set
683
684#
685# PCI-based Watchdog Cards
686#
687# CONFIG_PCIPCWATCHDOG is not set
688# CONFIG_WDTPCI is not set
689CONFIG_SSB_POSSIBLE=y
690
691#
692# Sonics Silicon Backplane
693#
694# CONFIG_SSB is not set
695CONFIG_MFD_SUPPORT=y
696# CONFIG_MFD_CORE is not set
697# CONFIG_MFD_SM501 is not set
698# CONFIG_HTC_PASIC3 is not set
699# CONFIG_MFD_TMIO is not set
700# CONFIG_ABX500_CORE is not set
701# CONFIG_LPC_SCH is not set
702# CONFIG_MFD_RDC321X is not set
703# CONFIG_MFD_JANZ_CMODIO is not set
704# CONFIG_MFD_VX855 is not set
705# CONFIG_REGULATOR is not set
706# CONFIG_MEDIA_SUPPORT is not set
707
708#
709# Graphics support
710#
711CONFIG_VGA_ARB=y
712CONFIG_VGA_ARB_MAX_GPUS=16
713# CONFIG_DRM is not set
714# CONFIG_STUB_POULSBO is not set
715# CONFIG_VGASTATE is not set
716# CONFIG_VIDEO_OUTPUT_CONTROL is not set
717# CONFIG_FB is not set
718# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
719
720#
721# Display device support
722#
723# CONFIG_DISPLAY_SUPPORT is not set
724# CONFIG_SOUND is not set
725# CONFIG_HID_SUPPORT is not set
726CONFIG_USB_SUPPORT=y
727CONFIG_USB_ARCH_HAS_HCD=y
728CONFIG_USB_ARCH_HAS_OHCI=y
729CONFIG_USB_ARCH_HAS_EHCI=y
730# CONFIG_USB is not set
731# CONFIG_USB_OTG_WHITELIST is not set
732# CONFIG_USB_OTG_BLACKLIST_HUB is not set
733
734#
735# Enable Host or Gadget support to see Inventra options
736#
737
738#
739# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
740#
741# CONFIG_USB_GADGET is not set
742
743#
744# OTG and related infrastructure
745#
746# CONFIG_UWB is not set
747# CONFIG_MMC is not set
748# CONFIG_MEMSTICK is not set
749# CONFIG_NEW_LEDS is not set
750# CONFIG_NFC_DEVICES is not set
751# CONFIG_ACCESSIBILITY is not set
752# CONFIG_INFINIBAND is not set
753CONFIG_EDAC=y
754
755#
756# Reporting subsystems
757#
758# CONFIG_EDAC_DEBUG is not set
759CONFIG_EDAC_MM_EDAC=y
760CONFIG_EDAC_TILE=y
761CONFIG_RTC_LIB=y
762CONFIG_RTC_CLASS=y
763CONFIG_RTC_HCTOSYS=y
764CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
765# CONFIG_RTC_DEBUG is not set
766
767#
768# RTC interfaces
769#
770# CONFIG_RTC_INTF_SYSFS is not set
771# CONFIG_RTC_INTF_PROC is not set
772CONFIG_RTC_INTF_DEV=y
773# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
774# CONFIG_RTC_DRV_TEST is not set
775
776#
777# SPI RTC drivers
778#
779
780#
781# Platform RTC drivers
782#
783# CONFIG_RTC_DRV_DS1286 is not set
784# CONFIG_RTC_DRV_DS1511 is not set
785# CONFIG_RTC_DRV_DS1553 is not set
786# CONFIG_RTC_DRV_DS1742 is not set
787# CONFIG_RTC_DRV_STK17TA8 is not set
788# CONFIG_RTC_DRV_M48T86 is not set
789# CONFIG_RTC_DRV_M48T35 is not set
790# CONFIG_RTC_DRV_M48T59 is not set
791# CONFIG_RTC_DRV_MSM6242 is not set
792# CONFIG_RTC_DRV_BQ4802 is not set
793# CONFIG_RTC_DRV_RP5C01 is not set
794# CONFIG_RTC_DRV_V3020 is not set
795
796#
797# on-CPU RTC drivers
798#
799CONFIG_RTC_DRV_TILE=y
800# CONFIG_DMADEVICES is not set
801# CONFIG_AUXDISPLAY is not set
802# CONFIG_UIO is not set
803# CONFIG_STAGING is not set
804
805#
806# File systems
807#
808CONFIG_EXT2_FS=y
809# CONFIG_EXT2_FS_XATTR is not set
810# CONFIG_EXT2_FS_XIP is not set
811CONFIG_EXT3_FS=y
812# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
813CONFIG_EXT3_FS_XATTR=y
814# CONFIG_EXT3_FS_POSIX_ACL is not set
815# CONFIG_EXT3_FS_SECURITY is not set
816# CONFIG_EXT4_FS is not set
817CONFIG_JBD=y
818CONFIG_FS_MBCACHE=y
819# CONFIG_REISERFS_FS is not set
820# CONFIG_JFS_FS is not set
821# CONFIG_XFS_FS is not set
822# CONFIG_GFS2_FS is not set
823# CONFIG_BTRFS_FS is not set
824# CONFIG_NILFS2_FS is not set
825# CONFIG_FS_POSIX_ACL is not set
826CONFIG_EXPORTFS=y
827CONFIG_FILE_LOCKING=y
828CONFIG_FSNOTIFY=y
829CONFIG_DNOTIFY=y
830CONFIG_INOTIFY_USER=y
831# CONFIG_FANOTIFY is not set
832# CONFIG_QUOTA is not set
833# CONFIG_QUOTACTL is not set
834# CONFIG_AUTOFS4_FS is not set
835CONFIG_FUSE_FS=y
836# CONFIG_CUSE is not set
837
838#
839# Caches
840#
841# CONFIG_FSCACHE is not set
842
843#
844# CD-ROM/DVD Filesystems
845#
846# CONFIG_ISO9660_FS is not set
847# CONFIG_UDF_FS is not set
848
849#
850# DOS/FAT/NT Filesystems
851#
852CONFIG_FAT_FS=y
853CONFIG_MSDOS_FS=y
854CONFIG_VFAT_FS=m
855CONFIG_FAT_DEFAULT_CODEPAGE=437
856CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
857# CONFIG_NTFS_FS is not set
858
859#
860# Pseudo filesystems
861#
862CONFIG_PROC_FS=y
863# CONFIG_PROC_KCORE is not set
864CONFIG_PROC_SYSCTL=y
865CONFIG_PROC_PAGE_MONITOR=y
866CONFIG_SYSFS=y
867CONFIG_TMPFS=y
868# CONFIG_TMPFS_POSIX_ACL is not set
869CONFIG_HUGETLBFS=y
870CONFIG_HUGETLB_PAGE=y
871# CONFIG_CONFIGFS_FS is not set
872CONFIG_MISC_FILESYSTEMS=y
873# CONFIG_ADFS_FS is not set
874# CONFIG_AFFS_FS is not set
875# CONFIG_HFS_FS is not set
876# CONFIG_HFSPLUS_FS is not set
877# CONFIG_BEFS_FS is not set
878# CONFIG_BFS_FS is not set
879# CONFIG_EFS_FS is not set
880# CONFIG_LOGFS is not set
881# CONFIG_CRAMFS is not set
882# CONFIG_SQUASHFS is not set
883# CONFIG_VXFS_FS is not set
884# CONFIG_MINIX_FS is not set
885# CONFIG_OMFS_FS is not set
886# CONFIG_HPFS_FS is not set
887# CONFIG_QNX4FS_FS is not set
888# CONFIG_ROMFS_FS is not set
889# CONFIG_PSTORE is not set
890# CONFIG_SYSV_FS is not set
891# CONFIG_UFS_FS is not set
892CONFIG_NETWORK_FILESYSTEMS=y
893CONFIG_NFS_FS=m
894CONFIG_NFS_V3=y
895# CONFIG_NFS_V3_ACL is not set
896# CONFIG_NFS_V4 is not set
897# CONFIG_NFSD is not set
898CONFIG_LOCKD=m
899CONFIG_LOCKD_V4=y
900CONFIG_NFS_COMMON=y
901CONFIG_SUNRPC=m
902# CONFIG_RPCSEC_GSS_KRB5 is not set
903# CONFIG_CEPH_FS is not set
904# CONFIG_CIFS is not set
905# CONFIG_NCP_FS is not set
906# CONFIG_CODA_FS is not set
907# CONFIG_AFS_FS is not set
908
909#
910# Partition Types
911#
912# CONFIG_PARTITION_ADVANCED is not set
913CONFIG_MSDOS_PARTITION=y
914CONFIG_NLS=y
915CONFIG_NLS_DEFAULT="iso8859-1"
916CONFIG_NLS_CODEPAGE_437=y
917# CONFIG_NLS_CODEPAGE_737 is not set
918# CONFIG_NLS_CODEPAGE_775 is not set
919# CONFIG_NLS_CODEPAGE_850 is not set
920# CONFIG_NLS_CODEPAGE_852 is not set
921# CONFIG_NLS_CODEPAGE_855 is not set
922# CONFIG_NLS_CODEPAGE_857 is not set
923# CONFIG_NLS_CODEPAGE_860 is not set
924# CONFIG_NLS_CODEPAGE_861 is not set
925# CONFIG_NLS_CODEPAGE_862 is not set
926# CONFIG_NLS_CODEPAGE_863 is not set
927# CONFIG_NLS_CODEPAGE_864 is not set
928# CONFIG_NLS_CODEPAGE_865 is not set
929# CONFIG_NLS_CODEPAGE_866 is not set
930# CONFIG_NLS_CODEPAGE_869 is not set
931# CONFIG_NLS_CODEPAGE_936 is not set
932# CONFIG_NLS_CODEPAGE_950 is not set
933# CONFIG_NLS_CODEPAGE_932 is not set
934# CONFIG_NLS_CODEPAGE_949 is not set
935# CONFIG_NLS_CODEPAGE_874 is not set
936# CONFIG_NLS_ISO8859_8 is not set
937# CONFIG_NLS_CODEPAGE_1250 is not set
938# CONFIG_NLS_CODEPAGE_1251 is not set
939# CONFIG_NLS_ASCII is not set
940CONFIG_NLS_ISO8859_1=y
941# CONFIG_NLS_ISO8859_2 is not set
942# CONFIG_NLS_ISO8859_3 is not set
943# CONFIG_NLS_ISO8859_4 is not set
944# CONFIG_NLS_ISO8859_5 is not set
945# CONFIG_NLS_ISO8859_6 is not set
946# CONFIG_NLS_ISO8859_7 is not set
947# CONFIG_NLS_ISO8859_9 is not set
948# CONFIG_NLS_ISO8859_13 is not set
949# CONFIG_NLS_ISO8859_14 is not set
950# CONFIG_NLS_ISO8859_15 is not set
951# CONFIG_NLS_KOI8_R is not set
952# CONFIG_NLS_KOI8_U is not set
953# CONFIG_NLS_UTF8 is not set
954
955#
956# Kernel hacking
957#
958# CONFIG_PRINTK_TIME is not set
959CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
960CONFIG_ENABLE_WARN_DEPRECATED=y
961CONFIG_ENABLE_MUST_CHECK=y
962CONFIG_FRAME_WARN=2048
963CONFIG_MAGIC_SYSRQ=y
964# CONFIG_STRIP_ASM_SYMS is not set
965# CONFIG_UNUSED_SYMBOLS is not set
966# CONFIG_DEBUG_FS is not set
967# CONFIG_HEADERS_CHECK is not set
968# CONFIG_DEBUG_SECTION_MISMATCH is not set
969CONFIG_DEBUG_KERNEL=y
970# CONFIG_DEBUG_SHIRQ is not set
971# CONFIG_LOCKUP_DETECTOR is not set
972# CONFIG_HARDLOCKUP_DETECTOR is not set
973CONFIG_DETECT_HUNG_TASK=y
974# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
975CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
976CONFIG_SCHED_DEBUG=y
977# CONFIG_SCHEDSTATS is not set
978# CONFIG_TIMER_STATS is not set
979# CONFIG_DEBUG_OBJECTS is not set
980# CONFIG_SLUB_DEBUG_ON is not set
981# CONFIG_SLUB_STATS is not set
982# CONFIG_DEBUG_KMEMLEAK is not set
983# CONFIG_DEBUG_RT_MUTEXES is not set
984# CONFIG_RT_MUTEX_TESTER is not set
985# CONFIG_DEBUG_SPINLOCK is not set
986# CONFIG_DEBUG_MUTEXES is not set
987# CONFIG_DEBUG_LOCK_ALLOC is not set
988# CONFIG_PROVE_LOCKING is not set
989# CONFIG_SPARSE_RCU_POINTER is not set
990# CONFIG_LOCK_STAT is not set
991CONFIG_DEBUG_SPINLOCK_SLEEP=y
992# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
993CONFIG_STACKTRACE=y
994# CONFIG_DEBUG_KOBJECT is not set
995# CONFIG_DEBUG_HIGHMEM is not set
996CONFIG_DEBUG_INFO=y
997# CONFIG_DEBUG_INFO_REDUCED is not set
998CONFIG_DEBUG_VM=y
999# CONFIG_DEBUG_WRITECOUNT is not set
1000# CONFIG_DEBUG_MEMORY_INIT is not set
1001# CONFIG_DEBUG_LIST is not set
1002# CONFIG_TEST_LIST_SORT is not set
1003# CONFIG_DEBUG_SG is not set
1004# CONFIG_DEBUG_NOTIFIERS is not set
1005# CONFIG_DEBUG_CREDENTIALS is not set
1006# CONFIG_RCU_TORTURE_TEST is not set
1007# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1008# CONFIG_BACKTRACE_SELF_TEST is not set
1009# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1010# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1011# CONFIG_FAULT_INJECTION is not set
1012# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1013# CONFIG_DEBUG_PAGEALLOC is not set
1014CONFIG_TRACING_SUPPORT=y
1015CONFIG_FTRACE=y
1016# CONFIG_IRQSOFF_TRACER is not set
1017# CONFIG_SCHED_TRACER is not set
1018# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1019CONFIG_BRANCH_PROFILE_NONE=y
1020# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1021# CONFIG_PROFILE_ALL_BRANCHES is not set
1022# CONFIG_BLK_DEV_IO_TRACE is not set
1023# CONFIG_ATOMIC64_SELFTEST is not set
1024# CONFIG_SAMPLES is not set
1025# CONFIG_TEST_KSTRTOX is not set
1026CONFIG_EARLY_PRINTK=y
1027CONFIG_DEBUG_STACKOVERFLOW=y
1028# CONFIG_DEBUG_STACK_USAGE is not set
1029CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly"
1030
1031#
1032# Security options
1033#
1034# CONFIG_KEYS is not set
1035# CONFIG_SECURITY_DMESG_RESTRICT is not set
1036# CONFIG_SECURITY is not set
1037# CONFIG_SECURITYFS is not set
1038CONFIG_DEFAULT_SECURITY_DAC=y
1039CONFIG_DEFAULT_SECURITY=""
1040CONFIG_CRYPTO=y
1041
1042#
1043# Crypto core or helper
1044#
1045# CONFIG_CRYPTO_FIPS is not set
1046CONFIG_CRYPTO_ALGAPI=m
1047CONFIG_CRYPTO_ALGAPI2=m
1048CONFIG_CRYPTO_RNG=m
1049CONFIG_CRYPTO_RNG2=m
1050# CONFIG_CRYPTO_MANAGER is not set
1051# CONFIG_CRYPTO_MANAGER2 is not set
1052# CONFIG_CRYPTO_GF128MUL is not set
1053# CONFIG_CRYPTO_NULL is not set
1054# CONFIG_CRYPTO_PCRYPT is not set
1055# CONFIG_CRYPTO_CRYPTD is not set
1056# CONFIG_CRYPTO_AUTHENC is not set
1057# CONFIG_CRYPTO_TEST is not set
1058
1059#
1060# Authenticated Encryption with Associated Data
1061#
1062# CONFIG_CRYPTO_CCM is not set
1063# CONFIG_CRYPTO_GCM is not set
1064# CONFIG_CRYPTO_SEQIV is not set
1065
1066#
1067# Block modes
1068#
1069# CONFIG_CRYPTO_CBC is not set
1070# CONFIG_CRYPTO_CTR is not set
1071# CONFIG_CRYPTO_CTS is not set
1072# CONFIG_CRYPTO_ECB is not set
1073# CONFIG_CRYPTO_LRW is not set
1074# CONFIG_CRYPTO_PCBC is not set
1075# CONFIG_CRYPTO_XTS is not set
1076
1077#
1078# Hash modes
1079#
1080# CONFIG_CRYPTO_HMAC is not set
1081# CONFIG_CRYPTO_XCBC is not set
1082# CONFIG_CRYPTO_VMAC is not set
1083
1084#
1085# Digest
1086#
1087# CONFIG_CRYPTO_CRC32C is not set
1088# CONFIG_CRYPTO_GHASH is not set
1089# CONFIG_CRYPTO_MD4 is not set
1090# CONFIG_CRYPTO_MD5 is not set
1091# CONFIG_CRYPTO_MICHAEL_MIC is not set
1092# CONFIG_CRYPTO_RMD128 is not set
1093# CONFIG_CRYPTO_RMD160 is not set
1094# CONFIG_CRYPTO_RMD256 is not set
1095# CONFIG_CRYPTO_RMD320 is not set
1096# CONFIG_CRYPTO_SHA1 is not set
1097# CONFIG_CRYPTO_SHA256 is not set
1098# CONFIG_CRYPTO_SHA512 is not set
1099# CONFIG_CRYPTO_TGR192 is not set
1100# CONFIG_CRYPTO_WP512 is not set
1101
1102#
1103# Ciphers
1104#
1105CONFIG_CRYPTO_AES=m
1106# CONFIG_CRYPTO_ANUBIS is not set
1107# CONFIG_CRYPTO_ARC4 is not set
1108# CONFIG_CRYPTO_BLOWFISH is not set
1109# CONFIG_CRYPTO_CAMELLIA is not set
1110# CONFIG_CRYPTO_CAST5 is not set
1111# CONFIG_CRYPTO_CAST6 is not set
1112# CONFIG_CRYPTO_DES is not set
1113# CONFIG_CRYPTO_FCRYPT is not set
1114# CONFIG_CRYPTO_KHAZAD is not set
1115# CONFIG_CRYPTO_SALSA20 is not set
1116# CONFIG_CRYPTO_SEED is not set
1117# CONFIG_CRYPTO_SERPENT is not set
1118# CONFIG_CRYPTO_TEA is not set
1119# CONFIG_CRYPTO_TWOFISH is not set
1120
1121#
1122# Compression
1123#
1124# CONFIG_CRYPTO_DEFLATE is not set
1125# CONFIG_CRYPTO_ZLIB is not set
1126# CONFIG_CRYPTO_LZO is not set
1127
1128#
1129# Random Number Generation
1130#
1131CONFIG_CRYPTO_ANSI_CPRNG=m
1132# CONFIG_CRYPTO_USER_API_HASH is not set
1133# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
1134CONFIG_CRYPTO_HW=y
1135# CONFIG_CRYPTO_DEV_HIFN_795X is not set
1136# CONFIG_BINARY_PRINTF is not set
1137
1138#
1139# Library routines
1140#
1141CONFIG_BITREVERSE=y
1142CONFIG_GENERIC_FIND_FIRST_BIT=y
1143CONFIG_GENERIC_FIND_NEXT_BIT=y
1144CONFIG_GENERIC_FIND_LAST_BIT=y
1145# CONFIG_CRC_CCITT is not set
1146# CONFIG_CRC16 is not set
1147# CONFIG_CRC_T10DIF is not set
1148# CONFIG_CRC_ITU_T is not set
1149CONFIG_CRC32=y
1150# CONFIG_CRC7 is not set
1151# CONFIG_LIBCRC32C is not set
1152CONFIG_ZLIB_INFLATE=y
1153# CONFIG_XZ_DEC is not set
1154# CONFIG_XZ_DEC_BCJ is not set
1155CONFIG_DECOMPRESS_GZIP=y
1156CONFIG_HAS_IOMEM=y
1157CONFIG_HAS_IOPORT=y
1158CONFIG_HAS_DMA=y
1159CONFIG_CPU_RMAP=y
1160CONFIG_NLATTR=y
1161# CONFIG_AVERAGE is not set
1162CONFIG_HAVE_KVM=y
1163# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/tile/include/arch/chip_tilegx.h b/arch/tile/include/arch/chip_tilegx.h
new file mode 100644
index 000000000000..ea8e4f2c9483
--- /dev/null
+++ b/arch/tile/include/arch/chip_tilegx.h
@@ -0,0 +1,258 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/*
16 * @file
17 * Global header file.
18 * This header file specifies defines for TILE-Gx.
19 */
20
21#ifndef __ARCH_CHIP_H__
22#define __ARCH_CHIP_H__
23
24/** Specify chip version.
25 * When possible, prefer the CHIP_xxx symbols below for future-proofing.
26 * This is intended for cross-compiling; native compilation should
27 * use the predefined __tile_chip__ symbol.
28 */
29#define TILE_CHIP 10
30
31/** Specify chip revision.
32 * This provides for the case of a respin of a particular chip type;
33 * the normal value for this symbol is "0".
34 * This is intended for cross-compiling; native compilation should
35 * use the predefined __tile_chip_rev__ symbol.
36 */
37#define TILE_CHIP_REV 0
38
39/** The name of this architecture. */
40#define CHIP_ARCH_NAME "tilegx"
41
42/** The ELF e_machine type for binaries for this chip. */
43#define CHIP_ELF_TYPE() EM_TILEGX
44
45/** The alternate ELF e_machine type for binaries for this chip. */
46#define CHIP_COMPAT_ELF_TYPE() 0x2597
47
48/** What is the native word size of the machine? */
49#define CHIP_WORD_SIZE() 64
50
51/** How many bits of a virtual address are used. Extra bits must be
52 * the sign extension of the low bits.
53 */
54#define CHIP_VA_WIDTH() 42
55
56/** How many bits are in a physical address? */
57#define CHIP_PA_WIDTH() 40
58
59/** Size of the L2 cache, in bytes. */
60#define CHIP_L2_CACHE_SIZE() 262144
61
62/** Log size of an L2 cache line in bytes. */
63#define CHIP_L2_LOG_LINE_SIZE() 6
64
65/** Size of an L2 cache line, in bytes. */
66#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
67
68/** Associativity of the L2 cache. */
69#define CHIP_L2_ASSOC() 8
70
71/** Size of the L1 data cache, in bytes. */
72#define CHIP_L1D_CACHE_SIZE() 32768
73
74/** Log size of an L1 data cache line in bytes. */
75#define CHIP_L1D_LOG_LINE_SIZE() 6
76
77/** Size of an L1 data cache line, in bytes. */
78#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
79
80/** Associativity of the L1 data cache. */
81#define CHIP_L1D_ASSOC() 2
82
83/** Size of the L1 instruction cache, in bytes. */
84#define CHIP_L1I_CACHE_SIZE() 32768
85
86/** Log size of an L1 instruction cache line in bytes. */
87#define CHIP_L1I_LOG_LINE_SIZE() 6
88
89/** Size of an L1 instruction cache line, in bytes. */
90#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
91
92/** Associativity of the L1 instruction cache. */
93#define CHIP_L1I_ASSOC() 2
94
95/** Stride with which flush instructions must be issued. */
96#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
97
98/** Stride with which inv instructions must be issued. */
99#define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE()
100
101/** Stride with which finv instructions must be issued. */
102#define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE()
103
104/** Can the local cache coherently cache data that is homed elsewhere? */
105#define CHIP_HAS_COHERENT_LOCAL_CACHE() 1
106
107/** How many simultaneous outstanding victims can the L2 cache have? */
108#define CHIP_MAX_OUTSTANDING_VICTIMS() 128
109
110/** Does the TLB support the NC and NOALLOC bits? */
111#define CHIP_HAS_NC_AND_NOALLOC_BITS() 1
112
113/** Does the chip support hash-for-home caching? */
114#define CHIP_HAS_CBOX_HOME_MAP() 1
115
116/** Number of entries in the chip's home map tables. */
117#define CHIP_CBOX_HOME_MAP_SIZE() 128
118
119/** Do uncacheable requests miss in the cache regardless of whether
120 * there is matching data? */
121#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1
122
123/** Does the mf instruction wait for victims? */
124#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0
125
126/** Does the chip have an "inv" instruction that doesn't also flush? */
127#define CHIP_HAS_INV() 1
128
129/** Does the chip have a "wh64" instruction? */
130#define CHIP_HAS_WH64() 1
131
132/** Does this chip have a 'dword_align' instruction? */
133#define CHIP_HAS_DWORD_ALIGN() 0
134
135/** Number of performance counters. */
136#define CHIP_PERFORMANCE_COUNTERS() 4
137
138/** Does this chip have auxiliary performance counters? */
139#define CHIP_HAS_AUX_PERF_COUNTERS() 1
140
141/** Is the CBOX_MSR1 SPR supported? */
142#define CHIP_HAS_CBOX_MSR1() 0
143
144/** Is the TILE_RTF_HWM SPR supported? */
145#define CHIP_HAS_TILE_RTF_HWM() 1
146
147/** Is the TILE_WRITE_PENDING SPR supported? */
148#define CHIP_HAS_TILE_WRITE_PENDING() 0
149
150/** Is the PROC_STATUS SPR supported? */
151#define CHIP_HAS_PROC_STATUS_SPR() 1
152
153/** Is the DSTREAM_PF SPR supported? */
154#define CHIP_HAS_DSTREAM_PF() 1
155
156/** Log of the number of mshims we have. */
157#define CHIP_LOG_NUM_MSHIMS() 2
158
159/** Are the bases of the interrupt vector areas fixed? */
160#define CHIP_HAS_FIXED_INTVEC_BASE() 0
161
162/** Are the interrupt masks split up into 2 SPRs? */
163#define CHIP_HAS_SPLIT_INTR_MASK() 0
164
165/** Is the cycle count split up into 2 SPRs? */
166#define CHIP_HAS_SPLIT_CYCLE() 0
167
168/** Does the chip have a static network? */
169#define CHIP_HAS_SN() 0
170
171/** Does the chip have a static network processor? */
172#define CHIP_HAS_SN_PROC() 0
173
174/** Size of the L1 static network processor instruction cache, in bytes. */
175/* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 10 */
176
177/** Does the chip have DMA support in each tile? */
178#define CHIP_HAS_TILE_DMA() 0
179
180/** Does the chip have the second revision of the directly accessible
181 * dynamic networks? This encapsulates a number of characteristics,
182 * including the absence of the catch-all, the absence of inline message
183 * tags, the absence of support for network context-switching, and so on.
184 */
185#define CHIP_HAS_REV1_XDN() 1
186
187/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
188#define CHIP_HAS_CMPEXCH() 1
189
190/** Does the chip have memory-mapped I/O support? */
191#define CHIP_HAS_MMIO() 1
192
193/** Does the chip have post-completion interrupts? */
194#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 1
195
196/** Does the chip have native single step support? */
197#define CHIP_HAS_SINGLE_STEP() 1
198
199#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
200
201/** How many entries are present in the instruction TLB? */
202#define CHIP_ITLB_ENTRIES() 16
203
204/** How many entries are present in the data TLB? */
205#define CHIP_DTLB_ENTRIES() 32
206
207/** How many MAF entries does the XAUI shim have? */
208#define CHIP_XAUI_MAF_ENTRIES() 32
209
210/** Does the memory shim have a source-id table? */
211#define CHIP_HAS_MSHIM_SRCID_TABLE() 0
212
213/** Does the L1 instruction cache clear on reset? */
214#define CHIP_HAS_L1I_CLEAR_ON_RESET() 1
215
216/** Does the chip come out of reset with valid coordinates on all tiles?
217 * Note that if defined, this also implies that the upper left is 1,1.
218 */
219#define CHIP_HAS_VALID_TILE_COORD_RESET() 1
220
221/** Does the chip have unified packet formats? */
222#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1
223
224/** Does the chip support write reordering? */
225#define CHIP_HAS_WRITE_REORDERING() 1
226
227/** Does the chip support Y-X routing as well as X-Y? */
228#define CHIP_HAS_Y_X_ROUTING() 1
229
230/** Is INTCTRL_3 managed with the correct MPL? */
231#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1
232
233/** Is it possible to configure the chip to be big-endian? */
234#define CHIP_HAS_BIG_ENDIAN_CONFIG() 1
235
236/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
237#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
238
239/** Is the DIAG_TRACE_WAY SPR supported? */
240#define CHIP_HAS_DIAG_TRACE_WAY() 0
241
242/** Is the MEM_STRIPE_CONFIG SPR supported? */
243#define CHIP_HAS_MEM_STRIPE_CONFIG() 1
244
245/** Are the TLB_PERF SPRs supported? */
246#define CHIP_HAS_TLB_PERF() 1
247
248/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
249#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
250
251/** Does the chip support rev1 DMA packets? */
252#define CHIP_HAS_REV1_DMA_PACKETS() 1
253
254/** Does the chip have an IPI shim? */
255#define CHIP_HAS_IPI() 1
256
257#endif /* !__OPEN_SOURCE__ */
258#endif /* __ARCH_CHIP_H__ */
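
The CHIP_* cache macros above are compile-time constants: CHIP_L2_LOG_LINE_SIZE() is 6, so CHIP_L2_LINE_SIZE() expands to (1 << 6), i.e. 64 bytes. A minimal sketch of how code built against this header might use them (the buffer and its name are illustrative, not part of this patch):

	#include <arch/chip.h>

	/* Keep a staging buffer aligned to a full 64-byte L2 line. */
	static char stage_buf[4 * CHIP_L2_LINE_SIZE()]
		__attribute__((aligned(CHIP_L2_LINE_SIZE())));
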
diff --git a/arch/tile/include/arch/icache.h b/arch/tile/include/arch/icache.h
index 5c87c9016338..762eafa8a11e 100644
--- a/arch/tile/include/arch/icache.h
+++ b/arch/tile/include/arch/icache.h
@@ -16,7 +16,7 @@
 /**
  * @file
  *
- * Support for invalidating bytes in the instruction
+ * Support for invalidating bytes in the instruction cache.
  */
 
 #ifndef __ARCH_ICACHE_H__
@@ -30,11 +30,10 @@
  *
  * @param addr The start of memory to be invalidated.
  * @param size The number of bytes to be invalidated.
- * @param page_size The system's page size, typically the PAGE_SIZE constant
- *        in sys/page.h.  This value must be a power of two no larger
- *        than the page containing the code to be invalidated. If the value
- *        is smaller than the actual page size, this function will still
- *        work, but may run slower than necessary.
+ * @param page_size The system's page size, e.g. getpagesize() in userspace.
+ *        This value must be a power of two no larger than the page containing
+ *        the code to be invalidated. If the value is smaller than the actual page
+ *        size, this function will still work, but may run slower than necessary.
  */
 static __inline void
 invalidate_icache(const void* addr, unsigned long size,
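
The reworded @param text above points userspace callers at getpagesize() rather than a PAGE_SIZE constant. A minimal usage sketch, assuming a userspace caller that has just rewritten some code bytes (flush_patched_code() and its arguments are illustrative):

	#include <unistd.h>
	#include <arch/icache.h>

	static void flush_patched_code(const void *code, unsigned long len)
	{
		/* Drop the I-cache lines covering the rewritten bytes. */
		invalidate_icache(code, len, getpagesize());
	}
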
diff --git a/arch/tile/include/arch/interrupts_64.h b/arch/tile/include/arch/interrupts_64.h
new file mode 100644
index 000000000000..5bb58b2e4e6f
--- /dev/null
+++ b/arch/tile/include/arch/interrupts_64.h
@@ -0,0 +1,276 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef __ARCH_INTERRUPTS_H__
16#define __ARCH_INTERRUPTS_H__
17
18/** Mask for an interrupt. */
19#ifdef __ASSEMBLER__
20/* Note: must handle breaking interrupts into high and low words manually. */
21#define INT_MASK(intno) (1 << (intno))
22#else
23#define INT_MASK(intno) (1ULL << (intno))
24#endif
25
26
27/** Where a given interrupt executes */
28#define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8))
29
30/** Where to store a vector for a given interrupt. */
31#define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0)
32
33/** The base address of user-level interrupts. */
34#define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0)
35
36
37/** Additional synthetic interrupt. */
38#define INT_BREAKPOINT (63)
39
40#define INT_MEM_ERROR 0
41#define INT_SINGLE_STEP_3 1
42#define INT_SINGLE_STEP_2 2
43#define INT_SINGLE_STEP_1 3
44#define INT_SINGLE_STEP_0 4
45#define INT_IDN_COMPLETE 5
46#define INT_UDN_COMPLETE 6
47#define INT_ITLB_MISS 7
48#define INT_ILL 8
49#define INT_GPV 9
50#define INT_IDN_ACCESS 10
51#define INT_UDN_ACCESS 11
52#define INT_SWINT_3 12
53#define INT_SWINT_2 13
54#define INT_SWINT_1 14
55#define INT_SWINT_0 15
56#define INT_ILL_TRANS 16
57#define INT_UNALIGN_DATA 17
58#define INT_DTLB_MISS 18
59#define INT_DTLB_ACCESS 19
60#define INT_IDN_FIREWALL 20
61#define INT_UDN_FIREWALL 21
62#define INT_TILE_TIMER 22
63#define INT_AUX_TILE_TIMER 23
64#define INT_IDN_TIMER 24
65#define INT_UDN_TIMER 25
66#define INT_IDN_AVAIL 26
67#define INT_UDN_AVAIL 27
68#define INT_IPI_3 28
69#define INT_IPI_2 29
70#define INT_IPI_1 30
71#define INT_IPI_0 31
72#define INT_PERF_COUNT 32
73#define INT_AUX_PERF_COUNT 33
74#define INT_INTCTRL_3 34
75#define INT_INTCTRL_2 35
76#define INT_INTCTRL_1 36
77#define INT_INTCTRL_0 37
78#define INT_BOOT_ACCESS 38
79#define INT_WORLD_ACCESS 39
80#define INT_I_ASID 40
81#define INT_D_ASID 41
82#define INT_DOUBLE_FAULT 42
83
84#define NUM_INTERRUPTS 43
85
86#ifndef __ASSEMBLER__
87#define QUEUED_INTERRUPTS ( \
88 INT_MASK(INT_MEM_ERROR) | \
89 INT_MASK(INT_IDN_COMPLETE) | \
90 INT_MASK(INT_UDN_COMPLETE) | \
91 INT_MASK(INT_IDN_FIREWALL) | \
92 INT_MASK(INT_UDN_FIREWALL) | \
93 INT_MASK(INT_TILE_TIMER) | \
94 INT_MASK(INT_AUX_TILE_TIMER) | \
95 INT_MASK(INT_IDN_TIMER) | \
96 INT_MASK(INT_UDN_TIMER) | \
97 INT_MASK(INT_IDN_AVAIL) | \
98 INT_MASK(INT_UDN_AVAIL) | \
99 INT_MASK(INT_IPI_3) | \
100 INT_MASK(INT_IPI_2) | \
101 INT_MASK(INT_IPI_1) | \
102 INT_MASK(INT_IPI_0) | \
103 INT_MASK(INT_PERF_COUNT) | \
104 INT_MASK(INT_AUX_PERF_COUNT) | \
105 INT_MASK(INT_INTCTRL_3) | \
106 INT_MASK(INT_INTCTRL_2) | \
107 INT_MASK(INT_INTCTRL_1) | \
108 INT_MASK(INT_INTCTRL_0) | \
109 INT_MASK(INT_BOOT_ACCESS) | \
110 INT_MASK(INT_WORLD_ACCESS) | \
111 INT_MASK(INT_I_ASID) | \
112 INT_MASK(INT_D_ASID) | \
113 INT_MASK(INT_DOUBLE_FAULT) | \
114 0)
115#define NONQUEUED_INTERRUPTS ( \
116 INT_MASK(INT_SINGLE_STEP_3) | \
117 INT_MASK(INT_SINGLE_STEP_2) | \
118 INT_MASK(INT_SINGLE_STEP_1) | \
119 INT_MASK(INT_SINGLE_STEP_0) | \
120 INT_MASK(INT_ITLB_MISS) | \
121 INT_MASK(INT_ILL) | \
122 INT_MASK(INT_GPV) | \
123 INT_MASK(INT_IDN_ACCESS) | \
124 INT_MASK(INT_UDN_ACCESS) | \
125 INT_MASK(INT_SWINT_3) | \
126 INT_MASK(INT_SWINT_2) | \
127 INT_MASK(INT_SWINT_1) | \
128 INT_MASK(INT_SWINT_0) | \
129 INT_MASK(INT_ILL_TRANS) | \
130 INT_MASK(INT_UNALIGN_DATA) | \
131 INT_MASK(INT_DTLB_MISS) | \
132 INT_MASK(INT_DTLB_ACCESS) | \
133 0)
134#define CRITICAL_MASKED_INTERRUPTS ( \
135 INT_MASK(INT_MEM_ERROR) | \
136 INT_MASK(INT_SINGLE_STEP_3) | \
137 INT_MASK(INT_SINGLE_STEP_2) | \
138 INT_MASK(INT_SINGLE_STEP_1) | \
139 INT_MASK(INT_SINGLE_STEP_0) | \
140 INT_MASK(INT_IDN_COMPLETE) | \
141 INT_MASK(INT_UDN_COMPLETE) | \
142 INT_MASK(INT_IDN_FIREWALL) | \
143 INT_MASK(INT_UDN_FIREWALL) | \
144 INT_MASK(INT_TILE_TIMER) | \
145 INT_MASK(INT_AUX_TILE_TIMER) | \
146 INT_MASK(INT_IDN_TIMER) | \
147 INT_MASK(INT_UDN_TIMER) | \
148 INT_MASK(INT_IDN_AVAIL) | \
149 INT_MASK(INT_UDN_AVAIL) | \
150 INT_MASK(INT_IPI_3) | \
151 INT_MASK(INT_IPI_2) | \
152 INT_MASK(INT_IPI_1) | \
153 INT_MASK(INT_IPI_0) | \
154 INT_MASK(INT_PERF_COUNT) | \
155 INT_MASK(INT_AUX_PERF_COUNT) | \
156 INT_MASK(INT_INTCTRL_3) | \
157 INT_MASK(INT_INTCTRL_2) | \
158 INT_MASK(INT_INTCTRL_1) | \
159 INT_MASK(INT_INTCTRL_0) | \
160 0)
161#define CRITICAL_UNMASKED_INTERRUPTS ( \
162 INT_MASK(INT_ITLB_MISS) | \
163 INT_MASK(INT_ILL) | \
164 INT_MASK(INT_GPV) | \
165 INT_MASK(INT_IDN_ACCESS) | \
166 INT_MASK(INT_UDN_ACCESS) | \
167 INT_MASK(INT_SWINT_3) | \
168 INT_MASK(INT_SWINT_2) | \
169 INT_MASK(INT_SWINT_1) | \
170 INT_MASK(INT_SWINT_0) | \
171 INT_MASK(INT_ILL_TRANS) | \
172 INT_MASK(INT_UNALIGN_DATA) | \
173 INT_MASK(INT_DTLB_MISS) | \
174 INT_MASK(INT_DTLB_ACCESS) | \
175 INT_MASK(INT_BOOT_ACCESS) | \
176 INT_MASK(INT_WORLD_ACCESS) | \
177 INT_MASK(INT_I_ASID) | \
178 INT_MASK(INT_D_ASID) | \
179 INT_MASK(INT_DOUBLE_FAULT) | \
180 0)
181#define MASKABLE_INTERRUPTS ( \
182 INT_MASK(INT_MEM_ERROR) | \
183 INT_MASK(INT_SINGLE_STEP_3) | \
184 INT_MASK(INT_SINGLE_STEP_2) | \
185 INT_MASK(INT_SINGLE_STEP_1) | \
186 INT_MASK(INT_SINGLE_STEP_0) | \
187 INT_MASK(INT_IDN_COMPLETE) | \
188 INT_MASK(INT_UDN_COMPLETE) | \
189 INT_MASK(INT_IDN_FIREWALL) | \
190 INT_MASK(INT_UDN_FIREWALL) | \
191 INT_MASK(INT_TILE_TIMER) | \
192 INT_MASK(INT_AUX_TILE_TIMER) | \
193 INT_MASK(INT_IDN_TIMER) | \
194 INT_MASK(INT_UDN_TIMER) | \
195 INT_MASK(INT_IDN_AVAIL) | \
196 INT_MASK(INT_UDN_AVAIL) | \
197 INT_MASK(INT_IPI_3) | \
198 INT_MASK(INT_IPI_2) | \
199 INT_MASK(INT_IPI_1) | \
200 INT_MASK(INT_IPI_0) | \
201 INT_MASK(INT_PERF_COUNT) | \
202 INT_MASK(INT_AUX_PERF_COUNT) | \
203 INT_MASK(INT_INTCTRL_3) | \
204 INT_MASK(INT_INTCTRL_2) | \
205 INT_MASK(INT_INTCTRL_1) | \
206 INT_MASK(INT_INTCTRL_0) | \
207 0)
208#define UNMASKABLE_INTERRUPTS ( \
209 INT_MASK(INT_ITLB_MISS) | \
210 INT_MASK(INT_ILL) | \
211 INT_MASK(INT_GPV) | \
212 INT_MASK(INT_IDN_ACCESS) | \
213 INT_MASK(INT_UDN_ACCESS) | \
214 INT_MASK(INT_SWINT_3) | \
215 INT_MASK(INT_SWINT_2) | \
216 INT_MASK(INT_SWINT_1) | \
217 INT_MASK(INT_SWINT_0) | \
218 INT_MASK(INT_ILL_TRANS) | \
219 INT_MASK(INT_UNALIGN_DATA) | \
220 INT_MASK(INT_DTLB_MISS) | \
221 INT_MASK(INT_DTLB_ACCESS) | \
222 INT_MASK(INT_BOOT_ACCESS) | \
223 INT_MASK(INT_WORLD_ACCESS) | \
224 INT_MASK(INT_I_ASID) | \
225 INT_MASK(INT_D_ASID) | \
226 INT_MASK(INT_DOUBLE_FAULT) | \
227 0)
228#define SYNC_INTERRUPTS ( \
229 INT_MASK(INT_SINGLE_STEP_3) | \
230 INT_MASK(INT_SINGLE_STEP_2) | \
231 INT_MASK(INT_SINGLE_STEP_1) | \
232 INT_MASK(INT_SINGLE_STEP_0) | \
233 INT_MASK(INT_IDN_COMPLETE) | \
234 INT_MASK(INT_UDN_COMPLETE) | \
235 INT_MASK(INT_ITLB_MISS) | \
236 INT_MASK(INT_ILL) | \
237 INT_MASK(INT_GPV) | \
238 INT_MASK(INT_IDN_ACCESS) | \
239 INT_MASK(INT_UDN_ACCESS) | \
240 INT_MASK(INT_SWINT_3) | \
241 INT_MASK(INT_SWINT_2) | \
242 INT_MASK(INT_SWINT_1) | \
243 INT_MASK(INT_SWINT_0) | \
244 INT_MASK(INT_ILL_TRANS) | \
245 INT_MASK(INT_UNALIGN_DATA) | \
246 INT_MASK(INT_DTLB_MISS) | \
247 INT_MASK(INT_DTLB_ACCESS) | \
248 0)
249#define NON_SYNC_INTERRUPTS ( \
250 INT_MASK(INT_MEM_ERROR) | \
251 INT_MASK(INT_IDN_FIREWALL) | \
252 INT_MASK(INT_UDN_FIREWALL) | \
253 INT_MASK(INT_TILE_TIMER) | \
254 INT_MASK(INT_AUX_TILE_TIMER) | \
255 INT_MASK(INT_IDN_TIMER) | \
256 INT_MASK(INT_UDN_TIMER) | \
257 INT_MASK(INT_IDN_AVAIL) | \
258 INT_MASK(INT_UDN_AVAIL) | \
259 INT_MASK(INT_IPI_3) | \
260 INT_MASK(INT_IPI_2) | \
261 INT_MASK(INT_IPI_1) | \
262 INT_MASK(INT_IPI_0) | \
263 INT_MASK(INT_PERF_COUNT) | \
264 INT_MASK(INT_AUX_PERF_COUNT) | \
265 INT_MASK(INT_INTCTRL_3) | \
266 INT_MASK(INT_INTCTRL_2) | \
267 INT_MASK(INT_INTCTRL_1) | \
268 INT_MASK(INT_INTCTRL_0) | \
269 INT_MASK(INT_BOOT_ACCESS) | \
270 INT_MASK(INT_WORLD_ACCESS) | \
271 INT_MASK(INT_I_ASID) | \
272 INT_MASK(INT_D_ASID) | \
273 INT_MASK(INT_DOUBLE_FAULT) | \
274 0)
275#endif /* !__ASSEMBLER__ */
276#endif /* !__ARCH_INTERRUPTS_H__ */
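
The *_INTERRUPTS masks above are plain ORs of INT_MASK() bits that partition the 43 interrupt numbers by how they may be handled. A small sketch of how such a mask is typically tested (int_is_maskable() is a hypothetical helper, not part of this patch; it assumes the definitions above are in scope):

	static inline int int_is_maskable(int intno)
	{
		return (MASKABLE_INTERRUPTS & INT_MASK(intno)) != 0;
	}

	/* e.g. INT_TILE_TIMER tests as maskable, while INT_DTLB_MISS is in
	   UNMASKABLE_INTERRUPTS and the helper returns 0 for it. */
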
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index 442fcba0d122..f548efeb2de3 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -12,6 +12,15 @@
  * more details.
  */
 
+/* Include the proper base SPR definition file. */
+#ifdef __tilegx__
+#include <arch/spr_def_64.h>
+#else
+#include <arch/spr_def_32.h>
+#endif
+
+#ifdef __KERNEL__
+
 /*
  * In addition to including the proper base SPR definition file, depending
  * on machine architecture, this file defines several macros which allow
@@ -29,7 +38,6 @@
 #define _concat4(a, b, c, d)  __concat4(a, b, c, d)
 
 #ifdef __tilegx__
-#include <arch/spr_def_64.h>
 
 /* TILE-Gx dependent, protection-level dependent SPRs. */
 
@@ -65,7 +73,6 @@
 	_concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
 
 #else
-#include <arch/spr_def_32.h>
 
 /* TILEPro dependent, protection-level dependent SPRs. */
 
@@ -102,3 +109,5 @@
 	_concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
 #define INT_INTCTRL_K \
 	_concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
+
+#endif /* __KERNEL__ */
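
The _concat4() pasting above is what turns the generic *_K names into protection-level-specific ones; the kernel protection level comes from Kconfig. For example, if CONFIG_KERNEL_PL were 1 (shown purely as an assumed value):

	/* SPR_INTCTRL_K_STATUS -> _concat4(SPR_INTCTRL_, 1, _STATUS,) -> SPR_INTCTRL_1_STATUS */
	/* INT_INTCTRL_K        -> _concat4(INT_INTCTRL_, 1,,)         -> INT_INTCTRL_1        */
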
diff --git a/arch/tile/include/arch/spr_def_64.h b/arch/tile/include/arch/spr_def_64.h
new file mode 100644
index 000000000000..cd3e5f95d5fd
--- /dev/null
+++ b/arch/tile/include/arch/spr_def_64.h
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef __DOXYGEN__
16
17#ifndef __ARCH_SPR_DEF_H__
18#define __ARCH_SPR_DEF_H__
19
20#define SPR_AUX_PERF_COUNT_0 0x2105
21#define SPR_AUX_PERF_COUNT_1 0x2106
22#define SPR_AUX_PERF_COUNT_CTL 0x2107
23#define SPR_AUX_PERF_COUNT_STS 0x2108
24#define SPR_CMPEXCH_VALUE 0x2780
25#define SPR_CYCLE 0x2781
26#define SPR_DONE 0x2705
27#define SPR_DSTREAM_PF 0x2706
28#define SPR_EVENT_BEGIN 0x2782
29#define SPR_EVENT_END 0x2783
30#define SPR_EX_CONTEXT_0_0 0x2580
31#define SPR_EX_CONTEXT_0_1 0x2581
32#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
33#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
34#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
35#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
36#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
37#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
38#define SPR_EX_CONTEXT_1_0 0x2480
39#define SPR_EX_CONTEXT_1_1 0x2481
40#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
41#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
42#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
43#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
44#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
45#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
46#define SPR_EX_CONTEXT_2_0 0x2380
47#define SPR_EX_CONTEXT_2_1 0x2381
48#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
49#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
50#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
51#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
52#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
53#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
54#define SPR_FAIL 0x2707
55#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
56#define SPR_INTCTRL_0_STATUS 0x2505
57#define SPR_INTCTRL_1_STATUS 0x2405
58#define SPR_INTCTRL_2_STATUS 0x2305
59#define SPR_INTERRUPT_CRITICAL_SECTION 0x2708
60#define SPR_INTERRUPT_MASK_0 0x2506
61#define SPR_INTERRUPT_MASK_1 0x2406
62#define SPR_INTERRUPT_MASK_2 0x2306
63#define SPR_INTERRUPT_MASK_RESET_0 0x2507
64#define SPR_INTERRUPT_MASK_RESET_1 0x2407
65#define SPR_INTERRUPT_MASK_RESET_2 0x2307
66#define SPR_INTERRUPT_MASK_SET_0 0x2508
67#define SPR_INTERRUPT_MASK_SET_1 0x2408
68#define SPR_INTERRUPT_MASK_SET_2 0x2308
69#define SPR_INTERRUPT_VECTOR_BASE_0 0x2509
70#define SPR_INTERRUPT_VECTOR_BASE_1 0x2409
71#define SPR_INTERRUPT_VECTOR_BASE_2 0x2309
72#define SPR_INTERRUPT_VECTOR_BASE_3 0x2209
73#define SPR_IPI_EVENT_0 0x1f05
74#define SPR_IPI_EVENT_1 0x1e05
75#define SPR_IPI_EVENT_2 0x1d05
76#define SPR_IPI_EVENT_RESET_0 0x1f06
77#define SPR_IPI_EVENT_RESET_1 0x1e06
78#define SPR_IPI_EVENT_RESET_2 0x1d06
79#define SPR_IPI_EVENT_SET_0 0x1f07
80#define SPR_IPI_EVENT_SET_1 0x1e07
81#define SPR_IPI_EVENT_SET_2 0x1d07
82#define SPR_IPI_MASK_0 0x1f08
83#define SPR_IPI_MASK_1 0x1e08
84#define SPR_IPI_MASK_2 0x1d08
85#define SPR_IPI_MASK_RESET_0 0x1f09
86#define SPR_IPI_MASK_RESET_1 0x1e09
87#define SPR_IPI_MASK_RESET_2 0x1d09
88#define SPR_IPI_MASK_SET_0 0x1f0a
89#define SPR_IPI_MASK_SET_1 0x1e0a
90#define SPR_IPI_MASK_SET_2 0x1d0a
91#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
92#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
93#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
94#define SPR_MPL_INTCTRL_0_SET_0 0x2500
95#define SPR_MPL_INTCTRL_0_SET_1 0x2501
96#define SPR_MPL_INTCTRL_0_SET_2 0x2502
97#define SPR_MPL_INTCTRL_1_SET_0 0x2400
98#define SPR_MPL_INTCTRL_1_SET_1 0x2401
99#define SPR_MPL_INTCTRL_1_SET_2 0x2402
100#define SPR_MPL_INTCTRL_2_SET_0 0x2300
101#define SPR_MPL_INTCTRL_2_SET_1 0x2301
102#define SPR_MPL_INTCTRL_2_SET_2 0x2302
103#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
104#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
105#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
106#define SPR_MPL_UDN_AVAIL_SET_0 0x1b00
107#define SPR_MPL_UDN_AVAIL_SET_1 0x1b01
108#define SPR_MPL_UDN_AVAIL_SET_2 0x1b02
109#define SPR_MPL_UDN_COMPLETE_SET_0 0x0600
110#define SPR_MPL_UDN_COMPLETE_SET_1 0x0601
111#define SPR_MPL_UDN_COMPLETE_SET_2 0x0602
112#define SPR_MPL_UDN_FIREWALL_SET_0 0x1500
113#define SPR_MPL_UDN_FIREWALL_SET_1 0x1501
114#define SPR_MPL_UDN_FIREWALL_SET_2 0x1502
115#define SPR_MPL_UDN_TIMER_SET_0 0x1900
116#define SPR_MPL_UDN_TIMER_SET_1 0x1901
117#define SPR_MPL_UDN_TIMER_SET_2 0x1902
118#define SPR_MPL_WORLD_ACCESS_SET_0 0x2700
119#define SPR_MPL_WORLD_ACCESS_SET_1 0x2701
120#define SPR_MPL_WORLD_ACCESS_SET_2 0x2702
121#define SPR_PASS 0x2709
122#define SPR_PERF_COUNT_0 0x2005
123#define SPR_PERF_COUNT_1 0x2006
124#define SPR_PERF_COUNT_CTL 0x2007
125#define SPR_PERF_COUNT_DN_CTL 0x2008
126#define SPR_PERF_COUNT_STS 0x2009
127#define SPR_PROC_STATUS 0x2784
128#define SPR_SIM_CONTROL 0x2785
129#define SPR_SINGLE_STEP_CONTROL_0 0x0405
130#define SPR_SINGLE_STEP_CONTROL_0__CANCELED_MASK 0x1
131#define SPR_SINGLE_STEP_CONTROL_0__INHIBIT_MASK 0x2
132#define SPR_SINGLE_STEP_CONTROL_1 0x0305
133#define SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK 0x1
134#define SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK 0x2
135#define SPR_SINGLE_STEP_CONTROL_2 0x0205
136#define SPR_SINGLE_STEP_CONTROL_2__CANCELED_MASK 0x1
137#define SPR_SINGLE_STEP_CONTROL_2__INHIBIT_MASK 0x2
138#define SPR_SINGLE_STEP_EN_0_0 0x250a
139#define SPR_SINGLE_STEP_EN_0_1 0x240a
140#define SPR_SINGLE_STEP_EN_0_2 0x230a
141#define SPR_SINGLE_STEP_EN_1_0 0x250b
142#define SPR_SINGLE_STEP_EN_1_1 0x240b
143#define SPR_SINGLE_STEP_EN_1_2 0x230b
144#define SPR_SINGLE_STEP_EN_2_0 0x250c
145#define SPR_SINGLE_STEP_EN_2_1 0x240c
146#define SPR_SINGLE_STEP_EN_2_2 0x230c
147#define SPR_SYSTEM_SAVE_0_0 0x2582
148#define SPR_SYSTEM_SAVE_0_1 0x2583
149#define SPR_SYSTEM_SAVE_0_2 0x2584
150#define SPR_SYSTEM_SAVE_0_3 0x2585
151#define SPR_SYSTEM_SAVE_1_0 0x2482
152#define SPR_SYSTEM_SAVE_1_1 0x2483
153#define SPR_SYSTEM_SAVE_1_2 0x2484
154#define SPR_SYSTEM_SAVE_1_3 0x2485
155#define SPR_SYSTEM_SAVE_2_0 0x2382
156#define SPR_SYSTEM_SAVE_2_1 0x2383
157#define SPR_SYSTEM_SAVE_2_2 0x2384
158#define SPR_SYSTEM_SAVE_2_3 0x2385
159#define SPR_TILE_COORD 0x270b
160#define SPR_TILE_RTF_HWM 0x270c
161#define SPR_TILE_TIMER_CONTROL 0x1605
162#define SPR_UDN_AVAIL_EN 0x1b05
163#define SPR_UDN_DATA_AVAIL 0x0b80
164#define SPR_UDN_DEADLOCK_TIMEOUT 0x1906
165#define SPR_UDN_DEMUX_COUNT_0 0x0b05
166#define SPR_UDN_DEMUX_COUNT_1 0x0b06
167#define SPR_UDN_DEMUX_COUNT_2 0x0b07
168#define SPR_UDN_DEMUX_COUNT_3 0x0b08
169#define SPR_UDN_DIRECTION_PROTECT 0x1505
170
171#endif /* !defined(__ARCH_SPR_DEF_H__) */
172
173#endif /* !defined(__DOXYGEN__) */
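
The __PL and __ICS companions describe bit fields inside the EX_CONTEXT SPRs, to be applied after reading the register. A hedged sketch of decoding them with the tile compiler's __insn_mfspr() intrinsic (variable names are illustrative):

	unsigned long ex1 = __insn_mfspr(SPR_EX_CONTEXT_1_1);
	unsigned int pl  = (ex1 >> SPR_EX_CONTEXT_1_1__PL_SHIFT) &
			   SPR_EX_CONTEXT_1_1__PL_RMASK;    /* protection level, 0..3 */
	unsigned int ics = (ex1 >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) &
			   SPR_EX_CONTEXT_1_1__ICS_RMASK;   /* interrupt-critical-section bit */
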
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 75a16028a952..739cfe0499d1 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -130,17 +130,52 @@ static inline int atomic_read(const atomic_t *v)
  */
 #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 
-
-/*
- * We define xchg() and cmpxchg() in the included headers.
- * Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply
- * that cmpxchg() is an efficient operation, which is not particularly true.
- */
-
 /* Nonexistent functions intended to cause link errors. */
 extern unsigned long __xchg_called_with_bad_pointer(void);
 extern unsigned long __cmpxchg_called_with_bad_pointer(void);
 
+#define xchg(ptr, x) \
+	({ \
+		typeof(*(ptr)) __x; \
+		switch (sizeof(*(ptr))) { \
+		case 4: \
+			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
+				(atomic_t *)(ptr), \
+				(u32)(typeof((x)-(x)))(x)); \
+			break; \
+		case 8: \
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
+				(atomic64_t *)(ptr), \
+				(u64)(typeof((x)-(x)))(x)); \
+			break; \
+		default: \
+			__xchg_called_with_bad_pointer(); \
+		} \
+		__x; \
+	})
+
+#define cmpxchg(ptr, o, n) \
+	({ \
+		typeof(*(ptr)) __x; \
+		switch (sizeof(*(ptr))) { \
+		case 4: \
+			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
+				(atomic_t *)(ptr), \
+				(u32)(typeof((o)-(o)))(o), \
+				(u32)(typeof((n)-(n)))(n)); \
+			break; \
+		case 8: \
+			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
+				(atomic64_t *)(ptr), \
+				(u64)(typeof((o)-(o)))(o), \
+				(u64)(typeof((n)-(n)))(n)); \
+			break; \
+		default: \
+			__cmpxchg_called_with_bad_pointer(); \
+		} \
+		__x; \
+	})
+
 #define tas(ptr) (xchg((ptr), 1))
 
 #endif /* __ASSEMBLY__ */
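
The rewritten xchg()/cmpxchg() dispatch on sizeof(*(ptr)): 4-byte objects are routed to the atomic_t ops, 8-byte objects to the atomic64_t ops, and any other size still trips the deliberate link-error stubs. Illustrative only (the variables are not from this patch):

	u32 flags = 0;
	u64 seqno = 0;

	u32 old_flags = xchg(&flags, 1);        /* becomes atomic_xchg() */
	u64 old_seqno = cmpxchg(&seqno, 0, 5);  /* becomes atomic64_cmpxchg() */
	/* xchg() on a 2-byte object would fail to link via __xchg_called_with_bad_pointer(). */
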
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index ed359aee8837..92a8bee32311 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -110,16 +110,6 @@ static inline void atomic_set(atomic_t *v, int n)
 	_atomic_xchg(v, n);
 }
 
-#define xchg(ptr, x) ((typeof(*(ptr))) \
-	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
-	atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
-	__xchg_called_with_bad_pointer()))
-
-#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
-	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
-	atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
-	__cmpxchg_called_with_bad_pointer()))
-
 /* A 64bit atomic type */
 
 typedef struct {
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
new file mode 100644
index 000000000000..1c1e60d8ccb6
--- /dev/null
+++ b/arch/tile/include/asm/atomic_64.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Do not include directly; use <asm/atomic.h>.
15 */
16
17#ifndef _ASM_TILE_ATOMIC_64_H
18#define _ASM_TILE_ATOMIC_64_H
19
20#ifndef __ASSEMBLY__
21
22#include <arch/spr_def.h>
23
24/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
25
26#define atomic_set(v, i) ((v)->counter = (i))
27
28/*
29 * The smp_mb() operations throughout are to support the fact that
30 * Linux requires memory barriers before and after the operation,
31 * on any routine which updates memory and returns a value.
32 */
33
34static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
35{
36 int val;
37 __insn_mtspr(SPR_CMPEXCH_VALUE, o);
38 smp_mb(); /* barrier for proper semantics */
39 val = __insn_cmpexch4((void *)&v->counter, n);
40 smp_mb(); /* barrier for proper semantics */
41 return val;
42}
43
44static inline int atomic_xchg(atomic_t *v, int n)
45{
46 int val;
47 smp_mb(); /* barrier for proper semantics */
48 val = __insn_exch4((void *)&v->counter, n);
49 smp_mb(); /* barrier for proper semantics */
50 return val;
51}
52
53static inline void atomic_add(int i, atomic_t *v)
54{
55 __insn_fetchadd4((void *)&v->counter, i);
56}
57
58static inline int atomic_add_return(int i, atomic_t *v)
59{
60 int val;
61 smp_mb(); /* barrier for proper semantics */
62 val = __insn_fetchadd4((void *)&v->counter, i) + i;
63 barrier(); /* the "+ i" above will wait on memory */
64 return val;
65}
66
67static inline int atomic_add_unless(atomic_t *v, int a, int u)
68{
69 int guess, oldval = v->counter;
70 do {
71 if (oldval == u)
72 break;
73 guess = oldval;
74 oldval = atomic_cmpxchg(v, guess, guess + a);
75 } while (guess != oldval);
76 return oldval != u;
77}
78
79/* Now the true 64-bit operations. */
80
81#define ATOMIC64_INIT(i) { (i) }
82
83#define atomic64_read(v) ((v)->counter)
84#define atomic64_set(v, i) ((v)->counter = (i))
85
86static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
87{
88 long val;
89 smp_mb(); /* barrier for proper semantics */
90 __insn_mtspr(SPR_CMPEXCH_VALUE, o);
91 val = __insn_cmpexch((void *)&v->counter, n);
92 smp_mb(); /* barrier for proper semantics */
93 return val;
94}
95
96static inline long atomic64_xchg(atomic64_t *v, long n)
97{
98 long val;
99 smp_mb(); /* barrier for proper semantics */
100 val = __insn_exch((void *)&v->counter, n);
101 smp_mb(); /* barrier for proper semantics */
102 return val;
103}
104
105static inline void atomic64_add(long i, atomic64_t *v)
106{
107 __insn_fetchadd((void *)&v->counter, i);
108}
109
110static inline long atomic64_add_return(long i, atomic64_t *v)
111{
112 int val;
113 smp_mb(); /* barrier for proper semantics */
114 val = __insn_fetchadd((void *)&v->counter, i) + i;
115 barrier(); /* the "+ i" above will wait on memory */
116 return val;
117}
118
119static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
120{
121 long guess, oldval = v->counter;
122 do {
123 if (oldval == u)
124 break;
125 guess = oldval;
126 oldval = atomic64_cmpxchg(v, guess, guess + a);
127 } while (guess != oldval);
128 return oldval != u;
129}
130
131#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
132#define atomic64_sub(i, v) atomic64_add(-(i), (v))
133#define atomic64_inc_return(v) atomic64_add_return(1, (v))
134#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
135#define atomic64_inc(v) atomic64_add(1, (v))
136#define atomic64_dec(v) atomic64_sub(1, (v))
137
138#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
139#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
140#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
141#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
142
143#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
144
145/* Atomic dec and inc don't implement barrier, so provide them if needed. */
146#define smp_mb__before_atomic_dec() smp_mb()
147#define smp_mb__after_atomic_dec() smp_mb()
148#define smp_mb__before_atomic_inc() smp_mb()
149#define smp_mb__after_atomic_inc() smp_mb()
150
151/* Define this to indicate that cmpxchg is an efficient operation. */
152#define __HAVE_ARCH_CMPXCHG
153
154#endif /* !__ASSEMBLY__ */
155
156#endif /* _ASM_TILE_ATOMIC_64_H */
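
On TILE-Gx these ops map directly onto the fetchadd/exch/cmpexch instructions, with cmpexch taking its comparison value from SPR_CMPEXCH_VALUE, so callers use them exactly as on other architectures. A small sketch (the counter and function are illustrative):

	static atomic64_t bytes_out = ATOMIC64_INIT(0);

	static long account(long n)
	{
		atomic64_add(n, &bytes_out);        /* one fetchadd, no return value */
		return atomic64_read(&bytes_out);   /* plain load of the 64-bit counter */
	}
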
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index f18887d82399..bd5399a69edf 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -12,80 +12,41 @@
  * more details.
  */
 
-#ifndef _TILE_BACKTRACE_H
-#define _TILE_BACKTRACE_H
-
-
+#ifndef _ASM_TILE_BACKTRACE_H
+#define _ASM_TILE_BACKTRACE_H
 
 #include <linux/types.h>
 
-#include <arch/chip.h>
-
-#if defined(__tile__)
-typedef unsigned long VirtualAddress;
-#elif CHIP_VA_WIDTH() > 32
-typedef unsigned long long VirtualAddress;
-#else
-typedef unsigned int VirtualAddress;
-#endif
-
-
-/** Reads 'size' bytes from 'address' and writes the data to 'result'.
+/* Reads 'size' bytes from 'address' and writes the data to 'result'.
  * Returns true if successful, else false (e.g. memory not readable).
  */
 typedef bool (*BacktraceMemoryReader)(void *result,
-				      VirtualAddress address,
+				      unsigned long address,
 				      unsigned int size,
 				      void *extra);
 
 typedef struct {
-	/** Current PC. */
-	VirtualAddress pc;
+	/* Current PC. */
+	unsigned long pc;
 
-	/** Current stack pointer value. */
-	VirtualAddress sp;
+	/* Current stack pointer value. */
+	unsigned long sp;
 
-	/** Current frame pointer value (i.e. caller's stack pointer) */
-	VirtualAddress fp;
+	/* Current frame pointer value (i.e. caller's stack pointer) */
+	unsigned long fp;
 
-	/** Internal use only: caller's PC for first frame. */
-	VirtualAddress initial_frame_caller_pc;
+	/* Internal use only: caller's PC for first frame. */
+	unsigned long initial_frame_caller_pc;
 
-	/** Internal use only: callback to read memory. */
+	/* Internal use only: callback to read memory. */
 	BacktraceMemoryReader read_memory_func;
 
-	/** Internal use only: arbitrary argument to read_memory_func. */
+	/* Internal use only: arbitrary argument to read_memory_func. */
 	void *read_memory_func_extra;
 
 } BacktraceIterator;
 
 
-/** Initializes a backtracer to start from the given location.
- *
- * If the frame pointer cannot be determined it is set to -1.
- *
- * @param state The state to be filled in.
- * @param read_memory_func A callback that reads memory. If NULL, a default
- *        value is provided.
- * @param read_memory_func_extra An arbitrary argument to read_memory_func.
- * @param pc The current PC.
- * @param lr The current value of the 'lr' register.
- * @param sp The current value of the 'sp' register.
- * @param r52 The current value of the 'r52' register.
- */
-extern void backtrace_init(BacktraceIterator *state,
-			   BacktraceMemoryReader read_memory_func,
-			   void *read_memory_func_extra,
-			   VirtualAddress pc, VirtualAddress lr,
-			   VirtualAddress sp, VirtualAddress r52);
-
-
-/** Advances the backtracing state to the calling frame, returning
- * true iff successful.
- */
-extern bool backtrace_next(BacktraceIterator *state);
-
-
 typedef enum {
 
 	/* We have no idea what the caller's pc is. */
@@ -138,7 +99,7 @@ enum {
 };
 
 
-/** Internal constants used to define 'info' operands. */
+/* Internal constants used to define 'info' operands. */
 enum {
 	/* 0 and 1 are reserved, as are all negative numbers. */
 
@@ -147,13 +108,10 @@ enum {
 	CALLER_SP_IN_R52_BASE = 4,
 
 	CALLER_SP_OFFSET_BASE = 8,
-
-	/* Marks the entry point of certain functions. */
-	ENTRY_POINT_INFO_OP = 16
 };
 
 
-/** Current backtracer state describing where it thinks the caller is. */
+/* Current backtracer state describing where it thinks the caller is. */
 typedef struct {
 	/*
 	 * Public fields
@@ -192,7 +150,13 @@
 
 } CallerLocation;
 
+extern void backtrace_init(BacktraceIterator *state,
+			   BacktraceMemoryReader read_memory_func,
+			   void *read_memory_func_extra,
+			   unsigned long pc, unsigned long lr,
+			   unsigned long sp, unsigned long r52);
 
 
+extern bool backtrace_next(BacktraceIterator *state);
 
-#endif /* _TILE_BACKTRACE_H */
+#endif /* _ASM_TILE_BACKTRACE_H */
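
With the VirtualAddress typedef gone, callers now hand plain unsigned long register values to backtrace_init() and walk frames with backtrace_next(). A hedged sketch of the loop (record_frame() and the captured pc/lr/sp/r52 values are placeholders):

	BacktraceIterator it;

	backtrace_init(&it, NULL /* default memory reader */, NULL,
		       pc, lr, sp, r52);
	do {
		record_frame(it.pc, it.sp, it.fp);
	} while (backtrace_next(&it));
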
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 132e6bbd07e9..16f1fa51fea1 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -122,6 +122,7 @@ static inline unsigned long __arch_hweight64(__u64 w)
122#include <asm-generic/bitops/lock.h> 122#include <asm-generic/bitops/lock.h>
123#include <asm-generic/bitops/find.h> 123#include <asm-generic/bitops/find.h>
124#include <asm-generic/bitops/sched.h> 124#include <asm-generic/bitops/sched.h>
125#include <asm-generic/bitops/non-atomic.h>
125#include <asm-generic/bitops/le.h> 126#include <asm-generic/bitops/le.h>
126 127
127#endif /* _ASM_TILE_BITOPS_H */ 128#endif /* _ASM_TILE_BITOPS_H */
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 2638be51a164..d31ab905cfa7 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -126,7 +126,6 @@ static inline int test_and_change_bit(unsigned nr,
126#define smp_mb__before_clear_bit() smp_mb() 126#define smp_mb__before_clear_bit() smp_mb()
127#define smp_mb__after_clear_bit() do {} while (0) 127#define smp_mb__after_clear_bit() do {} while (0)
128 128
129#include <asm-generic/bitops/non-atomic.h>
130#include <asm-generic/bitops/ext2-atomic.h> 129#include <asm-generic/bitops/ext2-atomic.h>
131 130
132#endif /* _ASM_TILE_BITOPS_32_H */ 131#endif /* _ASM_TILE_BITOPS_32_H */
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
new file mode 100644
index 000000000000..99615e8d2d8b
--- /dev/null
+++ b/arch/tile/include/asm/bitops_64.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _ASM_TILE_BITOPS_64_H
16#define _ASM_TILE_BITOPS_64_H
17
18#include <linux/compiler.h>
19#include <asm/atomic.h>
20#include <asm/system.h>
21
22/* See <asm/bitops.h> for API comments. */
23
24static inline void set_bit(unsigned nr, volatile unsigned long *addr)
25{
26 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
27 __insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
28}
29
30static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
31{
32 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
33 __insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
34}
35
36#define smp_mb__before_clear_bit() smp_mb()
37#define smp_mb__after_clear_bit() smp_mb()
38
39
40static inline void change_bit(unsigned nr, volatile unsigned long *addr)
41{
 42	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
 43	long guess, oldval;
 44	addr += nr / BITS_PER_LONG;
 45	oldval = *addr;
46 do {
47 guess = oldval;
48 oldval = atomic64_cmpxchg((atomic64_t *)addr,
49 guess, guess ^ mask);
50 } while (guess != oldval);
51}
52
53
54/*
55 * The test_and_xxx_bit() routines require a memory fence before we
56 * start the operation, and after the operation completes. We use
57 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
58 * barrier(), to block until the atomic op is complete.
59 */
60
61static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
62{
63 int val;
64 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
65 smp_mb(); /* barrier for proper semantics */
66 val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
67 & mask) != 0;
68 barrier();
69 return val;
70}
71
72
73static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
74{
75 int val;
76 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
77 smp_mb(); /* barrier for proper semantics */
78 val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
79 & mask) != 0;
80 barrier();
81 return val;
82}
83
84
85static inline int test_and_change_bit(unsigned nr,
86 volatile unsigned long *addr)
87{
88 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
 89	long guess, oldval;
90 addr += nr / BITS_PER_LONG;
91 oldval = *addr;
92 do {
93 guess = oldval;
94 oldval = atomic64_cmpxchg((atomic64_t *)addr,
95 guess, guess ^ mask);
96 } while (guess != oldval);
97 return (oldval & mask) != 0;
98}
99
100#define ext2_set_bit_atomic(lock, nr, addr) \
101 test_and_set_bit((nr), (unsigned long *)(addr))
102#define ext2_clear_bit_atomic(lock, nr, addr) \
103 test_and_clear_bit((nr), (unsigned long *)(addr))
104
105#endif /* _ASM_TILE_BITOPS_64_H */
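
bitops_64.h handles set_bit()/clear_bit() with the fetchor/fetchand instructions and falls back to a compare-and-exchange retry loop for operations that have no single-instruction form. A small user-space sketch of that retry pattern, substituting the GCC __atomic builtin for atomic64_cmpxchg; this is an illustrative analogue, not the kernel code:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64

/* Retry loop equivalent to change_bit() above: read the word, try to
 * publish word ^ mask with compare-and-exchange, and retry as long as
 * another thread raced us. */
static void change_bit_sketch(unsigned nr, uint64_t *addr)
{
	uint64_t mask = 1ULL << (nr % BITS_PER_LONG);
	uint64_t expected;

	addr += nr / BITS_PER_LONG;
	expected = *addr;
	/* On failure the builtin writes the current value back into
	 * 'expected', so each retry works on fresh data. */
	while (!__atomic_compare_exchange_n(addr, &expected, expected ^ mask,
					    0, __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST))
		;
}

int main(void)
{
	uint64_t word = 0;

	change_bit_sketch(3, &word);
	printf("word = %#llx\n", (unsigned long long)word);	/* 0x8 */
	return 0;
}
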
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index 12fb0fb330ee..e925f4bb498f 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -116,22 +116,28 @@ static inline void __finv_buffer(void *buffer, size_t size)
116} 116}
117 117
118 118
119/* Invalidate a VA range, then memory fence. */ 119/* Invalidate a VA range and wait for it to be complete. */
120static inline void inv_buffer(void *buffer, size_t size) 120static inline void inv_buffer(void *buffer, size_t size)
121{ 121{
122 __inv_buffer(buffer, size); 122 __inv_buffer(buffer, size);
123 mb_incoherent(); 123 mb();
124} 124}
125 125
126/* Flush a VA range, then memory fence. */ 126/*
127static inline void flush_buffer(void *buffer, size_t size) 127 * Flush a locally-homecached VA range and wait for the evicted
128 * cachelines to hit memory.
129 */
130static inline void flush_buffer_local(void *buffer, size_t size)
128{ 131{
129 __flush_buffer(buffer, size); 132 __flush_buffer(buffer, size);
130 mb_incoherent(); 133 mb_incoherent();
131} 134}
132 135
133/* Flush & invalidate a VA range, then memory fence. */ 136/*
134static inline void finv_buffer(void *buffer, size_t size) 137 * Flush and invalidate a locally-homecached VA range and wait for the
138 * evicted cachelines to hit memory.
139 */
140static inline void finv_buffer_local(void *buffer, size_t size)
135{ 141{
136 __finv_buffer(buffer, size); 142 __finv_buffer(buffer, size);
137 mb_incoherent(); 143 mb_incoherent();
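
The rename from flush_buffer()/finv_buffer() to the *_local() variants makes explicit that these helpers only cover locally-homecached memory, while plain inv_buffer() now needs only an ordinary mb(). A minimal hedged sketch of the usual pattern, assuming buf is a locally-homecached buffer about to be handed to a device; the surrounding driver fragment is hypothetical:

/* Hypothetical driver fragment using the helper renamed above:
 * push dirty lines to memory and drop them from the local cache so a
 * later CPU read observes what the device wrote. */
static void hand_buffer_to_device_sketch(void *buf, size_t len)
{
	finv_buffer_local(buf, len);
	/* ... kick off the device transfer on 'buf' here ... */
}
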
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index c3ae570c0a5d..bf95f55b82b0 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -215,8 +215,8 @@ struct compat_sigaction;
215struct compat_siginfo; 215struct compat_siginfo;
216struct compat_sigaltstack; 216struct compat_sigaltstack;
217long compat_sys_execve(const char __user *path, 217long compat_sys_execve(const char __user *path,
218 const compat_uptr_t __user *argv, 218 compat_uptr_t __user *argv,
219 const compat_uptr_t __user *envp, struct pt_regs *); 219 compat_uptr_t __user *envp, struct pt_regs *);
220long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, 220long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
221 struct compat_sigaction __user *oact, 221 struct compat_sigaction __user *oact,
222 size_t sigsetsize); 222 size_t sigsetsize);
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 15e1dceecc64..eaa06d175b39 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -65,7 +65,8 @@ extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t,
65extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, 65extern void dma_sync_single_range_for_device(struct device *, dma_addr_t,
66 unsigned long offset, size_t, 66 unsigned long offset, size_t,
67 enum dma_data_direction); 67 enum dma_data_direction);
68extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction); 68extern void dma_cache_sync(struct device *dev, void *vaddr, size_t,
69 enum dma_data_direction);
69 70
70static inline int 71static inline int
71dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 72dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
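
The dma_cache_sync() change above only adds the struct device argument that the generic DMA API already passes on other architectures. A minimal hedged call-site sketch; the driver context, buffer and direction are illustrative:

/* Hypothetical driver fragment: make CPU writes to a non-coherent
 * DMA buffer visible to the device before starting a transfer. */
static void publish_to_device_sketch(struct device *dev, void *vaddr,
				     size_t len)
{
	dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);
}
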
diff --git a/arch/tile/include/asm/fb.h b/arch/tile/include/asm/fb.h
new file mode 100644
index 000000000000..3a4988e8df45
--- /dev/null
+++ b/arch/tile/include/asm/fb.h
@@ -0,0 +1 @@
#include <asm-generic/fb.h>
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index d3cbb9b14cbe..c9ea1652af03 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -52,6 +52,7 @@ extern void iounmap(volatile void __iomem *addr);
52#endif 52#endif
53 53
54#define ioremap_nocache(physaddr, size) ioremap(physaddr, size) 54#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
55#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
55#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) 56#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
56#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) 57#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
57 58
@@ -161,6 +162,15 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
161#define iowrite32 writel 162#define iowrite32 writel
162#define iowrite64 writeq 163#define iowrite64 writeq
163 164
165static inline void memset_io(void *dst, int val, size_t len)
166{
167 int x;
168 BUG_ON((unsigned long)dst & 0x3);
169 val = (val & 0xff) * 0x01010101;
170 for (x = 0; x < len; x += 4)
171 writel(val, dst + x);
172}
173
164static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, 174static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
165 size_t len) 175 size_t len)
166{ 176{
@@ -269,6 +279,11 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
269 ioport_panic(); 279 ioport_panic();
270} 280}
271 281
282#define ioread16be(addr) be16_to_cpu(ioread16(addr))
283#define ioread32be(addr) be32_to_cpu(ioread32(addr))
284#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
285#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr))
286
272#define ioread8_rep(p, dst, count) \ 287#define ioread8_rep(p, dst, count) \
273 insb((unsigned long) (p), (dst), (count)) 288 insb((unsigned long) (p), (dst), (count))
274#define ioread16_rep(p, dst, count) \ 289#define ioread16_rep(p, dst, count) \
@@ -283,4 +298,7 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
283#define iowrite32_rep(p, src, count) \ 298#define iowrite32_rep(p, src, count) \
284 outsl((unsigned long) (p), (src), (count)) 299 outsl((unsigned long) (p), (src), (count))
285 300
301#define virt_to_bus virt_to_phys
302#define bus_to_virt phys_to_virt
303
286#endif /* _ASM_TILE_IO_H */ 304#endif /* _ASM_TILE_IO_H */
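
The io.h additions are generic fallbacks: ioremap_wc() aliases plain ioremap(), memset_io() replicates the fill byte into a 32-bit word and stores it with writel() (hence the 4-byte alignment BUG_ON), the *be accessors byte-swap around the little-endian MMIO ops, and virt_to_bus/bus_to_virt map straight to the phys helpers. A short kernel-context sketch of their use against a hypothetical mapped window; base, REG_CTRL and the register meaning are made up for illustration:

#define REG_CTRL	0x10	/* hypothetical register offset */

static void init_mmio_sketch(void __iomem *base, size_t win_len)
{
	/* Clear the whole window; memset_io() above requires the
	 * destination to be 4-byte aligned. */
	memset_io((void __force *)base, 0, win_len);

	/* Registers declared big-endian in this example. */
	iowrite32be(0x1, base + REG_CTRL);
	if (ioread32be(base + REG_CTRL) != 0x1)
		pr_warn("ctrl readback mismatch\n");
}
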
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 572fd3ef1d73..94e9a511de84 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -23,6 +23,8 @@
23/* IRQ numbers used for linux IPIs. */ 23/* IRQ numbers used for linux IPIs. */
24#define IRQ_RESCHEDULE 1 24#define IRQ_RESCHEDULE 1
25 25
26#define irq_canonicalize(irq) (irq)
27
26void ack_bad_irq(unsigned int irq); 28void ack_bad_irq(unsigned int irq);
27 29
28/* 30/*
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 9bc0d0725c28..15fb24641120 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -100,8 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
100 __get_cpu_var(current_asid) = asid; 100 __get_cpu_var(current_asid) = asid;
101 101
102 /* Clear cpu from the old mm, and set it in the new one. */ 102 /* Clear cpu from the old mm, and set it in the new one. */
103 cpumask_clear_cpu(cpu, &prev->cpu_vm_mask); 103 cpumask_clear_cpu(cpu, mm_cpumask(prev));
104 cpumask_set_cpu(cpu, &next->cpu_vm_mask); 104 cpumask_set_cpu(cpu, mm_cpumask(next));
105 105
106 /* Re-load page tables */ 106 /* Re-load page tables */
107 install_page_table(next->pgd, asid); 107 install_page_table(next->pgd, asid);
diff --git a/arch/tile/include/asm/opcode-tile_32.h b/arch/tile/include/asm/opcode-tile_32.h
index eda60ecbae3d..03df7b1e77bf 100644
--- a/arch/tile/include/asm/opcode-tile_32.h
+++ b/arch/tile/include/asm/opcode-tile_32.h
@@ -1502,5 +1502,12 @@ extern int parse_insn_tile(tile_bundle_bits bits,
1502 decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); 1502 decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]);
1503 1503
1504 1504
1505/* Given a set of bundle bits and a specific pipe, returns which
1506 * instruction the bundle contains in that pipe.
1507 */
1508extern const struct tile_opcode *
1509find_opcode(tile_bundle_bits bits, tile_pipeline pipe);
1510
1511
1505 1512
1506#endif /* opcode_tile_h */ 1513#endif /* opcode_tile_h */
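
find_opcode(), declared above, gives the single-step and disassembly code a way to ask which instruction a bundle carries in a given pipe. A hedged sketch of a caller; the TILE_PIPELINE_X1 enumerator, the 'name' field, and the NULL-on-empty-pipe behavior are assumptions drawn from the rest of <asm/opcode-tile_32.h>, only find_opcode() itself comes from this patch:

/* Hedged sketch: report what the X1 pipe of a bundle contains. */
static void describe_x1_sketch(tile_bundle_bits bundle)
{
	const struct tile_opcode *op = find_opcode(bundle, TILE_PIPELINE_X1);

	if (op)
		pr_info("X1 pipe: %s\n", op->name);
	else
		pr_info("X1 pipe: no instruction\n");
}
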
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h
index eda60ecbae3d..c0633466cd5c 100644
--- a/arch/tile/include/asm/opcode-tile_64.h
+++ b/arch/tile/include/asm/opcode-tile_64.h
@@ -5,863 +5,711 @@
5#ifndef opcode_tile_h 5#ifndef opcode_tile_h
6#define opcode_tile_h 6#define opcode_tile_h
7 7
8typedef unsigned long long tile_bundle_bits; 8typedef unsigned long long tilegx_bundle_bits;
9 9
10 10
11enum 11enum
12{ 12{
13 TILE_MAX_OPERANDS = 5 /* mm */ 13 TILEGX_MAX_OPERANDS = 4 /* bfexts */
14}; 14};
15 15
16typedef enum 16typedef enum
17{ 17{
18 TILE_OPC_BPT, 18 TILEGX_OPC_BPT,
19 TILE_OPC_INFO, 19 TILEGX_OPC_INFO,
20 TILE_OPC_INFOL, 20 TILEGX_OPC_INFOL,
21 TILE_OPC_J, 21 TILEGX_OPC_MOVE,
22 TILE_OPC_JAL, 22 TILEGX_OPC_MOVEI,
23 TILE_OPC_MOVE, 23 TILEGX_OPC_MOVELI,
24 TILE_OPC_MOVE_SN, 24 TILEGX_OPC_PREFETCH,
25 TILE_OPC_MOVEI, 25 TILEGX_OPC_PREFETCH_ADD_L1,
26 TILE_OPC_MOVEI_SN, 26 TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
27 TILE_OPC_MOVELI, 27 TILEGX_OPC_PREFETCH_ADD_L2,
28 TILE_OPC_MOVELI_SN, 28 TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
29 TILE_OPC_MOVELIS, 29 TILEGX_OPC_PREFETCH_ADD_L3,
30 TILE_OPC_PREFETCH, 30 TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
31 TILE_OPC_RAISE, 31 TILEGX_OPC_PREFETCH_L1,
32 TILE_OPC_ADD, 32 TILEGX_OPC_PREFETCH_L1_FAULT,
33 TILE_OPC_ADD_SN, 33 TILEGX_OPC_PREFETCH_L2,
34 TILE_OPC_ADDB, 34 TILEGX_OPC_PREFETCH_L2_FAULT,
35 TILE_OPC_ADDB_SN, 35 TILEGX_OPC_PREFETCH_L3,
36 TILE_OPC_ADDBS_U, 36 TILEGX_OPC_PREFETCH_L3_FAULT,
37 TILE_OPC_ADDBS_U_SN, 37 TILEGX_OPC_RAISE,
38 TILE_OPC_ADDH, 38 TILEGX_OPC_ADD,
39 TILE_OPC_ADDH_SN, 39 TILEGX_OPC_ADDI,
40 TILE_OPC_ADDHS, 40 TILEGX_OPC_ADDLI,
41 TILE_OPC_ADDHS_SN, 41 TILEGX_OPC_ADDX,
42 TILE_OPC_ADDI, 42 TILEGX_OPC_ADDXI,
43 TILE_OPC_ADDI_SN, 43 TILEGX_OPC_ADDXLI,
44 TILE_OPC_ADDIB, 44 TILEGX_OPC_ADDXSC,
45 TILE_OPC_ADDIB_SN, 45 TILEGX_OPC_AND,
46 TILE_OPC_ADDIH, 46 TILEGX_OPC_ANDI,
47 TILE_OPC_ADDIH_SN, 47 TILEGX_OPC_BEQZ,
48 TILE_OPC_ADDLI, 48 TILEGX_OPC_BEQZT,
49 TILE_OPC_ADDLI_SN, 49 TILEGX_OPC_BFEXTS,
50 TILE_OPC_ADDLIS, 50 TILEGX_OPC_BFEXTU,
51 TILE_OPC_ADDS, 51 TILEGX_OPC_BFINS,
52 TILE_OPC_ADDS_SN, 52 TILEGX_OPC_BGEZ,
53 TILE_OPC_ADIFFB_U, 53 TILEGX_OPC_BGEZT,
54 TILE_OPC_ADIFFB_U_SN, 54 TILEGX_OPC_BGTZ,
55 TILE_OPC_ADIFFH, 55 TILEGX_OPC_BGTZT,
56 TILE_OPC_ADIFFH_SN, 56 TILEGX_OPC_BLBC,
57 TILE_OPC_AND, 57 TILEGX_OPC_BLBCT,
58 TILE_OPC_AND_SN, 58 TILEGX_OPC_BLBS,
59 TILE_OPC_ANDI, 59 TILEGX_OPC_BLBST,
60 TILE_OPC_ANDI_SN, 60 TILEGX_OPC_BLEZ,
61 TILE_OPC_AULI, 61 TILEGX_OPC_BLEZT,
62 TILE_OPC_AVGB_U, 62 TILEGX_OPC_BLTZ,
63 TILE_OPC_AVGB_U_SN, 63 TILEGX_OPC_BLTZT,
64 TILE_OPC_AVGH, 64 TILEGX_OPC_BNEZ,
65 TILE_OPC_AVGH_SN, 65 TILEGX_OPC_BNEZT,
66 TILE_OPC_BBNS, 66 TILEGX_OPC_CLZ,
67 TILE_OPC_BBNS_SN, 67 TILEGX_OPC_CMOVEQZ,
68 TILE_OPC_BBNST, 68 TILEGX_OPC_CMOVNEZ,
69 TILE_OPC_BBNST_SN, 69 TILEGX_OPC_CMPEQ,
70 TILE_OPC_BBS, 70 TILEGX_OPC_CMPEQI,
71 TILE_OPC_BBS_SN, 71 TILEGX_OPC_CMPEXCH,
72 TILE_OPC_BBST, 72 TILEGX_OPC_CMPEXCH4,
73 TILE_OPC_BBST_SN, 73 TILEGX_OPC_CMPLES,
74 TILE_OPC_BGEZ, 74 TILEGX_OPC_CMPLEU,
75 TILE_OPC_BGEZ_SN, 75 TILEGX_OPC_CMPLTS,
76 TILE_OPC_BGEZT, 76 TILEGX_OPC_CMPLTSI,
77 TILE_OPC_BGEZT_SN, 77 TILEGX_OPC_CMPLTU,
78 TILE_OPC_BGZ, 78 TILEGX_OPC_CMPLTUI,
79 TILE_OPC_BGZ_SN, 79 TILEGX_OPC_CMPNE,
80 TILE_OPC_BGZT, 80 TILEGX_OPC_CMUL,
81 TILE_OPC_BGZT_SN, 81 TILEGX_OPC_CMULA,
82 TILE_OPC_BITX, 82 TILEGX_OPC_CMULAF,
83 TILE_OPC_BITX_SN, 83 TILEGX_OPC_CMULF,
84 TILE_OPC_BLEZ, 84 TILEGX_OPC_CMULFR,
85 TILE_OPC_BLEZ_SN, 85 TILEGX_OPC_CMULH,
86 TILE_OPC_BLEZT, 86 TILEGX_OPC_CMULHR,
87 TILE_OPC_BLEZT_SN, 87 TILEGX_OPC_CRC32_32,
88 TILE_OPC_BLZ, 88 TILEGX_OPC_CRC32_8,
89 TILE_OPC_BLZ_SN, 89 TILEGX_OPC_CTZ,
90 TILE_OPC_BLZT, 90 TILEGX_OPC_DBLALIGN,
91 TILE_OPC_BLZT_SN, 91 TILEGX_OPC_DBLALIGN2,
92 TILE_OPC_BNZ, 92 TILEGX_OPC_DBLALIGN4,
93 TILE_OPC_BNZ_SN, 93 TILEGX_OPC_DBLALIGN6,
94 TILE_OPC_BNZT, 94 TILEGX_OPC_DRAIN,
95 TILE_OPC_BNZT_SN, 95 TILEGX_OPC_DTLBPR,
96 TILE_OPC_BYTEX, 96 TILEGX_OPC_EXCH,
97 TILE_OPC_BYTEX_SN, 97 TILEGX_OPC_EXCH4,
98 TILE_OPC_BZ, 98 TILEGX_OPC_FDOUBLE_ADD_FLAGS,
99 TILE_OPC_BZ_SN, 99 TILEGX_OPC_FDOUBLE_ADDSUB,
100 TILE_OPC_BZT, 100 TILEGX_OPC_FDOUBLE_MUL_FLAGS,
101 TILE_OPC_BZT_SN, 101 TILEGX_OPC_FDOUBLE_PACK1,
102 TILE_OPC_CLZ, 102 TILEGX_OPC_FDOUBLE_PACK2,
103 TILE_OPC_CLZ_SN, 103 TILEGX_OPC_FDOUBLE_SUB_FLAGS,
104 TILE_OPC_CRC32_32, 104 TILEGX_OPC_FDOUBLE_UNPACK_MAX,
105 TILE_OPC_CRC32_32_SN, 105 TILEGX_OPC_FDOUBLE_UNPACK_MIN,
106 TILE_OPC_CRC32_8, 106 TILEGX_OPC_FETCHADD,
107 TILE_OPC_CRC32_8_SN, 107 TILEGX_OPC_FETCHADD4,
108 TILE_OPC_CTZ, 108 TILEGX_OPC_FETCHADDGEZ,
109 TILE_OPC_CTZ_SN, 109 TILEGX_OPC_FETCHADDGEZ4,
110 TILE_OPC_DRAIN, 110 TILEGX_OPC_FETCHAND,
111 TILE_OPC_DTLBPR, 111 TILEGX_OPC_FETCHAND4,
112 TILE_OPC_DWORD_ALIGN, 112 TILEGX_OPC_FETCHOR,
113 TILE_OPC_DWORD_ALIGN_SN, 113 TILEGX_OPC_FETCHOR4,
114 TILE_OPC_FINV, 114 TILEGX_OPC_FINV,
115 TILE_OPC_FLUSH, 115 TILEGX_OPC_FLUSH,
116 TILE_OPC_FNOP, 116 TILEGX_OPC_FLUSHWB,
117 TILE_OPC_ICOH, 117 TILEGX_OPC_FNOP,
118 TILE_OPC_ILL, 118 TILEGX_OPC_FSINGLE_ADD1,
119 TILE_OPC_INTHB, 119 TILEGX_OPC_FSINGLE_ADDSUB2,
120 TILE_OPC_INTHB_SN, 120 TILEGX_OPC_FSINGLE_MUL1,
121 TILE_OPC_INTHH, 121 TILEGX_OPC_FSINGLE_MUL2,
122 TILE_OPC_INTHH_SN, 122 TILEGX_OPC_FSINGLE_PACK1,
123 TILE_OPC_INTLB, 123 TILEGX_OPC_FSINGLE_PACK2,
124 TILE_OPC_INTLB_SN, 124 TILEGX_OPC_FSINGLE_SUB1,
125 TILE_OPC_INTLH, 125 TILEGX_OPC_ICOH,
126 TILE_OPC_INTLH_SN, 126 TILEGX_OPC_ILL,
127 TILE_OPC_INV, 127 TILEGX_OPC_INV,
128 TILE_OPC_IRET, 128 TILEGX_OPC_IRET,
129 TILE_OPC_JALB, 129 TILEGX_OPC_J,
130 TILE_OPC_JALF, 130 TILEGX_OPC_JAL,
131 TILE_OPC_JALR, 131 TILEGX_OPC_JALR,
132 TILE_OPC_JALRP, 132 TILEGX_OPC_JALRP,
133 TILE_OPC_JB, 133 TILEGX_OPC_JR,
134 TILE_OPC_JF, 134 TILEGX_OPC_JRP,
135 TILE_OPC_JR, 135 TILEGX_OPC_LD,
136 TILE_OPC_JRP, 136 TILEGX_OPC_LD1S,
137 TILE_OPC_LB, 137 TILEGX_OPC_LD1S_ADD,
138 TILE_OPC_LB_SN, 138 TILEGX_OPC_LD1U,
139 TILE_OPC_LB_U, 139 TILEGX_OPC_LD1U_ADD,
140 TILE_OPC_LB_U_SN, 140 TILEGX_OPC_LD2S,
141 TILE_OPC_LBADD, 141 TILEGX_OPC_LD2S_ADD,
142 TILE_OPC_LBADD_SN, 142 TILEGX_OPC_LD2U,
143 TILE_OPC_LBADD_U, 143 TILEGX_OPC_LD2U_ADD,
144 TILE_OPC_LBADD_U_SN, 144 TILEGX_OPC_LD4S,
145 TILE_OPC_LH, 145 TILEGX_OPC_LD4S_ADD,
146 TILE_OPC_LH_SN, 146 TILEGX_OPC_LD4U,
147 TILE_OPC_LH_U, 147 TILEGX_OPC_LD4U_ADD,
148 TILE_OPC_LH_U_SN, 148 TILEGX_OPC_LD_ADD,
149 TILE_OPC_LHADD, 149 TILEGX_OPC_LDNA,
150 TILE_OPC_LHADD_SN, 150 TILEGX_OPC_LDNA_ADD,
151 TILE_OPC_LHADD_U, 151 TILEGX_OPC_LDNT,
152 TILE_OPC_LHADD_U_SN, 152 TILEGX_OPC_LDNT1S,
153 TILE_OPC_LNK, 153 TILEGX_OPC_LDNT1S_ADD,
154 TILE_OPC_LNK_SN, 154 TILEGX_OPC_LDNT1U,
155 TILE_OPC_LW, 155 TILEGX_OPC_LDNT1U_ADD,
156 TILE_OPC_LW_SN, 156 TILEGX_OPC_LDNT2S,
157 TILE_OPC_LW_NA, 157 TILEGX_OPC_LDNT2S_ADD,
158 TILE_OPC_LW_NA_SN, 158 TILEGX_OPC_LDNT2U,
159 TILE_OPC_LWADD, 159 TILEGX_OPC_LDNT2U_ADD,
160 TILE_OPC_LWADD_SN, 160 TILEGX_OPC_LDNT4S,
161 TILE_OPC_LWADD_NA, 161 TILEGX_OPC_LDNT4S_ADD,
162 TILE_OPC_LWADD_NA_SN, 162 TILEGX_OPC_LDNT4U,
163 TILE_OPC_MAXB_U, 163 TILEGX_OPC_LDNT4U_ADD,
164 TILE_OPC_MAXB_U_SN, 164 TILEGX_OPC_LDNT_ADD,
165 TILE_OPC_MAXH, 165 TILEGX_OPC_LNK,
166 TILE_OPC_MAXH_SN, 166 TILEGX_OPC_MF,
167 TILE_OPC_MAXIB_U, 167 TILEGX_OPC_MFSPR,
168 TILE_OPC_MAXIB_U_SN, 168 TILEGX_OPC_MM,
169 TILE_OPC_MAXIH, 169 TILEGX_OPC_MNZ,
170 TILE_OPC_MAXIH_SN, 170 TILEGX_OPC_MTSPR,
171 TILE_OPC_MF, 171 TILEGX_OPC_MUL_HS_HS,
172 TILE_OPC_MFSPR, 172 TILEGX_OPC_MUL_HS_HU,
173 TILE_OPC_MINB_U, 173 TILEGX_OPC_MUL_HS_LS,
174 TILE_OPC_MINB_U_SN, 174 TILEGX_OPC_MUL_HS_LU,
175 TILE_OPC_MINH, 175 TILEGX_OPC_MUL_HU_HU,
176 TILE_OPC_MINH_SN, 176 TILEGX_OPC_MUL_HU_LS,
177 TILE_OPC_MINIB_U, 177 TILEGX_OPC_MUL_HU_LU,
178 TILE_OPC_MINIB_U_SN, 178 TILEGX_OPC_MUL_LS_LS,
179 TILE_OPC_MINIH, 179 TILEGX_OPC_MUL_LS_LU,
180 TILE_OPC_MINIH_SN, 180 TILEGX_OPC_MUL_LU_LU,
181 TILE_OPC_MM, 181 TILEGX_OPC_MULA_HS_HS,
182 TILE_OPC_MNZ, 182 TILEGX_OPC_MULA_HS_HU,
183 TILE_OPC_MNZ_SN, 183 TILEGX_OPC_MULA_HS_LS,
184 TILE_OPC_MNZB, 184 TILEGX_OPC_MULA_HS_LU,
185 TILE_OPC_MNZB_SN, 185 TILEGX_OPC_MULA_HU_HU,
186 TILE_OPC_MNZH, 186 TILEGX_OPC_MULA_HU_LS,
187 TILE_OPC_MNZH_SN, 187 TILEGX_OPC_MULA_HU_LU,
188 TILE_OPC_MTSPR, 188 TILEGX_OPC_MULA_LS_LS,
189 TILE_OPC_MULHH_SS, 189 TILEGX_OPC_MULA_LS_LU,
190 TILE_OPC_MULHH_SS_SN, 190 TILEGX_OPC_MULA_LU_LU,
191 TILE_OPC_MULHH_SU, 191 TILEGX_OPC_MULAX,
192 TILE_OPC_MULHH_SU_SN, 192 TILEGX_OPC_MULX,
193 TILE_OPC_MULHH_UU, 193 TILEGX_OPC_MZ,
194 TILE_OPC_MULHH_UU_SN, 194 TILEGX_OPC_NAP,
195 TILE_OPC_MULHHA_SS, 195 TILEGX_OPC_NOP,
196 TILE_OPC_MULHHA_SS_SN, 196 TILEGX_OPC_NOR,
197 TILE_OPC_MULHHA_SU, 197 TILEGX_OPC_OR,
198 TILE_OPC_MULHHA_SU_SN, 198 TILEGX_OPC_ORI,
199 TILE_OPC_MULHHA_UU, 199 TILEGX_OPC_PCNT,
200 TILE_OPC_MULHHA_UU_SN, 200 TILEGX_OPC_REVBITS,
201 TILE_OPC_MULHHSA_UU, 201 TILEGX_OPC_REVBYTES,
202 TILE_OPC_MULHHSA_UU_SN, 202 TILEGX_OPC_ROTL,
203 TILE_OPC_MULHL_SS, 203 TILEGX_OPC_ROTLI,
204 TILE_OPC_MULHL_SS_SN, 204 TILEGX_OPC_SHL,
205 TILE_OPC_MULHL_SU, 205 TILEGX_OPC_SHL16INSLI,
206 TILE_OPC_MULHL_SU_SN, 206 TILEGX_OPC_SHL1ADD,
207 TILE_OPC_MULHL_US, 207 TILEGX_OPC_SHL1ADDX,
208 TILE_OPC_MULHL_US_SN, 208 TILEGX_OPC_SHL2ADD,
209 TILE_OPC_MULHL_UU, 209 TILEGX_OPC_SHL2ADDX,
210 TILE_OPC_MULHL_UU_SN, 210 TILEGX_OPC_SHL3ADD,
211 TILE_OPC_MULHLA_SS, 211 TILEGX_OPC_SHL3ADDX,
212 TILE_OPC_MULHLA_SS_SN, 212 TILEGX_OPC_SHLI,
213 TILE_OPC_MULHLA_SU, 213 TILEGX_OPC_SHLX,
214 TILE_OPC_MULHLA_SU_SN, 214 TILEGX_OPC_SHLXI,
215 TILE_OPC_MULHLA_US, 215 TILEGX_OPC_SHRS,
216 TILE_OPC_MULHLA_US_SN, 216 TILEGX_OPC_SHRSI,
217 TILE_OPC_MULHLA_UU, 217 TILEGX_OPC_SHRU,
218 TILE_OPC_MULHLA_UU_SN, 218 TILEGX_OPC_SHRUI,
219 TILE_OPC_MULHLSA_UU, 219 TILEGX_OPC_SHRUX,
220 TILE_OPC_MULHLSA_UU_SN, 220 TILEGX_OPC_SHRUXI,
221 TILE_OPC_MULLL_SS, 221 TILEGX_OPC_SHUFFLEBYTES,
222 TILE_OPC_MULLL_SS_SN, 222 TILEGX_OPC_ST,
223 TILE_OPC_MULLL_SU, 223 TILEGX_OPC_ST1,
224 TILE_OPC_MULLL_SU_SN, 224 TILEGX_OPC_ST1_ADD,
225 TILE_OPC_MULLL_UU, 225 TILEGX_OPC_ST2,
226 TILE_OPC_MULLL_UU_SN, 226 TILEGX_OPC_ST2_ADD,
227 TILE_OPC_MULLLA_SS, 227 TILEGX_OPC_ST4,
228 TILE_OPC_MULLLA_SS_SN, 228 TILEGX_OPC_ST4_ADD,
229 TILE_OPC_MULLLA_SU, 229 TILEGX_OPC_ST_ADD,
230 TILE_OPC_MULLLA_SU_SN, 230 TILEGX_OPC_STNT,
231 TILE_OPC_MULLLA_UU, 231 TILEGX_OPC_STNT1,
232 TILE_OPC_MULLLA_UU_SN, 232 TILEGX_OPC_STNT1_ADD,
233 TILE_OPC_MULLLSA_UU, 233 TILEGX_OPC_STNT2,
234 TILE_OPC_MULLLSA_UU_SN, 234 TILEGX_OPC_STNT2_ADD,
235 TILE_OPC_MVNZ, 235 TILEGX_OPC_STNT4,
236 TILE_OPC_MVNZ_SN, 236 TILEGX_OPC_STNT4_ADD,
237 TILE_OPC_MVZ, 237 TILEGX_OPC_STNT_ADD,
238 TILE_OPC_MVZ_SN, 238 TILEGX_OPC_SUB,
239 TILE_OPC_MZ, 239 TILEGX_OPC_SUBX,
240 TILE_OPC_MZ_SN, 240 TILEGX_OPC_SUBXSC,
241 TILE_OPC_MZB, 241 TILEGX_OPC_SWINT0,
242 TILE_OPC_MZB_SN, 242 TILEGX_OPC_SWINT1,
243 TILE_OPC_MZH, 243 TILEGX_OPC_SWINT2,
244 TILE_OPC_MZH_SN, 244 TILEGX_OPC_SWINT3,
245 TILE_OPC_NAP, 245 TILEGX_OPC_TBLIDXB0,
246 TILE_OPC_NOP, 246 TILEGX_OPC_TBLIDXB1,
247 TILE_OPC_NOR, 247 TILEGX_OPC_TBLIDXB2,
248 TILE_OPC_NOR_SN, 248 TILEGX_OPC_TBLIDXB3,
249 TILE_OPC_OR, 249 TILEGX_OPC_V1ADD,
250 TILE_OPC_OR_SN, 250 TILEGX_OPC_V1ADDI,
251 TILE_OPC_ORI, 251 TILEGX_OPC_V1ADDUC,
252 TILE_OPC_ORI_SN, 252 TILEGX_OPC_V1ADIFFU,
253 TILE_OPC_PACKBS_U, 253 TILEGX_OPC_V1AVGU,
254 TILE_OPC_PACKBS_U_SN, 254 TILEGX_OPC_V1CMPEQ,
255 TILE_OPC_PACKHB, 255 TILEGX_OPC_V1CMPEQI,
256 TILE_OPC_PACKHB_SN, 256 TILEGX_OPC_V1CMPLES,
257 TILE_OPC_PACKHS, 257 TILEGX_OPC_V1CMPLEU,
258 TILE_OPC_PACKHS_SN, 258 TILEGX_OPC_V1CMPLTS,
259 TILE_OPC_PACKLB, 259 TILEGX_OPC_V1CMPLTSI,
260 TILE_OPC_PACKLB_SN, 260 TILEGX_OPC_V1CMPLTU,
261 TILE_OPC_PCNT, 261 TILEGX_OPC_V1CMPLTUI,
262 TILE_OPC_PCNT_SN, 262 TILEGX_OPC_V1CMPNE,
263 TILE_OPC_RL, 263 TILEGX_OPC_V1DDOTPU,
264 TILE_OPC_RL_SN, 264 TILEGX_OPC_V1DDOTPUA,
265 TILE_OPC_RLI, 265 TILEGX_OPC_V1DDOTPUS,
266 TILE_OPC_RLI_SN, 266 TILEGX_OPC_V1DDOTPUSA,
267 TILE_OPC_S1A, 267 TILEGX_OPC_V1DOTP,
268 TILE_OPC_S1A_SN, 268 TILEGX_OPC_V1DOTPA,
269 TILE_OPC_S2A, 269 TILEGX_OPC_V1DOTPU,
270 TILE_OPC_S2A_SN, 270 TILEGX_OPC_V1DOTPUA,
271 TILE_OPC_S3A, 271 TILEGX_OPC_V1DOTPUS,
272 TILE_OPC_S3A_SN, 272 TILEGX_OPC_V1DOTPUSA,
273 TILE_OPC_SADAB_U, 273 TILEGX_OPC_V1INT_H,
274 TILE_OPC_SADAB_U_SN, 274 TILEGX_OPC_V1INT_L,
275 TILE_OPC_SADAH, 275 TILEGX_OPC_V1MAXU,
276 TILE_OPC_SADAH_SN, 276 TILEGX_OPC_V1MAXUI,
277 TILE_OPC_SADAH_U, 277 TILEGX_OPC_V1MINU,
278 TILE_OPC_SADAH_U_SN, 278 TILEGX_OPC_V1MINUI,
279 TILE_OPC_SADB_U, 279 TILEGX_OPC_V1MNZ,
280 TILE_OPC_SADB_U_SN, 280 TILEGX_OPC_V1MULTU,
281 TILE_OPC_SADH, 281 TILEGX_OPC_V1MULU,
282 TILE_OPC_SADH_SN, 282 TILEGX_OPC_V1MULUS,
283 TILE_OPC_SADH_U, 283 TILEGX_OPC_V1MZ,
284 TILE_OPC_SADH_U_SN, 284 TILEGX_OPC_V1SADAU,
285 TILE_OPC_SB, 285 TILEGX_OPC_V1SADU,
286 TILE_OPC_SBADD, 286 TILEGX_OPC_V1SHL,
287 TILE_OPC_SEQ, 287 TILEGX_OPC_V1SHLI,
288 TILE_OPC_SEQ_SN, 288 TILEGX_OPC_V1SHRS,
289 TILE_OPC_SEQB, 289 TILEGX_OPC_V1SHRSI,
290 TILE_OPC_SEQB_SN, 290 TILEGX_OPC_V1SHRU,
291 TILE_OPC_SEQH, 291 TILEGX_OPC_V1SHRUI,
292 TILE_OPC_SEQH_SN, 292 TILEGX_OPC_V1SUB,
293 TILE_OPC_SEQI, 293 TILEGX_OPC_V1SUBUC,
294 TILE_OPC_SEQI_SN, 294 TILEGX_OPC_V2ADD,
295 TILE_OPC_SEQIB, 295 TILEGX_OPC_V2ADDI,
296 TILE_OPC_SEQIB_SN, 296 TILEGX_OPC_V2ADDSC,
297 TILE_OPC_SEQIH, 297 TILEGX_OPC_V2ADIFFS,
298 TILE_OPC_SEQIH_SN, 298 TILEGX_OPC_V2AVGS,
299 TILE_OPC_SH, 299 TILEGX_OPC_V2CMPEQ,
300 TILE_OPC_SHADD, 300 TILEGX_OPC_V2CMPEQI,
301 TILE_OPC_SHL, 301 TILEGX_OPC_V2CMPLES,
302 TILE_OPC_SHL_SN, 302 TILEGX_OPC_V2CMPLEU,
303 TILE_OPC_SHLB, 303 TILEGX_OPC_V2CMPLTS,
304 TILE_OPC_SHLB_SN, 304 TILEGX_OPC_V2CMPLTSI,
305 TILE_OPC_SHLH, 305 TILEGX_OPC_V2CMPLTU,
306 TILE_OPC_SHLH_SN, 306 TILEGX_OPC_V2CMPLTUI,
307 TILE_OPC_SHLI, 307 TILEGX_OPC_V2CMPNE,
308 TILE_OPC_SHLI_SN, 308 TILEGX_OPC_V2DOTP,
309 TILE_OPC_SHLIB, 309 TILEGX_OPC_V2DOTPA,
310 TILE_OPC_SHLIB_SN, 310 TILEGX_OPC_V2INT_H,
311 TILE_OPC_SHLIH, 311 TILEGX_OPC_V2INT_L,
312 TILE_OPC_SHLIH_SN, 312 TILEGX_OPC_V2MAXS,
313 TILE_OPC_SHR, 313 TILEGX_OPC_V2MAXSI,
314 TILE_OPC_SHR_SN, 314 TILEGX_OPC_V2MINS,
315 TILE_OPC_SHRB, 315 TILEGX_OPC_V2MINSI,
316 TILE_OPC_SHRB_SN, 316 TILEGX_OPC_V2MNZ,
317 TILE_OPC_SHRH, 317 TILEGX_OPC_V2MULFSC,
318 TILE_OPC_SHRH_SN, 318 TILEGX_OPC_V2MULS,
319 TILE_OPC_SHRI, 319 TILEGX_OPC_V2MULTS,
320 TILE_OPC_SHRI_SN, 320 TILEGX_OPC_V2MZ,
321 TILE_OPC_SHRIB, 321 TILEGX_OPC_V2PACKH,
322 TILE_OPC_SHRIB_SN, 322 TILEGX_OPC_V2PACKL,
323 TILE_OPC_SHRIH, 323 TILEGX_OPC_V2PACKUC,
324 TILE_OPC_SHRIH_SN, 324 TILEGX_OPC_V2SADAS,
325 TILE_OPC_SLT, 325 TILEGX_OPC_V2SADAU,
326 TILE_OPC_SLT_SN, 326 TILEGX_OPC_V2SADS,
327 TILE_OPC_SLT_U, 327 TILEGX_OPC_V2SADU,
328 TILE_OPC_SLT_U_SN, 328 TILEGX_OPC_V2SHL,
329 TILE_OPC_SLTB, 329 TILEGX_OPC_V2SHLI,
330 TILE_OPC_SLTB_SN, 330 TILEGX_OPC_V2SHLSC,
331 TILE_OPC_SLTB_U, 331 TILEGX_OPC_V2SHRS,
332 TILE_OPC_SLTB_U_SN, 332 TILEGX_OPC_V2SHRSI,
333 TILE_OPC_SLTE, 333 TILEGX_OPC_V2SHRU,
334 TILE_OPC_SLTE_SN, 334 TILEGX_OPC_V2SHRUI,
335 TILE_OPC_SLTE_U, 335 TILEGX_OPC_V2SUB,
336 TILE_OPC_SLTE_U_SN, 336 TILEGX_OPC_V2SUBSC,
337 TILE_OPC_SLTEB, 337 TILEGX_OPC_V4ADD,
338 TILE_OPC_SLTEB_SN, 338 TILEGX_OPC_V4ADDSC,
339 TILE_OPC_SLTEB_U, 339 TILEGX_OPC_V4INT_H,
340 TILE_OPC_SLTEB_U_SN, 340 TILEGX_OPC_V4INT_L,
341 TILE_OPC_SLTEH, 341 TILEGX_OPC_V4PACKSC,
342 TILE_OPC_SLTEH_SN, 342 TILEGX_OPC_V4SHL,
343 TILE_OPC_SLTEH_U, 343 TILEGX_OPC_V4SHLSC,
344 TILE_OPC_SLTEH_U_SN, 344 TILEGX_OPC_V4SHRS,
345 TILE_OPC_SLTH, 345 TILEGX_OPC_V4SHRU,
346 TILE_OPC_SLTH_SN, 346 TILEGX_OPC_V4SUB,
347 TILE_OPC_SLTH_U, 347 TILEGX_OPC_V4SUBSC,
348 TILE_OPC_SLTH_U_SN, 348 TILEGX_OPC_WH64,
349 TILE_OPC_SLTI, 349 TILEGX_OPC_XOR,
350 TILE_OPC_SLTI_SN, 350 TILEGX_OPC_XORI,
351 TILE_OPC_SLTI_U, 351 TILEGX_OPC_NONE
352 TILE_OPC_SLTI_U_SN, 352} tilegx_mnemonic;
353 TILE_OPC_SLTIB,
354 TILE_OPC_SLTIB_SN,
355 TILE_OPC_SLTIB_U,
356 TILE_OPC_SLTIB_U_SN,
357 TILE_OPC_SLTIH,
358 TILE_OPC_SLTIH_SN,
359 TILE_OPC_SLTIH_U,
360 TILE_OPC_SLTIH_U_SN,
361 TILE_OPC_SNE,
362 TILE_OPC_SNE_SN,
363 TILE_OPC_SNEB,
364 TILE_OPC_SNEB_SN,
365 TILE_OPC_SNEH,
366 TILE_OPC_SNEH_SN,
367 TILE_OPC_SRA,
368 TILE_OPC_SRA_SN,
369 TILE_OPC_SRAB,
370 TILE_OPC_SRAB_SN,
371 TILE_OPC_SRAH,
372 TILE_OPC_SRAH_SN,
373 TILE_OPC_SRAI,
374 TILE_OPC_SRAI_SN,
375 TILE_OPC_SRAIB,
376 TILE_OPC_SRAIB_SN,
377 TILE_OPC_SRAIH,
378 TILE_OPC_SRAIH_SN,
379 TILE_OPC_SUB,
380 TILE_OPC_SUB_SN,
381 TILE_OPC_SUBB,
382 TILE_OPC_SUBB_SN,
383 TILE_OPC_SUBBS_U,
384 TILE_OPC_SUBBS_U_SN,
385 TILE_OPC_SUBH,
386 TILE_OPC_SUBH_SN,
387 TILE_OPC_SUBHS,
388 TILE_OPC_SUBHS_SN,
389 TILE_OPC_SUBS,
390 TILE_OPC_SUBS_SN,
391 TILE_OPC_SW,
392 TILE_OPC_SWADD,
393 TILE_OPC_SWINT0,
394 TILE_OPC_SWINT1,
395 TILE_OPC_SWINT2,
396 TILE_OPC_SWINT3,
397 TILE_OPC_TBLIDXB0,
398 TILE_OPC_TBLIDXB0_SN,
399 TILE_OPC_TBLIDXB1,
400 TILE_OPC_TBLIDXB1_SN,
401 TILE_OPC_TBLIDXB2,
402 TILE_OPC_TBLIDXB2_SN,
403 TILE_OPC_TBLIDXB3,
404 TILE_OPC_TBLIDXB3_SN,
405 TILE_OPC_TNS,
406 TILE_OPC_TNS_SN,
407 TILE_OPC_WH64,
408 TILE_OPC_XOR,
409 TILE_OPC_XOR_SN,
410 TILE_OPC_XORI,
411 TILE_OPC_XORI_SN,
412 TILE_OPC_NONE
413} tile_mnemonic;
414 353
415/* 64-bit pattern for a { bpt ; nop } bundle. */ 354/* 64-bit pattern for a { bpt ; nop } bundle. */
416#define TILE_BPT_BUNDLE 0x400b3cae70166000ULL 355#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
417 356
418 357
419#define TILE_ELF_MACHINE_CODE EM_TILEPRO 358#define TILE_ELF_MACHINE_CODE EM_TILE64
420 359
421#define TILE_ELF_NAME "elf32-tilepro" 360#define TILE_ELF_NAME "elf32-tile64"
422 361
423 362
424static __inline unsigned int 363static __inline unsigned int
425get_BrOff_SN(tile_bundle_bits num) 364get_BFEnd_X0(tilegx_bundle_bits num)
426{ 365{
427 const unsigned int n = (unsigned int)num; 366 const unsigned int n = (unsigned int)num;
428 return (((n >> 0)) & 0x3ff); 367 return (((n >> 12)) & 0x3f);
429} 368}
430 369
431static __inline unsigned int 370static __inline unsigned int
432get_BrOff_X1(tile_bundle_bits n) 371get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
433{ 372{
434 return (((unsigned int)(n >> 43)) & 0x00007fff) | 373 const unsigned int n = (unsigned int)num;
435 (((unsigned int)(n >> 20)) & 0x00018000); 374 return (((n >> 24)) & 0xf);
436} 375}
437 376
438static __inline unsigned int 377static __inline unsigned int
439get_BrType_X1(tile_bundle_bits n) 378get_BFStart_X0(tilegx_bundle_bits num)
440{ 379{
441 return (((unsigned int)(n >> 31)) & 0xf); 380 const unsigned int n = (unsigned int)num;
381 return (((n >> 18)) & 0x3f);
442} 382}
443 383
444static __inline unsigned int 384static __inline unsigned int
445get_Dest_Imm8_X1(tile_bundle_bits n) 385get_BrOff_X1(tilegx_bundle_bits n)
446{ 386{
447 return (((unsigned int)(n >> 31)) & 0x0000003f) | 387 return (((unsigned int)(n >> 31)) & 0x0000003f) |
448 (((unsigned int)(n >> 43)) & 0x000000c0); 388 (((unsigned int)(n >> 37)) & 0x0001ffc0);
449} 389}
450 390
451static __inline unsigned int 391static __inline unsigned int
452get_Dest_SN(tile_bundle_bits num) 392get_BrType_X1(tilegx_bundle_bits n)
453{ 393{
454 const unsigned int n = (unsigned int)num; 394 return (((unsigned int)(n >> 54)) & 0x1f);
455 return (((n >> 2)) & 0x3);
456} 395}
457 396
458static __inline unsigned int 397static __inline unsigned int
459get_Dest_X0(tile_bundle_bits num) 398get_Dest_Imm8_X1(tilegx_bundle_bits n)
399{
400 return (((unsigned int)(n >> 31)) & 0x0000003f) |
401 (((unsigned int)(n >> 43)) & 0x000000c0);
402}
403
404static __inline unsigned int
405get_Dest_X0(tilegx_bundle_bits num)
460{ 406{
461 const unsigned int n = (unsigned int)num; 407 const unsigned int n = (unsigned int)num;
462 return (((n >> 0)) & 0x3f); 408 return (((n >> 0)) & 0x3f);
463} 409}
464 410
465static __inline unsigned int 411static __inline unsigned int
466get_Dest_X1(tile_bundle_bits n) 412get_Dest_X1(tilegx_bundle_bits n)
467{ 413{
468 return (((unsigned int)(n >> 31)) & 0x3f); 414 return (((unsigned int)(n >> 31)) & 0x3f);
469} 415}
470 416
471static __inline unsigned int 417static __inline unsigned int
472get_Dest_Y0(tile_bundle_bits num) 418get_Dest_Y0(tilegx_bundle_bits num)
473{ 419{
474 const unsigned int n = (unsigned int)num; 420 const unsigned int n = (unsigned int)num;
475 return (((n >> 0)) & 0x3f); 421 return (((n >> 0)) & 0x3f);
476} 422}
477 423
478static __inline unsigned int 424static __inline unsigned int
479get_Dest_Y1(tile_bundle_bits n) 425get_Dest_Y1(tilegx_bundle_bits n)
480{ 426{
481 return (((unsigned int)(n >> 31)) & 0x3f); 427 return (((unsigned int)(n >> 31)) & 0x3f);
482} 428}
483 429
484static __inline unsigned int 430static __inline unsigned int
485get_Imm16_X0(tile_bundle_bits num) 431get_Imm16_X0(tilegx_bundle_bits num)
486{ 432{
487 const unsigned int n = (unsigned int)num; 433 const unsigned int n = (unsigned int)num;
488 return (((n >> 12)) & 0xffff); 434 return (((n >> 12)) & 0xffff);
489} 435}
490 436
491static __inline unsigned int 437static __inline unsigned int
492get_Imm16_X1(tile_bundle_bits n) 438get_Imm16_X1(tilegx_bundle_bits n)
493{ 439{
494 return (((unsigned int)(n >> 43)) & 0xffff); 440 return (((unsigned int)(n >> 43)) & 0xffff);
495} 441}
496 442
497static __inline unsigned int 443static __inline unsigned int
498get_Imm8_SN(tile_bundle_bits num) 444get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
499{
500 const unsigned int n = (unsigned int)num;
501 return (((n >> 0)) & 0xff);
502}
503
504static __inline unsigned int
505get_Imm8_X0(tile_bundle_bits num)
506{ 445{
507 const unsigned int n = (unsigned int)num; 446 const unsigned int n = (unsigned int)num;
508 return (((n >> 12)) & 0xff); 447 return (((n >> 20)) & 0xff);
509} 448}
510 449
511static __inline unsigned int 450static __inline unsigned int
512get_Imm8_X1(tile_bundle_bits n) 451get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
513{ 452{
514 return (((unsigned int)(n >> 43)) & 0xff); 453 return (((unsigned int)(n >> 51)) & 0xff);
515} 454}
516 455
517static __inline unsigned int 456static __inline unsigned int
518get_Imm8_Y0(tile_bundle_bits num) 457get_Imm8_X0(tilegx_bundle_bits num)
519{ 458{
520 const unsigned int n = (unsigned int)num; 459 const unsigned int n = (unsigned int)num;
521 return (((n >> 12)) & 0xff); 460 return (((n >> 12)) & 0xff);
522} 461}
523 462
524static __inline unsigned int 463static __inline unsigned int
525get_Imm8_Y1(tile_bundle_bits n) 464get_Imm8_X1(tilegx_bundle_bits n)
526{ 465{
527 return (((unsigned int)(n >> 43)) & 0xff); 466 return (((unsigned int)(n >> 43)) & 0xff);
528} 467}
529 468
530static __inline unsigned int 469static __inline unsigned int
531get_ImmOpcodeExtension_X0(tile_bundle_bits num) 470get_Imm8_Y0(tilegx_bundle_bits num)
532{
533 const unsigned int n = (unsigned int)num;
534 return (((n >> 20)) & 0x7f);
535}
536
537static __inline unsigned int
538get_ImmOpcodeExtension_X1(tile_bundle_bits n)
539{
540 return (((unsigned int)(n >> 51)) & 0x7f);
541}
542
543static __inline unsigned int
544get_ImmRROpcodeExtension_SN(tile_bundle_bits num)
545{ 471{
546 const unsigned int n = (unsigned int)num; 472 const unsigned int n = (unsigned int)num;
547 return (((n >> 8)) & 0x3); 473 return (((n >> 12)) & 0xff);
548}
549
550static __inline unsigned int
551get_JOffLong_X1(tile_bundle_bits n)
552{
553 return (((unsigned int)(n >> 43)) & 0x00007fff) |
554 (((unsigned int)(n >> 20)) & 0x00018000) |
555 (((unsigned int)(n >> 14)) & 0x001e0000) |
556 (((unsigned int)(n >> 16)) & 0x07e00000) |
557 (((unsigned int)(n >> 31)) & 0x18000000);
558}
559
560static __inline unsigned int
561get_JOff_X1(tile_bundle_bits n)
562{
563 return (((unsigned int)(n >> 43)) & 0x00007fff) |
564 (((unsigned int)(n >> 20)) & 0x00018000) |
565 (((unsigned int)(n >> 14)) & 0x001e0000) |
566 (((unsigned int)(n >> 16)) & 0x07e00000) |
567 (((unsigned int)(n >> 31)) & 0x08000000);
568}
569
570static __inline unsigned int
571get_MF_Imm15_X1(tile_bundle_bits n)
572{
573 return (((unsigned int)(n >> 37)) & 0x00003fff) |
574 (((unsigned int)(n >> 44)) & 0x00004000);
575} 474}
576 475
577static __inline unsigned int 476static __inline unsigned int
578get_MMEnd_X0(tile_bundle_bits num) 477get_Imm8_Y1(tilegx_bundle_bits n)
579{ 478{
580 const unsigned int n = (unsigned int)num; 479 return (((unsigned int)(n >> 43)) & 0xff);
581 return (((n >> 18)) & 0x1f);
582} 480}
583 481
584static __inline unsigned int 482static __inline unsigned int
585get_MMEnd_X1(tile_bundle_bits n) 483get_JumpOff_X1(tilegx_bundle_bits n)
586{ 484{
587 return (((unsigned int)(n >> 49)) & 0x1f); 485 return (((unsigned int)(n >> 31)) & 0x7ffffff);
588} 486}
589 487
590static __inline unsigned int 488static __inline unsigned int
591get_MMStart_X0(tile_bundle_bits num) 489get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
592{ 490{
593 const unsigned int n = (unsigned int)num; 491 return (((unsigned int)(n >> 58)) & 0x1);
594 return (((n >> 23)) & 0x1f);
595} 492}
596 493
597static __inline unsigned int 494static __inline unsigned int
598get_MMStart_X1(tile_bundle_bits n) 495get_MF_Imm14_X1(tilegx_bundle_bits n)
599{ 496{
600 return (((unsigned int)(n >> 54)) & 0x1f); 497 return (((unsigned int)(n >> 37)) & 0x3fff);
601} 498}
602 499
603static __inline unsigned int 500static __inline unsigned int
604get_MT_Imm15_X1(tile_bundle_bits n) 501get_MT_Imm14_X1(tilegx_bundle_bits n)
605{ 502{
606 return (((unsigned int)(n >> 31)) & 0x0000003f) | 503 return (((unsigned int)(n >> 31)) & 0x0000003f) |
607 (((unsigned int)(n >> 37)) & 0x00003fc0) | 504 (((unsigned int)(n >> 37)) & 0x00003fc0);
608 (((unsigned int)(n >> 44)) & 0x00004000);
609} 505}
610 506
611static __inline unsigned int 507static __inline unsigned int
612get_Mode(tile_bundle_bits n) 508get_Mode(tilegx_bundle_bits n)
613{ 509{
614 return (((unsigned int)(n >> 63)) & 0x1); 510 return (((unsigned int)(n >> 62)) & 0x3);
615} 511}
616 512
617static __inline unsigned int 513static __inline unsigned int
618get_NoRegOpcodeExtension_SN(tile_bundle_bits num) 514get_Opcode_X0(tilegx_bundle_bits num)
619{
620 const unsigned int n = (unsigned int)num;
621 return (((n >> 0)) & 0xf);
622}
623
624static __inline unsigned int
625get_Opcode_SN(tile_bundle_bits num)
626{
627 const unsigned int n = (unsigned int)num;
628 return (((n >> 10)) & 0x3f);
629}
630
631static __inline unsigned int
632get_Opcode_X0(tile_bundle_bits num)
633{ 515{
634 const unsigned int n = (unsigned int)num; 516 const unsigned int n = (unsigned int)num;
635 return (((n >> 28)) & 0x7); 517 return (((n >> 28)) & 0x7);
636} 518}
637 519
638static __inline unsigned int 520static __inline unsigned int
639get_Opcode_X1(tile_bundle_bits n) 521get_Opcode_X1(tilegx_bundle_bits n)
640{ 522{
641 return (((unsigned int)(n >> 59)) & 0xf); 523 return (((unsigned int)(n >> 59)) & 0x7);
642} 524}
643 525
644static __inline unsigned int 526static __inline unsigned int
645get_Opcode_Y0(tile_bundle_bits num) 527get_Opcode_Y0(tilegx_bundle_bits num)
646{ 528{
647 const unsigned int n = (unsigned int)num; 529 const unsigned int n = (unsigned int)num;
648 return (((n >> 27)) & 0xf); 530 return (((n >> 27)) & 0xf);
649} 531}
650 532
651static __inline unsigned int 533static __inline unsigned int
652get_Opcode_Y1(tile_bundle_bits n) 534get_Opcode_Y1(tilegx_bundle_bits n)
653{ 535{
654 return (((unsigned int)(n >> 59)) & 0xf); 536 return (((unsigned int)(n >> 58)) & 0xf);
655} 537}
656 538
657static __inline unsigned int 539static __inline unsigned int
658get_Opcode_Y2(tile_bundle_bits n) 540get_Opcode_Y2(tilegx_bundle_bits n)
659{ 541{
660 return (((unsigned int)(n >> 56)) & 0x7); 542 return (((n >> 26)) & 0x00000001) |
661} 543 (((unsigned int)(n >> 56)) & 0x00000002);
662
663static __inline unsigned int
664get_RROpcodeExtension_SN(tile_bundle_bits num)
665{
666 const unsigned int n = (unsigned int)num;
667 return (((n >> 4)) & 0xf);
668} 544}
669 545
670static __inline unsigned int 546static __inline unsigned int
671get_RRROpcodeExtension_X0(tile_bundle_bits num) 547get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
672{ 548{
673 const unsigned int n = (unsigned int)num; 549 const unsigned int n = (unsigned int)num;
674 return (((n >> 18)) & 0x1ff); 550 return (((n >> 18)) & 0x3ff);
675} 551}
676 552
677static __inline unsigned int 553static __inline unsigned int
678get_RRROpcodeExtension_X1(tile_bundle_bits n) 554get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
679{ 555{
680 return (((unsigned int)(n >> 49)) & 0x1ff); 556 return (((unsigned int)(n >> 49)) & 0x3ff);
681} 557}
682 558
683static __inline unsigned int 559static __inline unsigned int
684get_RRROpcodeExtension_Y0(tile_bundle_bits num) 560get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
685{ 561{
686 const unsigned int n = (unsigned int)num; 562 const unsigned int n = (unsigned int)num;
687 return (((n >> 18)) & 0x3); 563 return (((n >> 18)) & 0x3);
688} 564}
689 565
690static __inline unsigned int 566static __inline unsigned int
691get_RRROpcodeExtension_Y1(tile_bundle_bits n) 567get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
692{ 568{
693 return (((unsigned int)(n >> 49)) & 0x3); 569 return (((unsigned int)(n >> 49)) & 0x3);
694} 570}
695 571
696static __inline unsigned int 572static __inline unsigned int
697get_RouteOpcodeExtension_SN(tile_bundle_bits num) 573get_ShAmt_X0(tilegx_bundle_bits num)
698{
699 const unsigned int n = (unsigned int)num;
700 return (((n >> 0)) & 0x3ff);
701}
702
703static __inline unsigned int
704get_S_X0(tile_bundle_bits num)
705{ 574{
706 const unsigned int n = (unsigned int)num; 575 const unsigned int n = (unsigned int)num;
707 return (((n >> 27)) & 0x1); 576 return (((n >> 12)) & 0x3f);
708} 577}
709 578
710static __inline unsigned int 579static __inline unsigned int
711get_S_X1(tile_bundle_bits n) 580get_ShAmt_X1(tilegx_bundle_bits n)
712{ 581{
713 return (((unsigned int)(n >> 58)) & 0x1); 582 return (((unsigned int)(n >> 43)) & 0x3f);
714} 583}
715 584
716static __inline unsigned int 585static __inline unsigned int
717get_ShAmt_X0(tile_bundle_bits num) 586get_ShAmt_Y0(tilegx_bundle_bits num)
718{ 587{
719 const unsigned int n = (unsigned int)num; 588 const unsigned int n = (unsigned int)num;
720 return (((n >> 12)) & 0x1f); 589 return (((n >> 12)) & 0x3f);
721} 590}
722 591
723static __inline unsigned int 592static __inline unsigned int
724get_ShAmt_X1(tile_bundle_bits n) 593get_ShAmt_Y1(tilegx_bundle_bits n)
725{ 594{
726 return (((unsigned int)(n >> 43)) & 0x1f); 595 return (((unsigned int)(n >> 43)) & 0x3f);
727} 596}
728 597
729static __inline unsigned int 598static __inline unsigned int
730get_ShAmt_Y0(tile_bundle_bits num) 599get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
731{ 600{
732 const unsigned int n = (unsigned int)num; 601 const unsigned int n = (unsigned int)num;
733 return (((n >> 12)) & 0x1f); 602 return (((n >> 18)) & 0x3ff);
734} 603}
735 604
736static __inline unsigned int 605static __inline unsigned int
737get_ShAmt_Y1(tile_bundle_bits n) 606get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
738{ 607{
739 return (((unsigned int)(n >> 43)) & 0x1f); 608 return (((unsigned int)(n >> 49)) & 0x3ff);
740} 609}
741 610
742static __inline unsigned int 611static __inline unsigned int
743get_SrcA_X0(tile_bundle_bits num) 612get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
744{ 613{
745 const unsigned int n = (unsigned int)num; 614 const unsigned int n = (unsigned int)num;
746 return (((n >> 6)) & 0x3f); 615 return (((n >> 18)) & 0x3);
747} 616}
748 617
749static __inline unsigned int 618static __inline unsigned int
750get_SrcA_X1(tile_bundle_bits n) 619get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
751{ 620{
752 return (((unsigned int)(n >> 37)) & 0x3f); 621 return (((unsigned int)(n >> 49)) & 0x3);
753} 622}
754 623
755static __inline unsigned int 624static __inline unsigned int
756get_SrcA_Y0(tile_bundle_bits num) 625get_SrcA_X0(tilegx_bundle_bits num)
757{ 626{
758 const unsigned int n = (unsigned int)num; 627 const unsigned int n = (unsigned int)num;
759 return (((n >> 6)) & 0x3f); 628 return (((n >> 6)) & 0x3f);
760} 629}
761 630
762static __inline unsigned int 631static __inline unsigned int
763get_SrcA_Y1(tile_bundle_bits n) 632get_SrcA_X1(tilegx_bundle_bits n)
764{ 633{
765 return (((unsigned int)(n >> 37)) & 0x3f); 634 return (((unsigned int)(n >> 37)) & 0x3f);
766} 635}
767 636
768static __inline unsigned int 637static __inline unsigned int
769get_SrcA_Y2(tile_bundle_bits n) 638get_SrcA_Y0(tilegx_bundle_bits num)
770{ 639{
771 return (((n >> 26)) & 0x00000001) | 640 const unsigned int n = (unsigned int)num;
772 (((unsigned int)(n >> 50)) & 0x0000003e); 641 return (((n >> 6)) & 0x3f);
773} 642}
774 643
775static __inline unsigned int 644static __inline unsigned int
776get_SrcBDest_Y2(tile_bundle_bits num) 645get_SrcA_Y1(tilegx_bundle_bits n)
777{ 646{
778 const unsigned int n = (unsigned int)num; 647 return (((unsigned int)(n >> 37)) & 0x3f);
779 return (((n >> 20)) & 0x3f);
780} 648}
781 649
782static __inline unsigned int 650static __inline unsigned int
783get_SrcB_X0(tile_bundle_bits num) 651get_SrcA_Y2(tilegx_bundle_bits num)
784{ 652{
785 const unsigned int n = (unsigned int)num; 653 const unsigned int n = (unsigned int)num;
786 return (((n >> 12)) & 0x3f); 654 return (((n >> 20)) & 0x3f);
787} 655}
788 656
789static __inline unsigned int 657static __inline unsigned int
790get_SrcB_X1(tile_bundle_bits n) 658get_SrcBDest_Y2(tilegx_bundle_bits n)
791{ 659{
792 return (((unsigned int)(n >> 43)) & 0x3f); 660 return (((unsigned int)(n >> 51)) & 0x3f);
793} 661}
794 662
795static __inline unsigned int 663static __inline unsigned int
796get_SrcB_Y0(tile_bundle_bits num) 664get_SrcB_X0(tilegx_bundle_bits num)
797{ 665{
798 const unsigned int n = (unsigned int)num; 666 const unsigned int n = (unsigned int)num;
799 return (((n >> 12)) & 0x3f); 667 return (((n >> 12)) & 0x3f);
800} 668}
801 669
802static __inline unsigned int 670static __inline unsigned int
803get_SrcB_Y1(tile_bundle_bits n) 671get_SrcB_X1(tilegx_bundle_bits n)
804{ 672{
805 return (((unsigned int)(n >> 43)) & 0x3f); 673 return (((unsigned int)(n >> 43)) & 0x3f);
806} 674}
807 675
808static __inline unsigned int 676static __inline unsigned int
809get_Src_SN(tile_bundle_bits num) 677get_SrcB_Y0(tilegx_bundle_bits num)
810{ 678{
811 const unsigned int n = (unsigned int)num; 679 const unsigned int n = (unsigned int)num;
812 return (((n >> 0)) & 0x3); 680 return (((n >> 12)) & 0x3f);
813}
814
815static __inline unsigned int
816get_UnOpcodeExtension_X0(tile_bundle_bits num)
817{
818 const unsigned int n = (unsigned int)num;
819 return (((n >> 12)) & 0x1f);
820}
821
822static __inline unsigned int
823get_UnOpcodeExtension_X1(tile_bundle_bits n)
824{
825 return (((unsigned int)(n >> 43)) & 0x1f);
826}
827
828static __inline unsigned int
829get_UnOpcodeExtension_Y0(tile_bundle_bits num)
830{
831 const unsigned int n = (unsigned int)num;
832 return (((n >> 12)) & 0x1f);
833} 681}
834 682
835static __inline unsigned int 683static __inline unsigned int
836get_UnOpcodeExtension_Y1(tile_bundle_bits n) 684get_SrcB_Y1(tilegx_bundle_bits n)
837{ 685{
838 return (((unsigned int)(n >> 43)) & 0x1f); 686 return (((unsigned int)(n >> 43)) & 0x3f);
839} 687}
840 688
841static __inline unsigned int 689static __inline unsigned int
842get_UnShOpcodeExtension_X0(tile_bundle_bits num) 690get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
843{ 691{
844 const unsigned int n = (unsigned int)num; 692 const unsigned int n = (unsigned int)num;
845 return (((n >> 17)) & 0x3ff); 693 return (((n >> 12)) & 0x3f);
846} 694}
847 695
848static __inline unsigned int 696static __inline unsigned int
849get_UnShOpcodeExtension_X1(tile_bundle_bits n) 697get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
850{ 698{
851 return (((unsigned int)(n >> 48)) & 0x3ff); 699 return (((unsigned int)(n >> 43)) & 0x3f);
852} 700}
853 701
854static __inline unsigned int 702static __inline unsigned int
855get_UnShOpcodeExtension_Y0(tile_bundle_bits num) 703get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
856{ 704{
857 const unsigned int n = (unsigned int)num; 705 const unsigned int n = (unsigned int)num;
858 return (((n >> 17)) & 0x7); 706 return (((n >> 12)) & 0x3f);
859} 707}
860 708
861static __inline unsigned int 709static __inline unsigned int
862get_UnShOpcodeExtension_Y1(tile_bundle_bits n) 710get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
863{ 711{
864 return (((unsigned int)(n >> 48)) & 0x7); 712 return (((unsigned int)(n >> 43)) & 0x3f);
865} 713}
866 714
867 715
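
All of the get_*/create_* accessors follow one pattern: each field sits at a fixed shift inside the 64-bit bundle, get_ masks it out and create_ masks and shifts it back in (occasionally split across two discontiguous ranges, as in BrOff_X1). A small user-space sketch of the round trip for the Imm16_X0 field, using the same shift and mask shown above; the lower-case names mark it as an illustration rather than the header's own functions:

#include <stdint.h>
#include <stdio.h>

typedef unsigned long long tilegx_bundle_bits;

/* Same layout as get_Imm16_X0()/create_Imm16_X0() above:
 * a 16-bit immediate occupying bits [27:12] of the bundle. */
static unsigned int get_imm16_x0(tilegx_bundle_bits num)
{
	return ((unsigned int)num >> 12) & 0xffff;
}

static tilegx_bundle_bits create_imm16_x0(int num)
{
	return (tilegx_bundle_bits)((unsigned int)num & 0xffff) << 12;
}

int main(void)
{
	tilegx_bundle_bits bundle = 0;

	bundle |= create_imm16_x0(0x1234);
	printf("imm16 = 0x%x\n", get_imm16_x0(bundle));	/* prints 0x1234 */
	return 0;
}
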
@@ -874,546 +722,441 @@ sign_extend(int n, int num_bits)
874 722
875 723
876 724
877static __inline tile_bundle_bits 725static __inline tilegx_bundle_bits
878create_BrOff_SN(int num) 726create_BFEnd_X0(int num)
879{ 727{
880 const unsigned int n = (unsigned int)num; 728 const unsigned int n = (unsigned int)num;
881 return ((n & 0x3ff) << 0); 729 return ((n & 0x3f) << 12);
882} 730}
883 731
884static __inline tile_bundle_bits 732static __inline tilegx_bundle_bits
885create_BrOff_X1(int num) 733create_BFOpcodeExtension_X0(int num)
886{ 734{
887 const unsigned int n = (unsigned int)num; 735 const unsigned int n = (unsigned int)num;
888 return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | 736 return ((n & 0xf) << 24);
889 (((tile_bundle_bits)(n & 0x00018000)) << 20);
890} 737}
891 738
892static __inline tile_bundle_bits 739static __inline tilegx_bundle_bits
893create_BrType_X1(int num) 740create_BFStart_X0(int num)
894{ 741{
895 const unsigned int n = (unsigned int)num; 742 const unsigned int n = (unsigned int)num;
896 return (((tile_bundle_bits)(n & 0xf)) << 31); 743 return ((n & 0x3f) << 18);
897} 744}
898 745
899static __inline tile_bundle_bits 746static __inline tilegx_bundle_bits
900create_Dest_Imm8_X1(int num) 747create_BrOff_X1(int num)
901{ 748{
902 const unsigned int n = (unsigned int)num; 749 const unsigned int n = (unsigned int)num;
903 return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | 750 return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
904 (((tile_bundle_bits)(n & 0x000000c0)) << 43); 751 (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
905} 752}
906 753
907static __inline tile_bundle_bits 754static __inline tilegx_bundle_bits
908create_Dest_SN(int num) 755create_BrType_X1(int num)
756{
757 const unsigned int n = (unsigned int)num;
758 return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
759}
760
761static __inline tilegx_bundle_bits
762create_Dest_Imm8_X1(int num)
909{ 763{
910 const unsigned int n = (unsigned int)num; 764 const unsigned int n = (unsigned int)num;
911 return ((n & 0x3) << 2); 765 return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
766 (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
912} 767}
913 768
914static __inline tile_bundle_bits 769static __inline tilegx_bundle_bits
915create_Dest_X0(int num) 770create_Dest_X0(int num)
916{ 771{
917 const unsigned int n = (unsigned int)num; 772 const unsigned int n = (unsigned int)num;
918 return ((n & 0x3f) << 0); 773 return ((n & 0x3f) << 0);
919} 774}
920 775
921static __inline tile_bundle_bits 776static __inline tilegx_bundle_bits
922create_Dest_X1(int num) 777create_Dest_X1(int num)
923{ 778{
924 const unsigned int n = (unsigned int)num; 779 const unsigned int n = (unsigned int)num;
925 return (((tile_bundle_bits)(n & 0x3f)) << 31); 780 return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
926} 781}
927 782
928static __inline tile_bundle_bits 783static __inline tilegx_bundle_bits
929create_Dest_Y0(int num) 784create_Dest_Y0(int num)
930{ 785{
931 const unsigned int n = (unsigned int)num; 786 const unsigned int n = (unsigned int)num;
932 return ((n & 0x3f) << 0); 787 return ((n & 0x3f) << 0);
933} 788}
934 789
935static __inline tile_bundle_bits 790static __inline tilegx_bundle_bits
936create_Dest_Y1(int num) 791create_Dest_Y1(int num)
937{ 792{
938 const unsigned int n = (unsigned int)num; 793 const unsigned int n = (unsigned int)num;
939 return (((tile_bundle_bits)(n & 0x3f)) << 31); 794 return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
940} 795}
941 796
942static __inline tile_bundle_bits 797static __inline tilegx_bundle_bits
943create_Imm16_X0(int num) 798create_Imm16_X0(int num)
944{ 799{
945 const unsigned int n = (unsigned int)num; 800 const unsigned int n = (unsigned int)num;
946 return ((n & 0xffff) << 12); 801 return ((n & 0xffff) << 12);
947} 802}
948 803
949static __inline tile_bundle_bits 804static __inline tilegx_bundle_bits
950create_Imm16_X1(int num) 805create_Imm16_X1(int num)
951{ 806{
952 const unsigned int n = (unsigned int)num; 807 const unsigned int n = (unsigned int)num;
953 return (((tile_bundle_bits)(n & 0xffff)) << 43); 808 return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
954} 809}
955 810
956static __inline tile_bundle_bits 811static __inline tilegx_bundle_bits
957create_Imm8_SN(int num) 812create_Imm8OpcodeExtension_X0(int num)
958{ 813{
959 const unsigned int n = (unsigned int)num; 814 const unsigned int n = (unsigned int)num;
960 return ((n & 0xff) << 0); 815 return ((n & 0xff) << 20);
961} 816}
962 817
963static __inline tile_bundle_bits 818static __inline tilegx_bundle_bits
819create_Imm8OpcodeExtension_X1(int num)
820{
821 const unsigned int n = (unsigned int)num;
822 return (((tilegx_bundle_bits)(n & 0xff)) << 51);
823}
824
825static __inline tilegx_bundle_bits
964create_Imm8_X0(int num) 826create_Imm8_X0(int num)
965{ 827{
966 const unsigned int n = (unsigned int)num; 828 const unsigned int n = (unsigned int)num;
967 return ((n & 0xff) << 12); 829 return ((n & 0xff) << 12);
968} 830}
969 831
970static __inline tile_bundle_bits 832static __inline tilegx_bundle_bits
971create_Imm8_X1(int num) 833create_Imm8_X1(int num)
972{ 834{
973 const unsigned int n = (unsigned int)num; 835 const unsigned int n = (unsigned int)num;
974 return (((tile_bundle_bits)(n & 0xff)) << 43); 836 return (((tilegx_bundle_bits)(n & 0xff)) << 43);
975} 837}
976 838
977static __inline tile_bundle_bits 839static __inline tilegx_bundle_bits
978create_Imm8_Y0(int num) 840create_Imm8_Y0(int num)
979{ 841{
980 const unsigned int n = (unsigned int)num; 842 const unsigned int n = (unsigned int)num;
981 return ((n & 0xff) << 12); 843 return ((n & 0xff) << 12);
982} 844}
983 845
984static __inline tile_bundle_bits 846static __inline tilegx_bundle_bits
985create_Imm8_Y1(int num) 847create_Imm8_Y1(int num)
986{ 848{
987 const unsigned int n = (unsigned int)num; 849 const unsigned int n = (unsigned int)num;
988 return (((tile_bundle_bits)(n & 0xff)) << 43); 850 return (((tilegx_bundle_bits)(n & 0xff)) << 43);
989}
990
991static __inline tile_bundle_bits
992create_ImmOpcodeExtension_X0(int num)
993{
994 const unsigned int n = (unsigned int)num;
995 return ((n & 0x7f) << 20);
996}
997
998static __inline tile_bundle_bits
999create_ImmOpcodeExtension_X1(int num)
1000{
1001 const unsigned int n = (unsigned int)num;
1002 return (((tile_bundle_bits)(n & 0x7f)) << 51);
1003}
1004
1005static __inline tile_bundle_bits
1006create_ImmRROpcodeExtension_SN(int num)
1007{
1008 const unsigned int n = (unsigned int)num;
1009 return ((n & 0x3) << 8);
1010}
1011
1012static __inline tile_bundle_bits
1013create_JOffLong_X1(int num)
1014{
1015 const unsigned int n = (unsigned int)num;
1016 return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
1017 (((tile_bundle_bits)(n & 0x00018000)) << 20) |
1018 (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
1019 (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
1020 (((tile_bundle_bits)(n & 0x18000000)) << 31);
1021}
1022
1023static __inline tile_bundle_bits
1024create_JOff_X1(int num)
1025{
1026 const unsigned int n = (unsigned int)num;
1027 return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
1028 (((tile_bundle_bits)(n & 0x00018000)) << 20) |
1029 (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
1030 (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
1031 (((tile_bundle_bits)(n & 0x08000000)) << 31);
1032}
1033
1034static __inline tile_bundle_bits
1035create_MF_Imm15_X1(int num)
1036{
1037 const unsigned int n = (unsigned int)num;
1038 return (((tile_bundle_bits)(n & 0x00003fff)) << 37) |
1039 (((tile_bundle_bits)(n & 0x00004000)) << 44);
1040} 851}
1041 852
1042static __inline tile_bundle_bits 853static __inline tilegx_bundle_bits
1043create_MMEnd_X0(int num) 854create_JumpOff_X1(int num)
1044{ 855{
1045 const unsigned int n = (unsigned int)num; 856 const unsigned int n = (unsigned int)num;
1046 return ((n & 0x1f) << 18); 857 return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
1047} 858}
1048 859
1049static __inline tile_bundle_bits 860static __inline tilegx_bundle_bits
1050create_MMEnd_X1(int num) 861create_JumpOpcodeExtension_X1(int num)
1051{ 862{
1052 const unsigned int n = (unsigned int)num; 863 const unsigned int n = (unsigned int)num;
1053 return (((tile_bundle_bits)(n & 0x1f)) << 49); 864 return (((tilegx_bundle_bits)(n & 0x1)) << 58);
1054} 865}
1055 866
1056static __inline tile_bundle_bits 867static __inline tilegx_bundle_bits
1057create_MMStart_X0(int num) 868create_MF_Imm14_X1(int num)
1058{ 869{
1059 const unsigned int n = (unsigned int)num; 870 const unsigned int n = (unsigned int)num;
1060 return ((n & 0x1f) << 23); 871 return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
1061} 872}
1062 873
1063static __inline tile_bundle_bits 874static __inline tilegx_bundle_bits
1064create_MMStart_X1(int num) 875create_MT_Imm14_X1(int num)
1065{ 876{
1066 const unsigned int n = (unsigned int)num; 877 const unsigned int n = (unsigned int)num;
1067 return (((tile_bundle_bits)(n & 0x1f)) << 54); 878 return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
879 (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
1068} 880}
1069 881
1070static __inline tile_bundle_bits 882static __inline tilegx_bundle_bits
1071create_MT_Imm15_X1(int num)
1072{
1073 const unsigned int n = (unsigned int)num;
1074 return (((tile_bundle_bits)(n & 0x0000003f)) << 31) |
1075 (((tile_bundle_bits)(n & 0x00003fc0)) << 37) |
1076 (((tile_bundle_bits)(n & 0x00004000)) << 44);
1077}
1078
1079static __inline tile_bundle_bits
1080create_Mode(int num) 883create_Mode(int num)
1081{ 884{
1082 const unsigned int n = (unsigned int)num; 885 const unsigned int n = (unsigned int)num;
1083 return (((tile_bundle_bits)(n & 0x1)) << 63); 886 return (((tilegx_bundle_bits)(n & 0x3)) << 62);
1084} 887}
1085 888
1086static __inline tile_bundle_bits 889static __inline tilegx_bundle_bits
1087create_NoRegOpcodeExtension_SN(int num)
1088{
1089 const unsigned int n = (unsigned int)num;
1090 return ((n & 0xf) << 0);
1091}
1092
1093static __inline tile_bundle_bits
1094create_Opcode_SN(int num)
1095{
1096 const unsigned int n = (unsigned int)num;
1097 return ((n & 0x3f) << 10);
1098}
1099
1100static __inline tile_bundle_bits
1101create_Opcode_X0(int num) 890create_Opcode_X0(int num)
1102{ 891{
1103 const unsigned int n = (unsigned int)num; 892 const unsigned int n = (unsigned int)num;
1104 return ((n & 0x7) << 28); 893 return ((n & 0x7) << 28);
1105} 894}
1106 895
1107static __inline tile_bundle_bits 896static __inline tilegx_bundle_bits
1108create_Opcode_X1(int num) 897create_Opcode_X1(int num)
1109{ 898{
1110 const unsigned int n = (unsigned int)num; 899 const unsigned int n = (unsigned int)num;
1111 return (((tile_bundle_bits)(n & 0xf)) << 59); 900 return (((tilegx_bundle_bits)(n & 0x7)) << 59);
1112} 901}
1113 902
1114static __inline tile_bundle_bits 903static __inline tilegx_bundle_bits
1115create_Opcode_Y0(int num) 904create_Opcode_Y0(int num)
1116{ 905{
1117 const unsigned int n = (unsigned int)num; 906 const unsigned int n = (unsigned int)num;
1118 return ((n & 0xf) << 27); 907 return ((n & 0xf) << 27);
1119} 908}
1120 909
1121static __inline tile_bundle_bits 910static __inline tilegx_bundle_bits
1122create_Opcode_Y1(int num) 911create_Opcode_Y1(int num)
1123{ 912{
1124 const unsigned int n = (unsigned int)num; 913 const unsigned int n = (unsigned int)num;
1125 return (((tile_bundle_bits)(n & 0xf)) << 59); 914 return (((tilegx_bundle_bits)(n & 0xf)) << 58);
1126} 915}
1127 916
1128static __inline tile_bundle_bits 917static __inline tilegx_bundle_bits
1129create_Opcode_Y2(int num) 918create_Opcode_Y2(int num)
1130{ 919{
1131 const unsigned int n = (unsigned int)num; 920 const unsigned int n = (unsigned int)num;
1132 return (((tile_bundle_bits)(n & 0x7)) << 56); 921 return ((n & 0x00000001) << 26) |
1133} 922 (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
1134
1135static __inline tile_bundle_bits
1136create_RROpcodeExtension_SN(int num)
1137{
1138 const unsigned int n = (unsigned int)num;
1139 return ((n & 0xf) << 4);
1140} 923}
1141 924
1142static __inline tile_bundle_bits 925static __inline tilegx_bundle_bits
1143create_RRROpcodeExtension_X0(int num) 926create_RRROpcodeExtension_X0(int num)
1144{ 927{
1145 const unsigned int n = (unsigned int)num; 928 const unsigned int n = (unsigned int)num;
1146 return ((n & 0x1ff) << 18); 929 return ((n & 0x3ff) << 18);
1147} 930}
1148 931
1149static __inline tile_bundle_bits 932static __inline tilegx_bundle_bits
1150create_RRROpcodeExtension_X1(int num) 933create_RRROpcodeExtension_X1(int num)
1151{ 934{
1152 const unsigned int n = (unsigned int)num; 935 const unsigned int n = (unsigned int)num;
1153 return (((tile_bundle_bits)(n & 0x1ff)) << 49); 936 return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
1154} 937}
1155 938
1156static __inline tile_bundle_bits 939static __inline tilegx_bundle_bits
1157create_RRROpcodeExtension_Y0(int num) 940create_RRROpcodeExtension_Y0(int num)
1158{ 941{
1159 const unsigned int n = (unsigned int)num; 942 const unsigned int n = (unsigned int)num;
1160 return ((n & 0x3) << 18); 943 return ((n & 0x3) << 18);
1161} 944}
1162 945
1163static __inline tile_bundle_bits 946static __inline tilegx_bundle_bits
1164create_RRROpcodeExtension_Y1(int num) 947create_RRROpcodeExtension_Y1(int num)
1165{ 948{
1166 const unsigned int n = (unsigned int)num; 949 const unsigned int n = (unsigned int)num;
1167 return (((tile_bundle_bits)(n & 0x3)) << 49); 950 return (((tilegx_bundle_bits)(n & 0x3)) << 49);
1168} 951}
1169 952
1170static __inline tile_bundle_bits 953static __inline tilegx_bundle_bits
1171create_RouteOpcodeExtension_SN(int num) 954create_ShAmt_X0(int num)
1172{ 955{
1173 const unsigned int n = (unsigned int)num; 956 const unsigned int n = (unsigned int)num;
1174 return ((n & 0x3ff) << 0); 957 return ((n & 0x3f) << 12);
1175} 958}
1176 959
1177static __inline tile_bundle_bits 960static __inline tilegx_bundle_bits
1178create_S_X0(int num) 961create_ShAmt_X1(int num)
1179{ 962{
1180 const unsigned int n = (unsigned int)num; 963 const unsigned int n = (unsigned int)num;
1181 return ((n & 0x1) << 27); 964 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1182} 965}
1183 966
1184static __inline tile_bundle_bits 967static __inline tilegx_bundle_bits
1185create_S_X1(int num) 968create_ShAmt_Y0(int num)
1186{ 969{
1187 const unsigned int n = (unsigned int)num; 970 const unsigned int n = (unsigned int)num;
1188 return (((tile_bundle_bits)(n & 0x1)) << 58); 971 return ((n & 0x3f) << 12);
1189} 972}
1190 973
1191static __inline tile_bundle_bits 974static __inline tilegx_bundle_bits
1192create_ShAmt_X0(int num) 975create_ShAmt_Y1(int num)
1193{ 976{
1194 const unsigned int n = (unsigned int)num; 977 const unsigned int n = (unsigned int)num;
1195 return ((n & 0x1f) << 12); 978 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1196} 979}
1197 980
1198static __inline tile_bundle_bits 981static __inline tilegx_bundle_bits
1199create_ShAmt_X1(int num) 982create_ShiftOpcodeExtension_X0(int num)
1200{ 983{
1201 const unsigned int n = (unsigned int)num; 984 const unsigned int n = (unsigned int)num;
1202 return (((tile_bundle_bits)(n & 0x1f)) << 43); 985 return ((n & 0x3ff) << 18);
1203} 986}
1204 987
1205static __inline tile_bundle_bits 988static __inline tilegx_bundle_bits
1206create_ShAmt_Y0(int num) 989create_ShiftOpcodeExtension_X1(int num)
1207{ 990{
1208 const unsigned int n = (unsigned int)num; 991 const unsigned int n = (unsigned int)num;
1209 return ((n & 0x1f) << 12); 992 return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
1210} 993}
1211 994
1212static __inline tile_bundle_bits 995static __inline tilegx_bundle_bits
1213create_ShAmt_Y1(int num) 996create_ShiftOpcodeExtension_Y0(int num)
1214{ 997{
1215 const unsigned int n = (unsigned int)num; 998 const unsigned int n = (unsigned int)num;
1216 return (((tile_bundle_bits)(n & 0x1f)) << 43); 999 return ((n & 0x3) << 18);
1217} 1000}
1218 1001
1219static __inline tile_bundle_bits 1002static __inline tilegx_bundle_bits
1003create_ShiftOpcodeExtension_Y1(int num)
1004{
1005 const unsigned int n = (unsigned int)num;
1006 return (((tilegx_bundle_bits)(n & 0x3)) << 49);
1007}
1008
1009static __inline tilegx_bundle_bits
1220create_SrcA_X0(int num) 1010create_SrcA_X0(int num)
1221{ 1011{
1222 const unsigned int n = (unsigned int)num; 1012 const unsigned int n = (unsigned int)num;
1223 return ((n & 0x3f) << 6); 1013 return ((n & 0x3f) << 6);
1224} 1014}
1225 1015
1226static __inline tile_bundle_bits 1016static __inline tilegx_bundle_bits
1227create_SrcA_X1(int num) 1017create_SrcA_X1(int num)
1228{ 1018{
1229 const unsigned int n = (unsigned int)num; 1019 const unsigned int n = (unsigned int)num;
1230 return (((tile_bundle_bits)(n & 0x3f)) << 37); 1020 return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
1231} 1021}
1232 1022
1233static __inline tile_bundle_bits 1023static __inline tilegx_bundle_bits
1234create_SrcA_Y0(int num) 1024create_SrcA_Y0(int num)
1235{ 1025{
1236 const unsigned int n = (unsigned int)num; 1026 const unsigned int n = (unsigned int)num;
1237 return ((n & 0x3f) << 6); 1027 return ((n & 0x3f) << 6);
1238} 1028}
1239 1029
1240static __inline tile_bundle_bits 1030static __inline tilegx_bundle_bits
1241create_SrcA_Y1(int num) 1031create_SrcA_Y1(int num)
1242{ 1032{
1243 const unsigned int n = (unsigned int)num; 1033 const unsigned int n = (unsigned int)num;
1244 return (((tile_bundle_bits)(n & 0x3f)) << 37); 1034 return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
1245} 1035}
1246 1036
1247static __inline tile_bundle_bits 1037static __inline tilegx_bundle_bits
1248create_SrcA_Y2(int num) 1038create_SrcA_Y2(int num)
1249{ 1039{
1250 const unsigned int n = (unsigned int)num; 1040 const unsigned int n = (unsigned int)num;
1251 return ((n & 0x00000001) << 26) | 1041 return ((n & 0x3f) << 20);
1252 (((tile_bundle_bits)(n & 0x0000003e)) << 50);
1253} 1042}
1254 1043
1255static __inline tile_bundle_bits 1044static __inline tilegx_bundle_bits
1256create_SrcBDest_Y2(int num) 1045create_SrcBDest_Y2(int num)
1257{ 1046{
1258 const unsigned int n = (unsigned int)num; 1047 const unsigned int n = (unsigned int)num;
1259 return ((n & 0x3f) << 20); 1048 return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
1260} 1049}
1261 1050
1262static __inline tile_bundle_bits 1051static __inline tilegx_bundle_bits
1263create_SrcB_X0(int num) 1052create_SrcB_X0(int num)
1264{ 1053{
1265 const unsigned int n = (unsigned int)num; 1054 const unsigned int n = (unsigned int)num;
1266 return ((n & 0x3f) << 12); 1055 return ((n & 0x3f) << 12);
1267} 1056}
1268 1057
1269static __inline tile_bundle_bits 1058static __inline tilegx_bundle_bits
1270create_SrcB_X1(int num) 1059create_SrcB_X1(int num)
1271{ 1060{
1272 const unsigned int n = (unsigned int)num; 1061 const unsigned int n = (unsigned int)num;
1273 return (((tile_bundle_bits)(n & 0x3f)) << 43); 1062 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1274} 1063}
1275 1064
1276static __inline tile_bundle_bits 1065static __inline tilegx_bundle_bits
1277create_SrcB_Y0(int num) 1066create_SrcB_Y0(int num)
1278{ 1067{
1279 const unsigned int n = (unsigned int)num; 1068 const unsigned int n = (unsigned int)num;
1280 return ((n & 0x3f) << 12); 1069 return ((n & 0x3f) << 12);
1281} 1070}
1282 1071
1283static __inline tile_bundle_bits 1072static __inline tilegx_bundle_bits
1284create_SrcB_Y1(int num) 1073create_SrcB_Y1(int num)
1285{ 1074{
1286 const unsigned int n = (unsigned int)num; 1075 const unsigned int n = (unsigned int)num;
1287 return (((tile_bundle_bits)(n & 0x3f)) << 43); 1076 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1288} 1077}
1289 1078
1290static __inline tile_bundle_bits 1079static __inline tilegx_bundle_bits
1291create_Src_SN(int num) 1080create_UnaryOpcodeExtension_X0(int num)
1292{ 1081{
1293 const unsigned int n = (unsigned int)num; 1082 const unsigned int n = (unsigned int)num;
1294 return ((n & 0x3) << 0); 1083 return ((n & 0x3f) << 12);
1295}
1296
1297static __inline tile_bundle_bits
1298create_UnOpcodeExtension_X0(int num)
1299{
1300 const unsigned int n = (unsigned int)num;
1301 return ((n & 0x1f) << 12);
1302}
1303
1304static __inline tile_bundle_bits
1305create_UnOpcodeExtension_X1(int num)
1306{
1307 const unsigned int n = (unsigned int)num;
1308 return (((tile_bundle_bits)(n & 0x1f)) << 43);
1309}
1310
1311static __inline tile_bundle_bits
1312create_UnOpcodeExtension_Y0(int num)
1313{
1314 const unsigned int n = (unsigned int)num;
1315 return ((n & 0x1f) << 12);
1316}
1317
1318static __inline tile_bundle_bits
1319create_UnOpcodeExtension_Y1(int num)
1320{
1321 const unsigned int n = (unsigned int)num;
1322 return (((tile_bundle_bits)(n & 0x1f)) << 43);
1323}
1324
1325static __inline tile_bundle_bits
1326create_UnShOpcodeExtension_X0(int num)
1327{
1328 const unsigned int n = (unsigned int)num;
1329 return ((n & 0x3ff) << 17);
1330} 1084}
1331 1085
1332static __inline tile_bundle_bits 1086static __inline tilegx_bundle_bits
1333create_UnShOpcodeExtension_X1(int num) 1087create_UnaryOpcodeExtension_X1(int num)
1334{ 1088{
1335 const unsigned int n = (unsigned int)num; 1089 const unsigned int n = (unsigned int)num;
1336 return (((tile_bundle_bits)(n & 0x3ff)) << 48); 1090 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1337} 1091}
1338 1092
1339static __inline tile_bundle_bits 1093static __inline tilegx_bundle_bits
1340create_UnShOpcodeExtension_Y0(int num) 1094create_UnaryOpcodeExtension_Y0(int num)
1341{ 1095{
1342 const unsigned int n = (unsigned int)num; 1096 const unsigned int n = (unsigned int)num;
1343 return ((n & 0x7) << 17); 1097 return ((n & 0x3f) << 12);
1344} 1098}
1345 1099
1346static __inline tile_bundle_bits 1100static __inline tilegx_bundle_bits
1347create_UnShOpcodeExtension_Y1(int num) 1101create_UnaryOpcodeExtension_Y1(int num)
1348{ 1102{
1349 const unsigned int n = (unsigned int)num; 1103 const unsigned int n = (unsigned int)num;
1350 return (((tile_bundle_bits)(n & 0x7)) << 48); 1104 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1351} 1105}
1352 1106
1353 1107
1354
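
Editor's illustration (not part of the patch): each create_*() helper above masks one operand value to its field width and shifts it to that field's bit position inside the 64-bit tilegx bundle, so a slot is assembled by ORing the pieces together. A minimal sketch of that pattern follows; the opcode and extension values are placeholders, and create_Dest_X0() is assumed to be defined earlier in this header (it is not shown in this hunk).

static __inline tilegx_bundle_bits
example_build_imm8_x0(int opcode, int ext, int dest, int srca, int imm8)
{
	/* OR together the non-overlapping X0 fields produced by the helpers above. */
	return create_Opcode_X0(opcode) |
	       create_Imm8OpcodeExtension_X0(ext) |
	       create_Dest_X0(dest) |
	       create_SrcA_X0(srca) |
	       create_Imm8_X0(imm8);
}
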
1355typedef enum 1108typedef enum
1356{ 1109{
1357 TILE_PIPELINE_X0, 1110 TILEGX_PIPELINE_X0,
1358 TILE_PIPELINE_X1, 1111 TILEGX_PIPELINE_X1,
1359 TILE_PIPELINE_Y0, 1112 TILEGX_PIPELINE_Y0,
1360 TILE_PIPELINE_Y1, 1113 TILEGX_PIPELINE_Y1,
1361 TILE_PIPELINE_Y2, 1114 TILEGX_PIPELINE_Y2,
1362} tile_pipeline; 1115} tilegx_pipeline;
1363 1116
1364#define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) 1117#define tilegx_is_x_pipeline(p) ((int)(p) <= (int)TILEGX_PIPELINE_X1)
1365 1118
1366typedef enum 1119typedef enum
1367{ 1120{
1368 TILE_OP_TYPE_REGISTER, 1121 TILEGX_OP_TYPE_REGISTER,
1369 TILE_OP_TYPE_IMMEDIATE, 1122 TILEGX_OP_TYPE_IMMEDIATE,
1370 TILE_OP_TYPE_ADDRESS, 1123 TILEGX_OP_TYPE_ADDRESS,
1371 TILE_OP_TYPE_SPR 1124 TILEGX_OP_TYPE_SPR
1372} tile_operand_type; 1125} tilegx_operand_type;
1373 1126
1374/* This is the bit that determines if a bundle is in the Y encoding. */ 1127/* These are the bits that determine if a bundle is in the X encoding. */
1375#define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) 1128#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
1376 1129
1377enum 1130enum
1378{ 1131{
1379 /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ 1132 /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
1380 TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, 1133 TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
1381 1134
1382 /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ 1135 /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
1383 TILE_NUM_PIPELINE_ENCODINGS = 5, 1136 TILEGX_NUM_PIPELINE_ENCODINGS = 5,
1384 1137
1385 /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ 1138 /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
1386 TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, 1139 TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
1387 1140
1388 /* Instructions take this many bytes. */ 1141 /* Instructions take this many bytes. */
1389 TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, 1142 TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,
1390 1143
1391 /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ 1144 /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
1392 TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, 1145 TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
1393 1146
1394 /* Bundles should be aligned modulo this number of bytes. */ 1147 /* Bundles should be aligned modulo this number of bytes. */
1395 TILE_BUNDLE_ALIGNMENT_IN_BYTES = 1148 TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
1396 (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), 1149 (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
1397
1398 /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */
1399 TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1,
1400
1401 /* Static network instructions take this many bytes. */
1402 TILE_SN_INSTRUCTION_SIZE_IN_BYTES =
1403 (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES),
1404 1150
1405 /* Number of registers (some are magic, such as network I/O). */ 1151 /* Number of registers (some are magic, such as network I/O). */
1406 TILE_NUM_REGISTERS = 64, 1152 TILEGX_NUM_REGISTERS = 64,
1407
1408 /* Number of static network registers. */
1409 TILE_NUM_SN_REGISTERS = 4
1410}; 1153};
1411 1154
1412 1155
1413struct tile_operand 1156struct tilegx_operand
1414{ 1157{
1415 /* Is this operand a register, immediate or address? */ 1158 /* Is this operand a register, immediate or address? */
1416 tile_operand_type type; 1159 tilegx_operand_type type;
1417 1160
1418 /* The default relocation type for this operand. */ 1161 /* The default relocation type for this operand. */
1419 signed int default_reloc : 16; 1162 signed int default_reloc : 16;
@@ -1437,27 +1180,27 @@ struct tile_operand
1437 unsigned int rightshift : 2; 1180 unsigned int rightshift : 2;
1438 1181
1439 /* Return the bits for this operand to be ORed into an existing bundle. */ 1182 /* Return the bits for this operand to be ORed into an existing bundle. */
1440 tile_bundle_bits (*insert) (int op); 1183 tilegx_bundle_bits (*insert) (int op);
1441 1184
1442 /* Extract this operand and return it. */ 1185 /* Extract this operand and return it. */
1443 unsigned int (*extract) (tile_bundle_bits bundle); 1186 unsigned int (*extract) (tilegx_bundle_bits bundle);
1444}; 1187};
1445 1188
1446 1189
1447extern const struct tile_operand tile_operands[]; 1190extern const struct tilegx_operand tilegx_operands[];
1448 1191
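
Editor's illustration (not part of the patch): each entry in tilegx_operands[] carries a matched insert()/extract() pair; insert() returns the encoded field ready to be ORed into a bundle, and extract() pulls the same field back out. A minimal sketch, assuming a simple register-style operand with no rightshift or PC-relative adjustment:

static int
example_operand_roundtrip(const struct tilegx_operand *op)
{
	tilegx_bundle_bits bundle = 0;

	bundle |= op->insert(5);		/* encode the placeholder value 5 into an empty bundle */
	return op->extract(bundle) == 5;	/* a plain register field reads back unchanged */
}
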
1449/* One finite-state machine per pipe for rapid instruction decoding. */ 1192/* One finite-state machine per pipe for rapid instruction decoding. */
1450extern const unsigned short * const 1193extern const unsigned short * const
1451tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; 1194tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS];
1452 1195
1453 1196
1454struct tile_opcode 1197struct tilegx_opcode
1455{ 1198{
1456 /* The opcode mnemonic, e.g. "add" */ 1199 /* The opcode mnemonic, e.g. "add" */
1457 const char *name; 1200 const char *name;
1458 1201
1459 /* The enum value for this mnemonic. */ 1202 /* The enum value for this mnemonic. */
1460 tile_mnemonic mnemonic; 1203 tilegx_mnemonic mnemonic;
1461 1204
1462 /* A bit mask of which of the five pipes this instruction 1205 /* A bit mask of which of the five pipes this instruction
1463 is compatible with: 1206 is compatible with:
@@ -1478,29 +1221,28 @@ struct tile_opcode
1478 unsigned char can_bundle; 1221 unsigned char can_bundle;
1479 1222
1480 /* The description of the operands. Each of these is an 1223 /* The description of the operands. Each of these is an
1481 * index into the tile_operands[] table. */ 1224 * index into the tilegx_operands[] table. */
1482 unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; 1225 unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS];
1483 1226
1484}; 1227};
1485 1228
1486extern const struct tile_opcode tile_opcodes[]; 1229extern const struct tilegx_opcode tilegx_opcodes[];
1487
1488 1230
1489/* Used for non-textual disassembly into structs. */ 1231/* Used for non-textual disassembly into structs. */
1490struct tile_decoded_instruction 1232struct tilegx_decoded_instruction
1491{ 1233{
1492 const struct tile_opcode *opcode; 1234 const struct tilegx_opcode *opcode;
1493 const struct tile_operand *operands[TILE_MAX_OPERANDS]; 1235 const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS];
1494 int operand_values[TILE_MAX_OPERANDS]; 1236 long long operand_values[TILEGX_MAX_OPERANDS];
1495}; 1237};
1496 1238
1497 1239
1498/* Disassemble a bundle into a struct for machine processing. */ 1240/* Disassemble a bundle into a struct for machine processing. */
1499extern int parse_insn_tile(tile_bundle_bits bits, 1241extern int parse_insn_tilegx(tilegx_bundle_bits bits,
1500 unsigned int pc, 1242 unsigned long long pc,
1501 struct tile_decoded_instruction 1243 struct tilegx_decoded_instruction
1502 decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); 1244 decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]);
1503 1245
1504 1246
1505 1247
1506#endif /* opcode_tile_h */ 1248#endif /* opcode_tilegx_h */
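
Editor's illustration (not part of the patch): a minimal sketch of how a consumer of this header might decode one bundle with parse_insn_tilegx() and walk the per-slot results. It assumes the return value is the number of array entries filled in (the array is sized for TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE), that "bits" was read from bundle-aligned memory at address "pc", and that pr_info() is available in the caller's context.

static void
example_dump_bundle(tilegx_bundle_bits bits, unsigned long long pc)
{
	struct tilegx_decoded_instruction
		decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
	int i, n;

	n = parse_insn_tilegx(bits, pc, decoded);
	for (i = 0; i < n; i++)
		pr_info("slot %d: %s\n", i, decoded[i].opcode->name);
}
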
diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h
index 227d033b180c..710192869476 100644
--- a/arch/tile/include/asm/opcode_constants_64.h
+++ b/arch/tile/include/asm/opcode_constants_64.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved. 2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -19,462 +19,591 @@
19#define _TILE_OPCODE_CONSTANTS_H 19#define _TILE_OPCODE_CONSTANTS_H
20enum 20enum
21{ 21{
22 ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, 22 ADDI_IMM8_OPCODE_X0 = 1,
23 ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, 23 ADDI_IMM8_OPCODE_X1 = 1,
24 ADDB_SPECIAL_0_OPCODE_X0 = 1, 24 ADDI_OPCODE_Y0 = 0,
25 ADDB_SPECIAL_0_OPCODE_X1 = 1, 25 ADDI_OPCODE_Y1 = 1,
26 ADDHS_SPECIAL_0_OPCODE_X0 = 99, 26 ADDLI_OPCODE_X0 = 1,
27 ADDHS_SPECIAL_0_OPCODE_X1 = 69, 27 ADDLI_OPCODE_X1 = 0,
28 ADDH_SPECIAL_0_OPCODE_X0 = 2, 28 ADDXI_IMM8_OPCODE_X0 = 2,
29 ADDH_SPECIAL_0_OPCODE_X1 = 2, 29 ADDXI_IMM8_OPCODE_X1 = 2,
30 ADDIB_IMM_0_OPCODE_X0 = 1, 30 ADDXI_OPCODE_Y0 = 1,
31 ADDIB_IMM_0_OPCODE_X1 = 1, 31 ADDXI_OPCODE_Y1 = 2,
32 ADDIH_IMM_0_OPCODE_X0 = 2, 32 ADDXLI_OPCODE_X0 = 2,
33 ADDIH_IMM_0_OPCODE_X1 = 2, 33 ADDXLI_OPCODE_X1 = 1,
34 ADDI_IMM_0_OPCODE_X0 = 3, 34 ADDXSC_RRR_0_OPCODE_X0 = 1,
35 ADDI_IMM_0_OPCODE_X1 = 3, 35 ADDXSC_RRR_0_OPCODE_X1 = 1,
36 ADDI_IMM_1_OPCODE_SN = 1, 36 ADDX_RRR_0_OPCODE_X0 = 2,
37 ADDI_OPCODE_Y0 = 9, 37 ADDX_RRR_0_OPCODE_X1 = 2,
38 ADDI_OPCODE_Y1 = 7, 38 ADDX_RRR_0_OPCODE_Y0 = 0,
39 ADDLIS_OPCODE_X0 = 1, 39 ADDX_SPECIAL_0_OPCODE_Y1 = 0,
40 ADDLIS_OPCODE_X1 = 2, 40 ADD_RRR_0_OPCODE_X0 = 3,
41 ADDLI_OPCODE_X0 = 2, 41 ADD_RRR_0_OPCODE_X1 = 3,
42 ADDLI_OPCODE_X1 = 3, 42 ADD_RRR_0_OPCODE_Y0 = 1,
43 ADDS_SPECIAL_0_OPCODE_X0 = 96, 43 ADD_SPECIAL_0_OPCODE_Y1 = 1,
44 ADDS_SPECIAL_0_OPCODE_X1 = 66, 44 ANDI_IMM8_OPCODE_X0 = 3,
45 ADD_SPECIAL_0_OPCODE_X0 = 3, 45 ANDI_IMM8_OPCODE_X1 = 3,
46 ADD_SPECIAL_0_OPCODE_X1 = 3, 46 ANDI_OPCODE_Y0 = 2,
47 ADD_SPECIAL_0_OPCODE_Y0 = 0, 47 ANDI_OPCODE_Y1 = 3,
48 ADD_SPECIAL_0_OPCODE_Y1 = 0, 48 AND_RRR_0_OPCODE_X0 = 4,
49 ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, 49 AND_RRR_0_OPCODE_X1 = 4,
50 ADIFFH_SPECIAL_0_OPCODE_X0 = 5, 50 AND_RRR_5_OPCODE_Y0 = 0,
51 ANDI_IMM_0_OPCODE_X0 = 1, 51 AND_RRR_5_OPCODE_Y1 = 0,
52 ANDI_IMM_0_OPCODE_X1 = 4, 52 BEQZT_BRANCH_OPCODE_X1 = 16,
53 ANDI_OPCODE_Y0 = 10, 53 BEQZ_BRANCH_OPCODE_X1 = 17,
54 ANDI_OPCODE_Y1 = 8, 54 BFEXTS_BF_OPCODE_X0 = 4,
55 AND_SPECIAL_0_OPCODE_X0 = 6, 55 BFEXTU_BF_OPCODE_X0 = 5,
56 AND_SPECIAL_0_OPCODE_X1 = 4, 56 BFINS_BF_OPCODE_X0 = 6,
57 AND_SPECIAL_2_OPCODE_Y0 = 0, 57 BF_OPCODE_X0 = 3,
58 AND_SPECIAL_2_OPCODE_Y1 = 0, 58 BGEZT_BRANCH_OPCODE_X1 = 18,
59 AULI_OPCODE_X0 = 3, 59 BGEZ_BRANCH_OPCODE_X1 = 19,
60 AULI_OPCODE_X1 = 4, 60 BGTZT_BRANCH_OPCODE_X1 = 20,
61 AVGB_U_SPECIAL_0_OPCODE_X0 = 7, 61 BGTZ_BRANCH_OPCODE_X1 = 21,
62 AVGH_SPECIAL_0_OPCODE_X0 = 8, 62 BLBCT_BRANCH_OPCODE_X1 = 22,
63 BBNST_BRANCH_OPCODE_X1 = 15, 63 BLBC_BRANCH_OPCODE_X1 = 23,
64 BBNS_BRANCH_OPCODE_X1 = 14, 64 BLBST_BRANCH_OPCODE_X1 = 24,
65 BBNS_OPCODE_SN = 63, 65 BLBS_BRANCH_OPCODE_X1 = 25,
66 BBST_BRANCH_OPCODE_X1 = 13, 66 BLEZT_BRANCH_OPCODE_X1 = 26,
67 BBS_BRANCH_OPCODE_X1 = 12, 67 BLEZ_BRANCH_OPCODE_X1 = 27,
68 BBS_OPCODE_SN = 62, 68 BLTZT_BRANCH_OPCODE_X1 = 28,
69 BGEZT_BRANCH_OPCODE_X1 = 7, 69 BLTZ_BRANCH_OPCODE_X1 = 29,
70 BGEZ_BRANCH_OPCODE_X1 = 6, 70 BNEZT_BRANCH_OPCODE_X1 = 30,
71 BGEZ_OPCODE_SN = 61, 71 BNEZ_BRANCH_OPCODE_X1 = 31,
72 BGZT_BRANCH_OPCODE_X1 = 5, 72 BRANCH_OPCODE_X1 = 2,
73 BGZ_BRANCH_OPCODE_X1 = 4, 73 CMOVEQZ_RRR_0_OPCODE_X0 = 5,
74 BGZ_OPCODE_SN = 58, 74 CMOVEQZ_RRR_4_OPCODE_Y0 = 0,
75 BITX_UN_0_SHUN_0_OPCODE_X0 = 1, 75 CMOVNEZ_RRR_0_OPCODE_X0 = 6,
76 BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, 76 CMOVNEZ_RRR_4_OPCODE_Y0 = 1,
77 BLEZT_BRANCH_OPCODE_X1 = 11, 77 CMPEQI_IMM8_OPCODE_X0 = 4,
78 BLEZ_BRANCH_OPCODE_X1 = 10, 78 CMPEQI_IMM8_OPCODE_X1 = 4,
79 BLEZ_OPCODE_SN = 59, 79 CMPEQI_OPCODE_Y0 = 3,
80 BLZT_BRANCH_OPCODE_X1 = 9, 80 CMPEQI_OPCODE_Y1 = 4,
81 BLZ_BRANCH_OPCODE_X1 = 8, 81 CMPEQ_RRR_0_OPCODE_X0 = 7,
82 BLZ_OPCODE_SN = 60, 82 CMPEQ_RRR_0_OPCODE_X1 = 5,
83 BNZT_BRANCH_OPCODE_X1 = 3, 83 CMPEQ_RRR_3_OPCODE_Y0 = 0,
84 BNZ_BRANCH_OPCODE_X1 = 2, 84 CMPEQ_RRR_3_OPCODE_Y1 = 2,
85 BNZ_OPCODE_SN = 57, 85 CMPEXCH4_RRR_0_OPCODE_X1 = 6,
86 BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, 86 CMPEXCH_RRR_0_OPCODE_X1 = 7,
87 BRANCH_OPCODE_X1 = 5, 87 CMPLES_RRR_0_OPCODE_X0 = 8,
88 BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, 88 CMPLES_RRR_0_OPCODE_X1 = 8,
89 BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, 89 CMPLES_RRR_2_OPCODE_Y0 = 0,
90 BZT_BRANCH_OPCODE_X1 = 1, 90 CMPLES_RRR_2_OPCODE_Y1 = 0,
91 BZ_BRANCH_OPCODE_X1 = 0, 91 CMPLEU_RRR_0_OPCODE_X0 = 9,
92 BZ_OPCODE_SN = 56, 92 CMPLEU_RRR_0_OPCODE_X1 = 9,
93 CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, 93 CMPLEU_RRR_2_OPCODE_Y0 = 1,
94 CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, 94 CMPLEU_RRR_2_OPCODE_Y1 = 1,
95 CRC32_32_SPECIAL_0_OPCODE_X0 = 9, 95 CMPLTSI_IMM8_OPCODE_X0 = 5,
96 CRC32_8_SPECIAL_0_OPCODE_X0 = 10, 96 CMPLTSI_IMM8_OPCODE_X1 = 5,
97 CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, 97 CMPLTSI_OPCODE_Y0 = 4,
98 CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, 98 CMPLTSI_OPCODE_Y1 = 5,
99 DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, 99 CMPLTS_RRR_0_OPCODE_X0 = 10,
100 DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, 100 CMPLTS_RRR_0_OPCODE_X1 = 10,
101 DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, 101 CMPLTS_RRR_2_OPCODE_Y0 = 2,
102 FINV_UN_0_SHUN_0_OPCODE_X1 = 3, 102 CMPLTS_RRR_2_OPCODE_Y1 = 2,
103 FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, 103 CMPLTUI_IMM8_OPCODE_X0 = 6,
104 FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, 104 CMPLTUI_IMM8_OPCODE_X1 = 6,
105 FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, 105 CMPLTU_RRR_0_OPCODE_X0 = 11,
106 FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, 106 CMPLTU_RRR_0_OPCODE_X1 = 11,
107 FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, 107 CMPLTU_RRR_2_OPCODE_Y0 = 3,
108 FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, 108 CMPLTU_RRR_2_OPCODE_Y1 = 3,
109 HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, 109 CMPNE_RRR_0_OPCODE_X0 = 12,
110 ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, 110 CMPNE_RRR_0_OPCODE_X1 = 12,
111 ILL_UN_0_SHUN_0_OPCODE_X1 = 7, 111 CMPNE_RRR_3_OPCODE_Y0 = 1,
112 ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, 112 CMPNE_RRR_3_OPCODE_Y1 = 3,
113 IMM_0_OPCODE_SN = 0, 113 CMULAF_RRR_0_OPCODE_X0 = 13,
114 IMM_0_OPCODE_X0 = 4, 114 CMULA_RRR_0_OPCODE_X0 = 14,
115 IMM_0_OPCODE_X1 = 6, 115 CMULFR_RRR_0_OPCODE_X0 = 15,
116 IMM_1_OPCODE_SN = 1, 116 CMULF_RRR_0_OPCODE_X0 = 16,
117 IMM_OPCODE_0_X0 = 5, 117 CMULHR_RRR_0_OPCODE_X0 = 17,
118 INTHB_SPECIAL_0_OPCODE_X0 = 11, 118 CMULH_RRR_0_OPCODE_X0 = 18,
119 INTHB_SPECIAL_0_OPCODE_X1 = 5, 119 CMUL_RRR_0_OPCODE_X0 = 19,
120 INTHH_SPECIAL_0_OPCODE_X0 = 12, 120 CNTLZ_UNARY_OPCODE_X0 = 1,
121 INTHH_SPECIAL_0_OPCODE_X1 = 6, 121 CNTLZ_UNARY_OPCODE_Y0 = 1,
122 INTLB_SPECIAL_0_OPCODE_X0 = 13, 122 CNTTZ_UNARY_OPCODE_X0 = 2,
123 INTLB_SPECIAL_0_OPCODE_X1 = 7, 123 CNTTZ_UNARY_OPCODE_Y0 = 2,
124 INTLH_SPECIAL_0_OPCODE_X0 = 14, 124 CRC32_32_RRR_0_OPCODE_X0 = 20,
125 INTLH_SPECIAL_0_OPCODE_X1 = 8, 125 CRC32_8_RRR_0_OPCODE_X0 = 21,
126 INV_UN_0_SHUN_0_OPCODE_X1 = 8, 126 DBLALIGN2_RRR_0_OPCODE_X0 = 22,
127 IRET_UN_0_SHUN_0_OPCODE_X1 = 9, 127 DBLALIGN2_RRR_0_OPCODE_X1 = 13,
128 JALB_OPCODE_X1 = 13, 128 DBLALIGN4_RRR_0_OPCODE_X0 = 23,
129 JALF_OPCODE_X1 = 12, 129 DBLALIGN4_RRR_0_OPCODE_X1 = 14,
130 JALRP_SPECIAL_0_OPCODE_X1 = 9, 130 DBLALIGN6_RRR_0_OPCODE_X0 = 24,
131 JALRR_IMM_1_OPCODE_SN = 3, 131 DBLALIGN6_RRR_0_OPCODE_X1 = 15,
132 JALR_RR_IMM_0_OPCODE_SN = 5, 132 DBLALIGN_RRR_0_OPCODE_X0 = 25,
133 JALR_SPECIAL_0_OPCODE_X1 = 10, 133 DRAIN_UNARY_OPCODE_X1 = 1,
134 JB_OPCODE_X1 = 11, 134 DTLBPR_UNARY_OPCODE_X1 = 2,
135 JF_OPCODE_X1 = 10, 135 EXCH4_RRR_0_OPCODE_X1 = 16,
136 JRP_SPECIAL_0_OPCODE_X1 = 11, 136 EXCH_RRR_0_OPCODE_X1 = 17,
137 JRR_IMM_1_OPCODE_SN = 2, 137 FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26,
138 JR_RR_IMM_0_OPCODE_SN = 4, 138 FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27,
139 JR_SPECIAL_0_OPCODE_X1 = 12, 139 FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28,
140 LBADD_IMM_0_OPCODE_X1 = 22, 140 FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29,
141 LBADD_U_IMM_0_OPCODE_X1 = 23, 141 FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30,
142 LB_OPCODE_Y2 = 0, 142 FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31,
143 LB_UN_0_SHUN_0_OPCODE_X1 = 10, 143 FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32,
144 LB_U_OPCODE_Y2 = 1, 144 FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33,
145 LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, 145 FETCHADD4_RRR_0_OPCODE_X1 = 18,
146 LHADD_IMM_0_OPCODE_X1 = 24, 146 FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19,
147 LHADD_U_IMM_0_OPCODE_X1 = 25, 147 FETCHADDGEZ_RRR_0_OPCODE_X1 = 20,
148 LH_OPCODE_Y2 = 2, 148 FETCHADD_RRR_0_OPCODE_X1 = 21,
149 LH_UN_0_SHUN_0_OPCODE_X1 = 12, 149 FETCHAND4_RRR_0_OPCODE_X1 = 22,
150 LH_U_OPCODE_Y2 = 3, 150 FETCHAND_RRR_0_OPCODE_X1 = 23,
151 LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, 151 FETCHOR4_RRR_0_OPCODE_X1 = 24,
152 LNK_SPECIAL_0_OPCODE_X1 = 13, 152 FETCHOR_RRR_0_OPCODE_X1 = 25,
153 LWADD_IMM_0_OPCODE_X1 = 26, 153 FINV_UNARY_OPCODE_X1 = 3,
154 LWADD_NA_IMM_0_OPCODE_X1 = 27, 154 FLUSHWB_UNARY_OPCODE_X1 = 4,
155 LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, 155 FLUSH_UNARY_OPCODE_X1 = 5,
156 LW_OPCODE_Y2 = 4, 156 FNOP_UNARY_OPCODE_X0 = 3,
157 LW_UN_0_SHUN_0_OPCODE_X1 = 14, 157 FNOP_UNARY_OPCODE_X1 = 6,
158 MAXB_U_SPECIAL_0_OPCODE_X0 = 15, 158 FNOP_UNARY_OPCODE_Y0 = 3,
159 MAXB_U_SPECIAL_0_OPCODE_X1 = 14, 159 FNOP_UNARY_OPCODE_Y1 = 8,
160 MAXH_SPECIAL_0_OPCODE_X0 = 16, 160 FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34,
161 MAXH_SPECIAL_0_OPCODE_X1 = 15, 161 FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35,
162 MAXIB_U_IMM_0_OPCODE_X0 = 4, 162 FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36,
163 MAXIB_U_IMM_0_OPCODE_X1 = 5, 163 FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37,
164 MAXIH_IMM_0_OPCODE_X0 = 5, 164 FSINGLE_PACK1_UNARY_OPCODE_X0 = 4,
165 MAXIH_IMM_0_OPCODE_X1 = 6, 165 FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4,
166 MFSPR_IMM_0_OPCODE_X1 = 7, 166 FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38,
167 MF_UN_0_SHUN_0_OPCODE_X1 = 15, 167 FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39,
168 MINB_U_SPECIAL_0_OPCODE_X0 = 17, 168 ICOH_UNARY_OPCODE_X1 = 7,
169 MINB_U_SPECIAL_0_OPCODE_X1 = 16, 169 ILL_UNARY_OPCODE_X1 = 8,
170 MINH_SPECIAL_0_OPCODE_X0 = 18, 170 ILL_UNARY_OPCODE_Y1 = 9,
171 MINH_SPECIAL_0_OPCODE_X1 = 17, 171 IMM8_OPCODE_X0 = 4,
172 MINIB_U_IMM_0_OPCODE_X0 = 6, 172 IMM8_OPCODE_X1 = 3,
173 MINIB_U_IMM_0_OPCODE_X1 = 8, 173 INV_UNARY_OPCODE_X1 = 9,
174 MINIH_IMM_0_OPCODE_X0 = 7, 174 IRET_UNARY_OPCODE_X1 = 10,
175 MINIH_IMM_0_OPCODE_X1 = 9, 175 JALRP_UNARY_OPCODE_X1 = 11,
176 MM_OPCODE_X0 = 6, 176 JALRP_UNARY_OPCODE_Y1 = 10,
177 MM_OPCODE_X1 = 7, 177 JALR_UNARY_OPCODE_X1 = 12,
178 MNZB_SPECIAL_0_OPCODE_X0 = 19, 178 JALR_UNARY_OPCODE_Y1 = 11,
179 MNZB_SPECIAL_0_OPCODE_X1 = 18, 179 JAL_JUMP_OPCODE_X1 = 0,
180 MNZH_SPECIAL_0_OPCODE_X0 = 20, 180 JRP_UNARY_OPCODE_X1 = 13,
181 MNZH_SPECIAL_0_OPCODE_X1 = 19, 181 JRP_UNARY_OPCODE_Y1 = 12,
182 MNZ_SPECIAL_0_OPCODE_X0 = 21, 182 JR_UNARY_OPCODE_X1 = 14,
183 MNZ_SPECIAL_0_OPCODE_X1 = 20, 183 JR_UNARY_OPCODE_Y1 = 13,
184 MNZ_SPECIAL_1_OPCODE_Y0 = 0, 184 JUMP_OPCODE_X1 = 4,
185 MNZ_SPECIAL_1_OPCODE_Y1 = 1, 185 J_JUMP_OPCODE_X1 = 1,
186 MOVEI_IMM_1_OPCODE_SN = 0, 186 LD1S_ADD_IMM8_OPCODE_X1 = 7,
187 MOVE_RR_IMM_0_OPCODE_SN = 8, 187 LD1S_OPCODE_Y2 = 0,
188 MTSPR_IMM_0_OPCODE_X1 = 10, 188 LD1S_UNARY_OPCODE_X1 = 15,
189 MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, 189 LD1U_ADD_IMM8_OPCODE_X1 = 8,
190 MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, 190 LD1U_OPCODE_Y2 = 1,
191 MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, 191 LD1U_UNARY_OPCODE_X1 = 16,
192 MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, 192 LD2S_ADD_IMM8_OPCODE_X1 = 9,
193 MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, 193 LD2S_OPCODE_Y2 = 2,
194 MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, 194 LD2S_UNARY_OPCODE_X1 = 17,
195 MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, 195 LD2U_ADD_IMM8_OPCODE_X1 = 10,
196 MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, 196 LD2U_OPCODE_Y2 = 3,
197 MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, 197 LD2U_UNARY_OPCODE_X1 = 18,
198 MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, 198 LD4S_ADD_IMM8_OPCODE_X1 = 11,
199 MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, 199 LD4S_OPCODE_Y2 = 1,
200 MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, 200 LD4S_UNARY_OPCODE_X1 = 19,
201 MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, 201 LD4U_ADD_IMM8_OPCODE_X1 = 12,
202 MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, 202 LD4U_OPCODE_Y2 = 2,
203 MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, 203 LD4U_UNARY_OPCODE_X1 = 20,
204 MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, 204 LDNA_UNARY_OPCODE_X1 = 21,
205 MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, 205 LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
206 MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, 206 LDNT1S_UNARY_OPCODE_X1 = 22,
207 MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, 207 LDNT1U_ADD_IMM8_OPCODE_X1 = 14,
208 MULHL_US_SPECIAL_0_OPCODE_X0 = 36, 208 LDNT1U_UNARY_OPCODE_X1 = 23,
209 MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, 209 LDNT2S_ADD_IMM8_OPCODE_X1 = 15,
210 MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, 210 LDNT2S_UNARY_OPCODE_X1 = 24,
211 MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, 211 LDNT2U_ADD_IMM8_OPCODE_X1 = 16,
212 MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, 212 LDNT2U_UNARY_OPCODE_X1 = 25,
213 MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, 213 LDNT4S_ADD_IMM8_OPCODE_X1 = 17,
214 MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, 214 LDNT4S_UNARY_OPCODE_X1 = 26,
215 MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, 215 LDNT4U_ADD_IMM8_OPCODE_X1 = 18,
216 MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, 216 LDNT4U_UNARY_OPCODE_X1 = 27,
217 MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, 217 LDNT_ADD_IMM8_OPCODE_X1 = 19,
218 MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, 218 LDNT_UNARY_OPCODE_X1 = 28,
219 MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, 219 LD_ADD_IMM8_OPCODE_X1 = 20,
220 MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, 220 LD_OPCODE_Y2 = 3,
221 MVNZ_SPECIAL_0_OPCODE_X0 = 45, 221 LD_UNARY_OPCODE_X1 = 29,
222 MVNZ_SPECIAL_1_OPCODE_Y0 = 1, 222 LNK_UNARY_OPCODE_X1 = 30,
223 MVZ_SPECIAL_0_OPCODE_X0 = 46, 223 LNK_UNARY_OPCODE_Y1 = 14,
224 MVZ_SPECIAL_1_OPCODE_Y0 = 2, 224 LWNA_ADD_IMM8_OPCODE_X1 = 21,
225 MZB_SPECIAL_0_OPCODE_X0 = 47, 225 MFSPR_IMM8_OPCODE_X1 = 22,
226 MZB_SPECIAL_0_OPCODE_X1 = 21, 226 MF_UNARY_OPCODE_X1 = 31,
227 MZH_SPECIAL_0_OPCODE_X0 = 48, 227 MM_BF_OPCODE_X0 = 7,
228 MZH_SPECIAL_0_OPCODE_X1 = 22, 228 MNZ_RRR_0_OPCODE_X0 = 40,
229 MZ_SPECIAL_0_OPCODE_X0 = 49, 229 MNZ_RRR_0_OPCODE_X1 = 26,
230 MZ_SPECIAL_0_OPCODE_X1 = 23, 230 MNZ_RRR_4_OPCODE_Y0 = 2,
231 MZ_SPECIAL_1_OPCODE_Y0 = 3, 231 MNZ_RRR_4_OPCODE_Y1 = 2,
232 MZ_SPECIAL_1_OPCODE_Y1 = 2, 232 MODE_OPCODE_YA2 = 1,
233 NAP_UN_0_SHUN_0_OPCODE_X1 = 16, 233 MODE_OPCODE_YB2 = 2,
234 NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, 234 MODE_OPCODE_YC2 = 3,
235 NOP_UN_0_SHUN_0_OPCODE_X0 = 6, 235 MTSPR_IMM8_OPCODE_X1 = 23,
236 NOP_UN_0_SHUN_0_OPCODE_X1 = 17, 236 MULAX_RRR_0_OPCODE_X0 = 41,
237 NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, 237 MULAX_RRR_3_OPCODE_Y0 = 2,
238 NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, 238 MULA_HS_HS_RRR_0_OPCODE_X0 = 42,
239 NOREG_RR_IMM_0_OPCODE_SN = 0, 239 MULA_HS_HS_RRR_9_OPCODE_Y0 = 0,
240 NOR_SPECIAL_0_OPCODE_X0 = 50, 240 MULA_HS_HU_RRR_0_OPCODE_X0 = 43,
241 NOR_SPECIAL_0_OPCODE_X1 = 24, 241 MULA_HS_LS_RRR_0_OPCODE_X0 = 44,
242 NOR_SPECIAL_2_OPCODE_Y0 = 1, 242 MULA_HS_LU_RRR_0_OPCODE_X0 = 45,
243 NOR_SPECIAL_2_OPCODE_Y1 = 1, 243 MULA_HU_HU_RRR_0_OPCODE_X0 = 46,
244 ORI_IMM_0_OPCODE_X0 = 8, 244 MULA_HU_HU_RRR_9_OPCODE_Y0 = 1,
245 ORI_IMM_0_OPCODE_X1 = 11, 245 MULA_HU_LS_RRR_0_OPCODE_X0 = 47,
246 ORI_OPCODE_Y0 = 11, 246 MULA_HU_LU_RRR_0_OPCODE_X0 = 48,
247 ORI_OPCODE_Y1 = 9, 247 MULA_LS_LS_RRR_0_OPCODE_X0 = 49,
248 OR_SPECIAL_0_OPCODE_X0 = 51, 248 MULA_LS_LS_RRR_9_OPCODE_Y0 = 2,
249 OR_SPECIAL_0_OPCODE_X1 = 25, 249 MULA_LS_LU_RRR_0_OPCODE_X0 = 50,
250 OR_SPECIAL_2_OPCODE_Y0 = 2, 250 MULA_LU_LU_RRR_0_OPCODE_X0 = 51,
251 OR_SPECIAL_2_OPCODE_Y1 = 2, 251 MULA_LU_LU_RRR_9_OPCODE_Y0 = 3,
252 PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, 252 MULX_RRR_0_OPCODE_X0 = 52,
253 PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, 253 MULX_RRR_3_OPCODE_Y0 = 3,
254 PACKHB_SPECIAL_0_OPCODE_X0 = 52, 254 MUL_HS_HS_RRR_0_OPCODE_X0 = 53,
255 PACKHB_SPECIAL_0_OPCODE_X1 = 26, 255 MUL_HS_HS_RRR_8_OPCODE_Y0 = 0,
256 PACKHS_SPECIAL_0_OPCODE_X0 = 102, 256 MUL_HS_HU_RRR_0_OPCODE_X0 = 54,
257 PACKHS_SPECIAL_0_OPCODE_X1 = 72, 257 MUL_HS_LS_RRR_0_OPCODE_X0 = 55,
258 PACKLB_SPECIAL_0_OPCODE_X0 = 53, 258 MUL_HS_LU_RRR_0_OPCODE_X0 = 56,
259 PACKLB_SPECIAL_0_OPCODE_X1 = 27, 259 MUL_HU_HU_RRR_0_OPCODE_X0 = 57,
260 PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, 260 MUL_HU_HU_RRR_8_OPCODE_Y0 = 1,
261 PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, 261 MUL_HU_LS_RRR_0_OPCODE_X0 = 58,
262 RLI_SHUN_0_OPCODE_X0 = 1, 262 MUL_HU_LU_RRR_0_OPCODE_X0 = 59,
263 RLI_SHUN_0_OPCODE_X1 = 1, 263 MUL_LS_LS_RRR_0_OPCODE_X0 = 60,
264 RLI_SHUN_0_OPCODE_Y0 = 1, 264 MUL_LS_LS_RRR_8_OPCODE_Y0 = 2,
265 RLI_SHUN_0_OPCODE_Y1 = 1, 265 MUL_LS_LU_RRR_0_OPCODE_X0 = 61,
266 RL_SPECIAL_0_OPCODE_X0 = 54, 266 MUL_LU_LU_RRR_0_OPCODE_X0 = 62,
267 RL_SPECIAL_0_OPCODE_X1 = 28, 267 MUL_LU_LU_RRR_8_OPCODE_Y0 = 3,
268 RL_SPECIAL_3_OPCODE_Y0 = 0, 268 MZ_RRR_0_OPCODE_X0 = 63,
269 RL_SPECIAL_3_OPCODE_Y1 = 0, 269 MZ_RRR_0_OPCODE_X1 = 27,
270 RR_IMM_0_OPCODE_SN = 0, 270 MZ_RRR_4_OPCODE_Y0 = 3,
271 S1A_SPECIAL_0_OPCODE_X0 = 55, 271 MZ_RRR_4_OPCODE_Y1 = 3,
272 S1A_SPECIAL_0_OPCODE_X1 = 29, 272 NAP_UNARY_OPCODE_X1 = 32,
273 S1A_SPECIAL_0_OPCODE_Y0 = 1, 273 NOP_UNARY_OPCODE_X0 = 5,
274 S1A_SPECIAL_0_OPCODE_Y1 = 1, 274 NOP_UNARY_OPCODE_X1 = 33,
275 S2A_SPECIAL_0_OPCODE_X0 = 56, 275 NOP_UNARY_OPCODE_Y0 = 5,
276 S2A_SPECIAL_0_OPCODE_X1 = 30, 276 NOP_UNARY_OPCODE_Y1 = 15,
277 S2A_SPECIAL_0_OPCODE_Y0 = 2, 277 NOR_RRR_0_OPCODE_X0 = 64,
278 S2A_SPECIAL_0_OPCODE_Y1 = 2, 278 NOR_RRR_0_OPCODE_X1 = 28,
279 S3A_SPECIAL_0_OPCODE_X0 = 57, 279 NOR_RRR_5_OPCODE_Y0 = 1,
280 S3A_SPECIAL_0_OPCODE_X1 = 31, 280 NOR_RRR_5_OPCODE_Y1 = 1,
281 S3A_SPECIAL_5_OPCODE_Y0 = 1, 281 ORI_IMM8_OPCODE_X0 = 7,
282 S3A_SPECIAL_5_OPCODE_Y1 = 1, 282 ORI_IMM8_OPCODE_X1 = 24,
283 SADAB_U_SPECIAL_0_OPCODE_X0 = 58, 283 OR_RRR_0_OPCODE_X0 = 65,
284 SADAH_SPECIAL_0_OPCODE_X0 = 59, 284 OR_RRR_0_OPCODE_X1 = 29,
285 SADAH_U_SPECIAL_0_OPCODE_X0 = 60, 285 OR_RRR_5_OPCODE_Y0 = 2,
286 SADB_U_SPECIAL_0_OPCODE_X0 = 61, 286 OR_RRR_5_OPCODE_Y1 = 2,
287 SADH_SPECIAL_0_OPCODE_X0 = 62, 287 PCNT_UNARY_OPCODE_X0 = 6,
288 SADH_U_SPECIAL_0_OPCODE_X0 = 63, 288 PCNT_UNARY_OPCODE_Y0 = 6,
289 SBADD_IMM_0_OPCODE_X1 = 28, 289 REVBITS_UNARY_OPCODE_X0 = 7,
290 SB_OPCODE_Y2 = 5, 290 REVBITS_UNARY_OPCODE_Y0 = 7,
291 SB_SPECIAL_0_OPCODE_X1 = 32, 291 REVBYTES_UNARY_OPCODE_X0 = 8,
292 SEQB_SPECIAL_0_OPCODE_X0 = 64, 292 REVBYTES_UNARY_OPCODE_Y0 = 8,
293 SEQB_SPECIAL_0_OPCODE_X1 = 33, 293 ROTLI_SHIFT_OPCODE_X0 = 1,
294 SEQH_SPECIAL_0_OPCODE_X0 = 65, 294 ROTLI_SHIFT_OPCODE_X1 = 1,
295 SEQH_SPECIAL_0_OPCODE_X1 = 34, 295 ROTLI_SHIFT_OPCODE_Y0 = 0,
296 SEQIB_IMM_0_OPCODE_X0 = 9, 296 ROTLI_SHIFT_OPCODE_Y1 = 0,
297 SEQIB_IMM_0_OPCODE_X1 = 12, 297 ROTL_RRR_0_OPCODE_X0 = 66,
298 SEQIH_IMM_0_OPCODE_X0 = 10, 298 ROTL_RRR_0_OPCODE_X1 = 30,
299 SEQIH_IMM_0_OPCODE_X1 = 13, 299 ROTL_RRR_6_OPCODE_Y0 = 0,
300 SEQI_IMM_0_OPCODE_X0 = 11, 300 ROTL_RRR_6_OPCODE_Y1 = 0,
301 SEQI_IMM_0_OPCODE_X1 = 14, 301 RRR_0_OPCODE_X0 = 5,
302 SEQI_OPCODE_Y0 = 12, 302 RRR_0_OPCODE_X1 = 5,
303 SEQI_OPCODE_Y1 = 10, 303 RRR_0_OPCODE_Y0 = 5,
304 SEQ_SPECIAL_0_OPCODE_X0 = 66, 304 RRR_0_OPCODE_Y1 = 6,
305 SEQ_SPECIAL_0_OPCODE_X1 = 35, 305 RRR_1_OPCODE_Y0 = 6,
306 SEQ_SPECIAL_5_OPCODE_Y0 = 2, 306 RRR_1_OPCODE_Y1 = 7,
307 SEQ_SPECIAL_5_OPCODE_Y1 = 2, 307 RRR_2_OPCODE_Y0 = 7,
308 SHADD_IMM_0_OPCODE_X1 = 29, 308 RRR_2_OPCODE_Y1 = 8,
309 SHL8II_IMM_0_OPCODE_SN = 3, 309 RRR_3_OPCODE_Y0 = 8,
310 SHLB_SPECIAL_0_OPCODE_X0 = 67, 310 RRR_3_OPCODE_Y1 = 9,
311 SHLB_SPECIAL_0_OPCODE_X1 = 36, 311 RRR_4_OPCODE_Y0 = 9,
312 SHLH_SPECIAL_0_OPCODE_X0 = 68, 312 RRR_4_OPCODE_Y1 = 10,
313 SHLH_SPECIAL_0_OPCODE_X1 = 37, 313 RRR_5_OPCODE_Y0 = 10,
314 SHLIB_SHUN_0_OPCODE_X0 = 2, 314 RRR_5_OPCODE_Y1 = 11,
315 SHLIB_SHUN_0_OPCODE_X1 = 2, 315 RRR_6_OPCODE_Y0 = 11,
316 SHLIH_SHUN_0_OPCODE_X0 = 3, 316 RRR_6_OPCODE_Y1 = 12,
317 SHLIH_SHUN_0_OPCODE_X1 = 3, 317 RRR_7_OPCODE_Y0 = 12,
318 SHLI_SHUN_0_OPCODE_X0 = 4, 318 RRR_7_OPCODE_Y1 = 13,
319 SHLI_SHUN_0_OPCODE_X1 = 4, 319 RRR_8_OPCODE_Y0 = 13,
320 SHLI_SHUN_0_OPCODE_Y0 = 2, 320 RRR_9_OPCODE_Y0 = 14,
321 SHLI_SHUN_0_OPCODE_Y1 = 2, 321 SHIFT_OPCODE_X0 = 6,
322 SHL_SPECIAL_0_OPCODE_X0 = 69, 322 SHIFT_OPCODE_X1 = 6,
323 SHL_SPECIAL_0_OPCODE_X1 = 38, 323 SHIFT_OPCODE_Y0 = 15,
324 SHL_SPECIAL_3_OPCODE_Y0 = 1, 324 SHIFT_OPCODE_Y1 = 14,
325 SHL_SPECIAL_3_OPCODE_Y1 = 1, 325 SHL16INSLI_OPCODE_X0 = 7,
326 SHR1_RR_IMM_0_OPCODE_SN = 9, 326 SHL16INSLI_OPCODE_X1 = 7,
327 SHRB_SPECIAL_0_OPCODE_X0 = 70, 327 SHL1ADDX_RRR_0_OPCODE_X0 = 67,
328 SHRB_SPECIAL_0_OPCODE_X1 = 39, 328 SHL1ADDX_RRR_0_OPCODE_X1 = 31,
329 SHRH_SPECIAL_0_OPCODE_X0 = 71, 329 SHL1ADDX_RRR_7_OPCODE_Y0 = 1,
330 SHRH_SPECIAL_0_OPCODE_X1 = 40, 330 SHL1ADDX_RRR_7_OPCODE_Y1 = 1,
331 SHRIB_SHUN_0_OPCODE_X0 = 5, 331 SHL1ADD_RRR_0_OPCODE_X0 = 68,
332 SHRIB_SHUN_0_OPCODE_X1 = 5, 332 SHL1ADD_RRR_0_OPCODE_X1 = 32,
333 SHRIH_SHUN_0_OPCODE_X0 = 6, 333 SHL1ADD_RRR_1_OPCODE_Y0 = 0,
334 SHRIH_SHUN_0_OPCODE_X1 = 6, 334 SHL1ADD_RRR_1_OPCODE_Y1 = 0,
335 SHRI_SHUN_0_OPCODE_X0 = 7, 335 SHL2ADDX_RRR_0_OPCODE_X0 = 69,
336 SHRI_SHUN_0_OPCODE_X1 = 7, 336 SHL2ADDX_RRR_0_OPCODE_X1 = 33,
337 SHRI_SHUN_0_OPCODE_Y0 = 3, 337 SHL2ADDX_RRR_7_OPCODE_Y0 = 2,
338 SHRI_SHUN_0_OPCODE_Y1 = 3, 338 SHL2ADDX_RRR_7_OPCODE_Y1 = 2,
339 SHR_SPECIAL_0_OPCODE_X0 = 72, 339 SHL2ADD_RRR_0_OPCODE_X0 = 70,
340 SHR_SPECIAL_0_OPCODE_X1 = 41, 340 SHL2ADD_RRR_0_OPCODE_X1 = 34,
341 SHR_SPECIAL_3_OPCODE_Y0 = 2, 341 SHL2ADD_RRR_1_OPCODE_Y0 = 1,
342 SHR_SPECIAL_3_OPCODE_Y1 = 2, 342 SHL2ADD_RRR_1_OPCODE_Y1 = 1,
343 SHUN_0_OPCODE_X0 = 7, 343 SHL3ADDX_RRR_0_OPCODE_X0 = 71,
344 SHUN_0_OPCODE_X1 = 8, 344 SHL3ADDX_RRR_0_OPCODE_X1 = 35,
345 SHUN_0_OPCODE_Y0 = 13, 345 SHL3ADDX_RRR_7_OPCODE_Y0 = 3,
346 SHUN_0_OPCODE_Y1 = 11, 346 SHL3ADDX_RRR_7_OPCODE_Y1 = 3,
347 SH_OPCODE_Y2 = 6, 347 SHL3ADD_RRR_0_OPCODE_X0 = 72,
348 SH_SPECIAL_0_OPCODE_X1 = 42, 348 SHL3ADD_RRR_0_OPCODE_X1 = 36,
349 SLTB_SPECIAL_0_OPCODE_X0 = 73, 349 SHL3ADD_RRR_1_OPCODE_Y0 = 2,
350 SLTB_SPECIAL_0_OPCODE_X1 = 43, 350 SHL3ADD_RRR_1_OPCODE_Y1 = 2,
351 SLTB_U_SPECIAL_0_OPCODE_X0 = 74, 351 SHLI_SHIFT_OPCODE_X0 = 2,
352 SLTB_U_SPECIAL_0_OPCODE_X1 = 44, 352 SHLI_SHIFT_OPCODE_X1 = 2,
353 SLTEB_SPECIAL_0_OPCODE_X0 = 75, 353 SHLI_SHIFT_OPCODE_Y0 = 1,
354 SLTEB_SPECIAL_0_OPCODE_X1 = 45, 354 SHLI_SHIFT_OPCODE_Y1 = 1,
355 SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, 355 SHLXI_SHIFT_OPCODE_X0 = 3,
356 SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, 356 SHLXI_SHIFT_OPCODE_X1 = 3,
357 SLTEH_SPECIAL_0_OPCODE_X0 = 77, 357 SHLX_RRR_0_OPCODE_X0 = 73,
358 SLTEH_SPECIAL_0_OPCODE_X1 = 47, 358 SHLX_RRR_0_OPCODE_X1 = 37,
359 SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, 359 SHL_RRR_0_OPCODE_X0 = 74,
360 SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, 360 SHL_RRR_0_OPCODE_X1 = 38,
361 SLTE_SPECIAL_0_OPCODE_X0 = 79, 361 SHL_RRR_6_OPCODE_Y0 = 1,
362 SLTE_SPECIAL_0_OPCODE_X1 = 49, 362 SHL_RRR_6_OPCODE_Y1 = 1,
363 SLTE_SPECIAL_4_OPCODE_Y0 = 0, 363 SHRSI_SHIFT_OPCODE_X0 = 4,
364 SLTE_SPECIAL_4_OPCODE_Y1 = 0, 364 SHRSI_SHIFT_OPCODE_X1 = 4,
365 SLTE_U_SPECIAL_0_OPCODE_X0 = 80, 365 SHRSI_SHIFT_OPCODE_Y0 = 2,
366 SLTE_U_SPECIAL_0_OPCODE_X1 = 50, 366 SHRSI_SHIFT_OPCODE_Y1 = 2,
367 SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, 367 SHRS_RRR_0_OPCODE_X0 = 75,
368 SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, 368 SHRS_RRR_0_OPCODE_X1 = 39,
369 SLTH_SPECIAL_0_OPCODE_X0 = 81, 369 SHRS_RRR_6_OPCODE_Y0 = 2,
370 SLTH_SPECIAL_0_OPCODE_X1 = 51, 370 SHRS_RRR_6_OPCODE_Y1 = 2,
371 SLTH_U_SPECIAL_0_OPCODE_X0 = 82, 371 SHRUI_SHIFT_OPCODE_X0 = 5,
372 SLTH_U_SPECIAL_0_OPCODE_X1 = 52, 372 SHRUI_SHIFT_OPCODE_X1 = 5,
373 SLTIB_IMM_0_OPCODE_X0 = 12, 373 SHRUI_SHIFT_OPCODE_Y0 = 3,
374 SLTIB_IMM_0_OPCODE_X1 = 15, 374 SHRUI_SHIFT_OPCODE_Y1 = 3,
375 SLTIB_U_IMM_0_OPCODE_X0 = 13, 375 SHRUXI_SHIFT_OPCODE_X0 = 6,
376 SLTIB_U_IMM_0_OPCODE_X1 = 16, 376 SHRUXI_SHIFT_OPCODE_X1 = 6,
377 SLTIH_IMM_0_OPCODE_X0 = 14, 377 SHRUX_RRR_0_OPCODE_X0 = 76,
378 SLTIH_IMM_0_OPCODE_X1 = 17, 378 SHRUX_RRR_0_OPCODE_X1 = 40,
379 SLTIH_U_IMM_0_OPCODE_X0 = 15, 379 SHRU_RRR_0_OPCODE_X0 = 77,
380 SLTIH_U_IMM_0_OPCODE_X1 = 18, 380 SHRU_RRR_0_OPCODE_X1 = 41,
381 SLTI_IMM_0_OPCODE_X0 = 16, 381 SHRU_RRR_6_OPCODE_Y0 = 3,
382 SLTI_IMM_0_OPCODE_X1 = 19, 382 SHRU_RRR_6_OPCODE_Y1 = 3,
383 SLTI_OPCODE_Y0 = 14, 383 SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78,
384 SLTI_OPCODE_Y1 = 12, 384 ST1_ADD_IMM8_OPCODE_X1 = 25,
385 SLTI_U_IMM_0_OPCODE_X0 = 17, 385 ST1_OPCODE_Y2 = 0,
386 SLTI_U_IMM_0_OPCODE_X1 = 20, 386 ST1_RRR_0_OPCODE_X1 = 42,
387 SLTI_U_OPCODE_Y0 = 15, 387 ST2_ADD_IMM8_OPCODE_X1 = 26,
388 SLTI_U_OPCODE_Y1 = 13, 388 ST2_OPCODE_Y2 = 1,
389 SLT_SPECIAL_0_OPCODE_X0 = 83, 389 ST2_RRR_0_OPCODE_X1 = 43,
390 SLT_SPECIAL_0_OPCODE_X1 = 53, 390 ST4_ADD_IMM8_OPCODE_X1 = 27,
391 SLT_SPECIAL_4_OPCODE_Y0 = 2, 391 ST4_OPCODE_Y2 = 2,
392 SLT_SPECIAL_4_OPCODE_Y1 = 2, 392 ST4_RRR_0_OPCODE_X1 = 44,
393 SLT_U_SPECIAL_0_OPCODE_X0 = 84, 393 STNT1_ADD_IMM8_OPCODE_X1 = 28,
394 SLT_U_SPECIAL_0_OPCODE_X1 = 54, 394 STNT1_RRR_0_OPCODE_X1 = 45,
395 SLT_U_SPECIAL_4_OPCODE_Y0 = 3, 395 STNT2_ADD_IMM8_OPCODE_X1 = 29,
396 SLT_U_SPECIAL_4_OPCODE_Y1 = 3, 396 STNT2_RRR_0_OPCODE_X1 = 46,
397 SNEB_SPECIAL_0_OPCODE_X0 = 85, 397 STNT4_ADD_IMM8_OPCODE_X1 = 30,
398 SNEB_SPECIAL_0_OPCODE_X1 = 55, 398 STNT4_RRR_0_OPCODE_X1 = 47,
399 SNEH_SPECIAL_0_OPCODE_X0 = 86, 399 STNT_ADD_IMM8_OPCODE_X1 = 31,
400 SNEH_SPECIAL_0_OPCODE_X1 = 56, 400 STNT_RRR_0_OPCODE_X1 = 48,
401 SNE_SPECIAL_0_OPCODE_X0 = 87, 401 ST_ADD_IMM8_OPCODE_X1 = 32,
402 SNE_SPECIAL_0_OPCODE_X1 = 57, 402 ST_OPCODE_Y2 = 3,
403 SNE_SPECIAL_5_OPCODE_Y0 = 3, 403 ST_RRR_0_OPCODE_X1 = 49,
404 SNE_SPECIAL_5_OPCODE_Y1 = 3, 404 SUBXSC_RRR_0_OPCODE_X0 = 79,
405 SPECIAL_0_OPCODE_X0 = 0, 405 SUBXSC_RRR_0_OPCODE_X1 = 50,
406 SPECIAL_0_OPCODE_X1 = 1, 406 SUBX_RRR_0_OPCODE_X0 = 80,
407 SPECIAL_0_OPCODE_Y0 = 1, 407 SUBX_RRR_0_OPCODE_X1 = 51,
408 SPECIAL_0_OPCODE_Y1 = 1, 408 SUBX_RRR_0_OPCODE_Y0 = 2,
409 SPECIAL_1_OPCODE_Y0 = 2, 409 SUBX_RRR_0_OPCODE_Y1 = 2,
410 SPECIAL_1_OPCODE_Y1 = 2, 410 SUB_RRR_0_OPCODE_X0 = 81,
411 SPECIAL_2_OPCODE_Y0 = 3, 411 SUB_RRR_0_OPCODE_X1 = 52,
412 SPECIAL_2_OPCODE_Y1 = 3, 412 SUB_RRR_0_OPCODE_Y0 = 3,
413 SPECIAL_3_OPCODE_Y0 = 4, 413 SUB_RRR_0_OPCODE_Y1 = 3,
414 SPECIAL_3_OPCODE_Y1 = 4, 414 SWINT0_UNARY_OPCODE_X1 = 34,
415 SPECIAL_4_OPCODE_Y0 = 5, 415 SWINT1_UNARY_OPCODE_X1 = 35,
416 SPECIAL_4_OPCODE_Y1 = 5, 416 SWINT2_UNARY_OPCODE_X1 = 36,
417 SPECIAL_5_OPCODE_Y0 = 6, 417 SWINT3_UNARY_OPCODE_X1 = 37,
418 SPECIAL_5_OPCODE_Y1 = 6, 418 TBLIDXB0_UNARY_OPCODE_X0 = 9,
419 SPECIAL_6_OPCODE_Y0 = 7, 419 TBLIDXB0_UNARY_OPCODE_Y0 = 9,
420 SPECIAL_7_OPCODE_Y0 = 8, 420 TBLIDXB1_UNARY_OPCODE_X0 = 10,
421 SRAB_SPECIAL_0_OPCODE_X0 = 88, 421 TBLIDXB1_UNARY_OPCODE_Y0 = 10,
422 SRAB_SPECIAL_0_OPCODE_X1 = 58, 422 TBLIDXB2_UNARY_OPCODE_X0 = 11,
423 SRAH_SPECIAL_0_OPCODE_X0 = 89, 423 TBLIDXB2_UNARY_OPCODE_Y0 = 11,
424 SRAH_SPECIAL_0_OPCODE_X1 = 59, 424 TBLIDXB3_UNARY_OPCODE_X0 = 12,
425 SRAIB_SHUN_0_OPCODE_X0 = 8, 425 TBLIDXB3_UNARY_OPCODE_Y0 = 12,
426 SRAIB_SHUN_0_OPCODE_X1 = 8, 426 UNARY_RRR_0_OPCODE_X0 = 82,
427 SRAIH_SHUN_0_OPCODE_X0 = 9, 427 UNARY_RRR_0_OPCODE_X1 = 53,
428 SRAIH_SHUN_0_OPCODE_X1 = 9, 428 UNARY_RRR_1_OPCODE_Y0 = 3,
429 SRAI_SHUN_0_OPCODE_X0 = 10, 429 UNARY_RRR_1_OPCODE_Y1 = 3,
430 SRAI_SHUN_0_OPCODE_X1 = 10, 430 V1ADDI_IMM8_OPCODE_X0 = 8,
431 SRAI_SHUN_0_OPCODE_Y0 = 4, 431 V1ADDI_IMM8_OPCODE_X1 = 33,
432 SRAI_SHUN_0_OPCODE_Y1 = 4, 432 V1ADDUC_RRR_0_OPCODE_X0 = 83,
433 SRA_SPECIAL_0_OPCODE_X0 = 90, 433 V1ADDUC_RRR_0_OPCODE_X1 = 54,
434 SRA_SPECIAL_0_OPCODE_X1 = 60, 434 V1ADD_RRR_0_OPCODE_X0 = 84,
435 SRA_SPECIAL_3_OPCODE_Y0 = 3, 435 V1ADD_RRR_0_OPCODE_X1 = 55,
436 SRA_SPECIAL_3_OPCODE_Y1 = 3, 436 V1ADIFFU_RRR_0_OPCODE_X0 = 85,
437 SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, 437 V1AVGU_RRR_0_OPCODE_X0 = 86,
438 SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, 438 V1CMPEQI_IMM8_OPCODE_X0 = 9,
439 SUBB_SPECIAL_0_OPCODE_X0 = 91, 439 V1CMPEQI_IMM8_OPCODE_X1 = 34,
440 SUBB_SPECIAL_0_OPCODE_X1 = 61, 440 V1CMPEQ_RRR_0_OPCODE_X0 = 87,
441 SUBHS_SPECIAL_0_OPCODE_X0 = 101, 441 V1CMPEQ_RRR_0_OPCODE_X1 = 56,
442 SUBHS_SPECIAL_0_OPCODE_X1 = 71, 442 V1CMPLES_RRR_0_OPCODE_X0 = 88,
443 SUBH_SPECIAL_0_OPCODE_X0 = 92, 443 V1CMPLES_RRR_0_OPCODE_X1 = 57,
444 SUBH_SPECIAL_0_OPCODE_X1 = 62, 444 V1CMPLEU_RRR_0_OPCODE_X0 = 89,
445 SUBS_SPECIAL_0_OPCODE_X0 = 97, 445 V1CMPLEU_RRR_0_OPCODE_X1 = 58,
446 SUBS_SPECIAL_0_OPCODE_X1 = 67, 446 V1CMPLTSI_IMM8_OPCODE_X0 = 10,
447 SUB_SPECIAL_0_OPCODE_X0 = 93, 447 V1CMPLTSI_IMM8_OPCODE_X1 = 35,
448 SUB_SPECIAL_0_OPCODE_X1 = 63, 448 V1CMPLTS_RRR_0_OPCODE_X0 = 90,
449 SUB_SPECIAL_0_OPCODE_Y0 = 3, 449 V1CMPLTS_RRR_0_OPCODE_X1 = 59,
450 SUB_SPECIAL_0_OPCODE_Y1 = 3, 450 V1CMPLTUI_IMM8_OPCODE_X0 = 11,
451 SWADD_IMM_0_OPCODE_X1 = 30, 451 V1CMPLTUI_IMM8_OPCODE_X1 = 36,
452 SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, 452 V1CMPLTU_RRR_0_OPCODE_X0 = 91,
453 SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, 453 V1CMPLTU_RRR_0_OPCODE_X1 = 60,
454 SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, 454 V1CMPNE_RRR_0_OPCODE_X0 = 92,
455 SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, 455 V1CMPNE_RRR_0_OPCODE_X1 = 61,
456 SW_OPCODE_Y2 = 7, 456 V1DDOTPUA_RRR_0_OPCODE_X0 = 161,
457 SW_SPECIAL_0_OPCODE_X1 = 64, 457 V1DDOTPUSA_RRR_0_OPCODE_X0 = 93,
458 TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, 458 V1DDOTPUS_RRR_0_OPCODE_X0 = 94,
459 TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, 459 V1DDOTPU_RRR_0_OPCODE_X0 = 162,
460 TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, 460 V1DOTPA_RRR_0_OPCODE_X0 = 95,
461 TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, 461 V1DOTPUA_RRR_0_OPCODE_X0 = 163,
462 TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, 462 V1DOTPUSA_RRR_0_OPCODE_X0 = 96,
463 TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, 463 V1DOTPUS_RRR_0_OPCODE_X0 = 97,
464 TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, 464 V1DOTPU_RRR_0_OPCODE_X0 = 164,
465 TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, 465 V1DOTP_RRR_0_OPCODE_X0 = 98,
466 TNS_UN_0_SHUN_0_OPCODE_X1 = 22, 466 V1INT_H_RRR_0_OPCODE_X0 = 99,
467 UN_0_SHUN_0_OPCODE_X0 = 11, 467 V1INT_H_RRR_0_OPCODE_X1 = 62,
468 UN_0_SHUN_0_OPCODE_X1 = 11, 468 V1INT_L_RRR_0_OPCODE_X0 = 100,
469 UN_0_SHUN_0_OPCODE_Y0 = 5, 469 V1INT_L_RRR_0_OPCODE_X1 = 63,
470 UN_0_SHUN_0_OPCODE_Y1 = 5, 470 V1MAXUI_IMM8_OPCODE_X0 = 12,
471 WH64_UN_0_SHUN_0_OPCODE_X1 = 23, 471 V1MAXUI_IMM8_OPCODE_X1 = 37,
472 XORI_IMM_0_OPCODE_X0 = 2, 472 V1MAXU_RRR_0_OPCODE_X0 = 101,
473 XORI_IMM_0_OPCODE_X1 = 21, 473 V1MAXU_RRR_0_OPCODE_X1 = 64,
474 XOR_SPECIAL_0_OPCODE_X0 = 94, 474 V1MINUI_IMM8_OPCODE_X0 = 13,
475 XOR_SPECIAL_0_OPCODE_X1 = 65, 475 V1MINUI_IMM8_OPCODE_X1 = 38,
476 XOR_SPECIAL_2_OPCODE_Y0 = 3, 476 V1MINU_RRR_0_OPCODE_X0 = 102,
477 XOR_SPECIAL_2_OPCODE_Y1 = 3 477 V1MINU_RRR_0_OPCODE_X1 = 65,
478 V1MNZ_RRR_0_OPCODE_X0 = 103,
479 V1MNZ_RRR_0_OPCODE_X1 = 66,
480 V1MULTU_RRR_0_OPCODE_X0 = 104,
481 V1MULUS_RRR_0_OPCODE_X0 = 105,
482 V1MULU_RRR_0_OPCODE_X0 = 106,
483 V1MZ_RRR_0_OPCODE_X0 = 107,
484 V1MZ_RRR_0_OPCODE_X1 = 67,
485 V1SADAU_RRR_0_OPCODE_X0 = 108,
486 V1SADU_RRR_0_OPCODE_X0 = 109,
487 V1SHLI_SHIFT_OPCODE_X0 = 7,
488 V1SHLI_SHIFT_OPCODE_X1 = 7,
489 V1SHL_RRR_0_OPCODE_X0 = 110,
490 V1SHL_RRR_0_OPCODE_X1 = 68,
491 V1SHRSI_SHIFT_OPCODE_X0 = 8,
492 V1SHRSI_SHIFT_OPCODE_X1 = 8,
493 V1SHRS_RRR_0_OPCODE_X0 = 111,
494 V1SHRS_RRR_0_OPCODE_X1 = 69,
495 V1SHRUI_SHIFT_OPCODE_X0 = 9,
496 V1SHRUI_SHIFT_OPCODE_X1 = 9,
497 V1SHRU_RRR_0_OPCODE_X0 = 112,
498 V1SHRU_RRR_0_OPCODE_X1 = 70,
499 V1SUBUC_RRR_0_OPCODE_X0 = 113,
500 V1SUBUC_RRR_0_OPCODE_X1 = 71,
501 V1SUB_RRR_0_OPCODE_X0 = 114,
502 V1SUB_RRR_0_OPCODE_X1 = 72,
503 V2ADDI_IMM8_OPCODE_X0 = 14,
504 V2ADDI_IMM8_OPCODE_X1 = 39,
505 V2ADDSC_RRR_0_OPCODE_X0 = 115,
506 V2ADDSC_RRR_0_OPCODE_X1 = 73,
507 V2ADD_RRR_0_OPCODE_X0 = 116,
508 V2ADD_RRR_0_OPCODE_X1 = 74,
509 V2ADIFFS_RRR_0_OPCODE_X0 = 117,
510 V2AVGS_RRR_0_OPCODE_X0 = 118,
511 V2CMPEQI_IMM8_OPCODE_X0 = 15,
512 V2CMPEQI_IMM8_OPCODE_X1 = 40,
513 V2CMPEQ_RRR_0_OPCODE_X0 = 119,
514 V2CMPEQ_RRR_0_OPCODE_X1 = 75,
515 V2CMPLES_RRR_0_OPCODE_X0 = 120,
516 V2CMPLES_RRR_0_OPCODE_X1 = 76,
517 V2CMPLEU_RRR_0_OPCODE_X0 = 121,
518 V2CMPLEU_RRR_0_OPCODE_X1 = 77,
519 V2CMPLTSI_IMM8_OPCODE_X0 = 16,
520 V2CMPLTSI_IMM8_OPCODE_X1 = 41,
521 V2CMPLTS_RRR_0_OPCODE_X0 = 122,
522 V2CMPLTS_RRR_0_OPCODE_X1 = 78,
523 V2CMPLTUI_IMM8_OPCODE_X0 = 17,
524 V2CMPLTUI_IMM8_OPCODE_X1 = 42,
525 V2CMPLTU_RRR_0_OPCODE_X0 = 123,
526 V2CMPLTU_RRR_0_OPCODE_X1 = 79,
527 V2CMPNE_RRR_0_OPCODE_X0 = 124,
528 V2CMPNE_RRR_0_OPCODE_X1 = 80,
529 V2DOTPA_RRR_0_OPCODE_X0 = 125,
530 V2DOTP_RRR_0_OPCODE_X0 = 126,
531 V2INT_H_RRR_0_OPCODE_X0 = 127,
532 V2INT_H_RRR_0_OPCODE_X1 = 81,
533 V2INT_L_RRR_0_OPCODE_X0 = 128,
534 V2INT_L_RRR_0_OPCODE_X1 = 82,
535 V2MAXSI_IMM8_OPCODE_X0 = 18,
536 V2MAXSI_IMM8_OPCODE_X1 = 43,
537 V2MAXS_RRR_0_OPCODE_X0 = 129,
538 V2MAXS_RRR_0_OPCODE_X1 = 83,
539 V2MINSI_IMM8_OPCODE_X0 = 19,
540 V2MINSI_IMM8_OPCODE_X1 = 44,
541 V2MINS_RRR_0_OPCODE_X0 = 130,
542 V2MINS_RRR_0_OPCODE_X1 = 84,
543 V2MNZ_RRR_0_OPCODE_X0 = 131,
544 V2MNZ_RRR_0_OPCODE_X1 = 85,
545 V2MULFSC_RRR_0_OPCODE_X0 = 132,
546 V2MULS_RRR_0_OPCODE_X0 = 133,
547 V2MULTS_RRR_0_OPCODE_X0 = 134,
548 V2MZ_RRR_0_OPCODE_X0 = 135,
549 V2MZ_RRR_0_OPCODE_X1 = 86,
550 V2PACKH_RRR_0_OPCODE_X0 = 136,
551 V2PACKH_RRR_0_OPCODE_X1 = 87,
552 V2PACKL_RRR_0_OPCODE_X0 = 137,
553 V2PACKL_RRR_0_OPCODE_X1 = 88,
554 V2PACKUC_RRR_0_OPCODE_X0 = 138,
555 V2PACKUC_RRR_0_OPCODE_X1 = 89,
556 V2SADAS_RRR_0_OPCODE_X0 = 139,
557 V2SADAU_RRR_0_OPCODE_X0 = 140,
558 V2SADS_RRR_0_OPCODE_X0 = 141,
559 V2SADU_RRR_0_OPCODE_X0 = 142,
560 V2SHLI_SHIFT_OPCODE_X0 = 10,
561 V2SHLI_SHIFT_OPCODE_X1 = 10,
562 V2SHLSC_RRR_0_OPCODE_X0 = 143,
563 V2SHLSC_RRR_0_OPCODE_X1 = 90,
564 V2SHL_RRR_0_OPCODE_X0 = 144,
565 V2SHL_RRR_0_OPCODE_X1 = 91,
566 V2SHRSI_SHIFT_OPCODE_X0 = 11,
567 V2SHRSI_SHIFT_OPCODE_X1 = 11,
568 V2SHRS_RRR_0_OPCODE_X0 = 145,
569 V2SHRS_RRR_0_OPCODE_X1 = 92,
570 V2SHRUI_SHIFT_OPCODE_X0 = 12,
571 V2SHRUI_SHIFT_OPCODE_X1 = 12,
572 V2SHRU_RRR_0_OPCODE_X0 = 146,
573 V2SHRU_RRR_0_OPCODE_X1 = 93,
574 V2SUBSC_RRR_0_OPCODE_X0 = 147,
575 V2SUBSC_RRR_0_OPCODE_X1 = 94,
576 V2SUB_RRR_0_OPCODE_X0 = 148,
577 V2SUB_RRR_0_OPCODE_X1 = 95,
578 V4ADDSC_RRR_0_OPCODE_X0 = 149,
579 V4ADDSC_RRR_0_OPCODE_X1 = 96,
580 V4ADD_RRR_0_OPCODE_X0 = 150,
581 V4ADD_RRR_0_OPCODE_X1 = 97,
582 V4INT_H_RRR_0_OPCODE_X0 = 151,
583 V4INT_H_RRR_0_OPCODE_X1 = 98,
584 V4INT_L_RRR_0_OPCODE_X0 = 152,
585 V4INT_L_RRR_0_OPCODE_X1 = 99,
586 V4PACKSC_RRR_0_OPCODE_X0 = 153,
587 V4PACKSC_RRR_0_OPCODE_X1 = 100,
588 V4SHLSC_RRR_0_OPCODE_X0 = 154,
589 V4SHLSC_RRR_0_OPCODE_X1 = 101,
590 V4SHL_RRR_0_OPCODE_X0 = 155,
591 V4SHL_RRR_0_OPCODE_X1 = 102,
592 V4SHRS_RRR_0_OPCODE_X0 = 156,
593 V4SHRS_RRR_0_OPCODE_X1 = 103,
594 V4SHRU_RRR_0_OPCODE_X0 = 157,
595 V4SHRU_RRR_0_OPCODE_X1 = 104,
596 V4SUBSC_RRR_0_OPCODE_X0 = 158,
597 V4SUBSC_RRR_0_OPCODE_X1 = 105,
598 V4SUB_RRR_0_OPCODE_X0 = 159,
599 V4SUB_RRR_0_OPCODE_X1 = 106,
600 WH64_UNARY_OPCODE_X1 = 38,
601 XORI_IMM8_OPCODE_X0 = 20,
602 XORI_IMM8_OPCODE_X1 = 45,
603 XOR_RRR_0_OPCODE_X0 = 160,
604 XOR_RRR_0_OPCODE_X1 = 107,
605 XOR_RRR_5_OPCODE_Y0 = 3,
606 XOR_RRR_5_OPCODE_Y1 = 3
478}; 607};
479 608
480#endif /* !_TILE_OPCODE_CONSTANTS_H */ 609#endif /* !_TILE_OPCODE_CONSTANTS_H */
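
Editor's illustration (not part of the patch): the constants above are meant to be fed to the create_*() helpers from the companion opcode header earlier in this patch. A minimal sketch of encoding a register-register-register instruction in the X1 slot follows; pairing the group opcode RRR_0_OPCODE_X1 with the extension ADD_RRR_0_OPCODE_X1 is inferred from the naming convention, and create_Dest_X1() is assumed to exist alongside the helpers shown earlier.

static __inline tilegx_bundle_bits
example_encode_add_x1(int dest, int srca, int srcb)
{
	/* Select the RRR_0 group in X1, then pick the "add" variant via the RRR extension. */
	return create_Opcode_X1(RRR_0_OPCODE_X1) |
	       create_RRROpcodeExtension_X1(ADD_RRR_0_OPCODE_X1) |
	       create_Dest_X1(dest) |
	       create_SrcA_X1(srca) |
	       create_SrcB_X1(srcb);
}
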
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 3eb53525bf9d..db93518fac03 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -16,7 +16,8 @@
16#define _ASM_TILE_PAGE_H 16#define _ASM_TILE_PAGE_H
17 17
18#include <linux/const.h> 18#include <linux/const.h>
19#include <hv/pagesize.h> 19#include <hv/hypervisor.h>
20#include <arch/chip.h>
20 21
21/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ 22/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
22#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL 23#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
@@ -28,8 +29,6 @@
28#define PAGE_MASK (~(PAGE_SIZE - 1)) 29#define PAGE_MASK (~(PAGE_SIZE - 1))
29#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 30#define HPAGE_MASK (~(HPAGE_SIZE - 1))
30 31
31#ifdef __KERNEL__
32
33/* 32/*
34 * If the Kconfig doesn't specify, set a maximum zone order that 33 * If the Kconfig doesn't specify, set a maximum zone order that
35 * is enough so that we can create huge pages from small pages given 34 * is enough so that we can create huge pages from small pages given
@@ -39,9 +38,6 @@
39#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1) 38#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
40#endif 39#endif
41 40
42#include <hv/hypervisor.h>
43#include <arch/chip.h>
44
45#ifndef __ASSEMBLY__ 41#ifndef __ASSEMBLY__
46 42
47#include <linux/types.h> 43#include <linux/types.h>
@@ -91,6 +87,10 @@ typedef struct page *pgtable_t;
91/* Must be a macro since it is used to create constants. */ 87/* Must be a macro since it is used to create constants. */
92#define __pgprot(val) hv_pte(val) 88#define __pgprot(val) hv_pte(val)
93 89
90/* Rarely-used initializers, typically with a "zero" value. */
91#define __pte(x) hv_pte(x)
92#define __pgd(x) hv_pte(x)
93
94static inline u64 pgprot_val(pgprot_t pgprot) 94static inline u64 pgprot_val(pgprot_t pgprot)
95{ 95{
96 return hv_pte_val(pgprot); 96 return hv_pte_val(pgprot);
@@ -110,6 +110,8 @@ static inline u64 pgd_val(pgd_t pgd)
110 110
111typedef HV_PTE pmd_t; 111typedef HV_PTE pmd_t;
112 112
113#define __pmd(x) hv_pte(x)
114
113static inline u64 pmd_val(pmd_t pmd) 115static inline u64 pmd_val(pmd_t pmd)
114{ 116{
115 return hv_pte_val(pmd); 117 return hv_pte_val(pmd);
@@ -318,7 +320,7 @@ static inline int pfn_valid(unsigned long pfn)
318 320
319/* Provide as macros since these require some other headers included. */ 321/* Provide as macros since these require some other headers included. */
320#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) 322#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
321#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr)) 323#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
322#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page)) 324#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
323 325
324struct mm_struct; 326struct mm_struct;
@@ -331,6 +333,4 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
331 333
332#include <asm-generic/memory_model.h> 334#include <asm-generic/memory_model.h>
333 335
334#endif /* __KERNEL__ */
335
336#endif /* _ASM_TILE_PAGE_H */ 336#endif /* _ASM_TILE_PAGE_H */
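For illustration, the conversions page.h stitches together are just shifts around a direct-map offset; the cast added to virt_to_page() above lets callers pass either a pointer or an integer address. A standalone sketch, assuming PAGE_SHIFT of 16 (HV_LOG2_PAGE_SIZE_SMALL) and an invented PAGE_OFFSET; both values and the helper names are illustrative, not the exact tilegx configuration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT   16                     /* HV_LOG2_PAGE_SIZE_SMALL */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_OFFSET  0xfffffff700000000UL   /* illustrative value only */

/* The kaddr -> pfn step that virt_to_page() builds on. */
static unsigned long kaddr_to_pfn(const void *kaddr)
{
        return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}

/* The reverse step used by page_to_virt(). */
static void *pfn_to_kaddr(unsigned long pfn)
{
        return (void *)(PAGE_OFFSET + (pfn << PAGE_SHIFT));
}

int main(void)
{
        void *kaddr = (void *)(PAGE_OFFSET + 3 * PAGE_SIZE + 0x123);
        unsigned long pfn = kaddr_to_pfn(kaddr);
        uint64_t pa = (uint64_t)pfn << PAGE_SHIFT;      /* page_to_pa() analogue */

        printf("kaddr %p -> pfn %lu, pa %#jx, page start %p\n",
               kaddr, pfn, (uintmax_t)pa, pfn_to_kaddr(pfn));
        return 0;
}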
diff --git a/arch/tile/include/asm/parport.h b/arch/tile/include/asm/parport.h
new file mode 100644
index 000000000000..cf252af64590
--- /dev/null
+++ b/arch/tile/include/asm/parport.h
@@ -0,0 +1 @@
#include <asm-generic/parport.h>
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index c3fc458a0d32..7f03cefed1b9 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -46,7 +46,8 @@ struct pci_controller {
46 */ 46 */
47#define PCI_DMA_BUS_IS_PHYS 1 47#define PCI_DMA_BUS_IS_PHYS 1
48 48
49int __init tile_pci_init(void); 49int __devinit tile_pci_init(void);
50int __devinit pcibios_init(void);
50 51
51void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 52void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
52static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} 53static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
new file mode 100644
index 000000000000..fd80328523b4
--- /dev/null
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#ifndef _ASM_TILE_PGTABLE_64_H
17#define _ASM_TILE_PGTABLE_64_H
18
19/* The level-0 page table breaks the address space into 32-bit chunks. */
20#define PGDIR_SHIFT HV_LOG2_L1_SPAN
21#define PGDIR_SIZE HV_L1_SPAN
22#define PGDIR_MASK (~(PGDIR_SIZE-1))
23#define PTRS_PER_PGD HV_L0_ENTRIES
24#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
25
26/*
27 * The level-1 index is defined by the huge page size. A PMD is composed
28 * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
29 */
30#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE
31#define PMD_SIZE HV_PAGE_SIZE_LARGE
32#define PMD_MASK (~(PMD_SIZE-1))
33#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT))
34#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t))
35
36/*
37 * The level-2 index is defined by the difference between the huge
38 * page size and the normal page size. A PTE is composed of
39 * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
40 * Note that the hypervisor docs use PTE for what we call pte_t, so
41 * this nomenclature is somewhat confusing.
42 */
43#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
44#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
45
46/*
47 * Align the vmalloc area to an L2 page table, and leave a guard page
48 * at the beginning and end. The vmalloc code also puts in an internal
49 * guard page between each allocation.
50 */
51#define _VMALLOC_END HUGE_VMAP_BASE
52#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
53#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
54
55#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
56
57#ifndef __ASSEMBLY__
58
59/* We have no pud since we are a three-level page table. */
60#include <asm-generic/pgtable-nopud.h>
61
62static inline int pud_none(pud_t pud)
63{
64 return pud_val(pud) == 0;
65}
66
67static inline int pud_present(pud_t pud)
68{
69 return pud_val(pud) & _PAGE_PRESENT;
70}
71
72#define pmd_ERROR(e) \
73 pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
74
75static inline void pud_clear(pud_t *pudp)
76{
77 __pte_clear(&pudp->pgd);
78}
79
80static inline int pud_bad(pud_t pud)
81{
82 return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
83}
84
85/* Return the page-table frame number (ptfn) that a pud_t points at. */
86#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
87
88/*
89 * A given kernel pud_t maps to a kernel pmd_t table at a specific
90 * virtual address. Since kernel pmd_t tables can be aligned at
91 * sub-page granularity, this macro can return non-page-aligned
92 * pointers, despite its name.
93 */
94#define pud_page_vaddr(pud) \
95 (__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
96
97/*
98 * A pud_t points to a pmd_t array. Since we can have multiple per
99 * page, we don't have a one-to-one mapping of pud_t's to pages.
100 */
101#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
102
103static inline unsigned long pud_index(unsigned long address)
104{
105 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
106}
107
108#define pmd_offset(pud, address) \
109 ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
110
111static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
112{
113 set_pte(pmdp, pmdval);
114}
115
116/* Create a pmd from a PTFN and pgprot. */
117static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
118{
119 return hv_pte_set_ptfn(prot, ptfn);
120}
121
122/* Return the page-table frame number (ptfn) that a pmd_t points at. */
123static inline unsigned long pmd_ptfn(pmd_t pmd)
124{
125 return hv_pte_get_ptfn(pmd);
126}
127
128static inline void pmd_clear(pmd_t *pmdp)
129{
130 __pte_clear(pmdp);
131}
132
133/* Normalize an address to having the correct high bits set. */
134#define pgd_addr_normalize pgd_addr_normalize
135static inline unsigned long pgd_addr_normalize(unsigned long addr)
136{
137 return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
138 (CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
139}
140
141/* We don't define any pgds for these addresses. */
142static inline int pgd_addr_invalid(unsigned long addr)
143{
144 return addr >= MEM_HV_START ||
145 (addr > MEM_LOW_END && addr < MEM_HIGH_START);
146}
147
148/*
149 * Use atomic instructions to provide atomicity against the hypervisor.
150 */
151#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
152static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
153 unsigned long addr, pte_t *ptep)
154{
155 return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
156 HV_PTE_INDEX_ACCESSED) & 0x1;
157}
158
159#define __HAVE_ARCH_PTEP_SET_WRPROTECT
160static inline void ptep_set_wrprotect(struct mm_struct *mm,
161 unsigned long addr, pte_t *ptep)
162{
163 __insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
164}
165
166#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
167static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
168 unsigned long addr, pte_t *ptep)
169{
170 return hv_pte(__insn_exch(&ptep->val, 0UL));
171}
172
173#endif /* __ASSEMBLY__ */
174
175#endif /* _ASM_TILE_PGTABLE_64_H */
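Putting numbers on the three-level layout above: taking HV_LOG2_PAGE_SIZE_SMALL = 16 and HV_LOG2_PAGE_SIZE_LARGE = 24 (defined in the hypervisor.h hunk later in this patch) and assuming HV_LOG2_L1_SPAN is 32 as the "32-bit chunks" comment suggests, each of the two lower levels holds 256 entries. A standalone sketch of that arithmetic:

#include <stdio.h>

#define LOG2_PAGE_SIZE_SMALL 16   /* HV_LOG2_PAGE_SIZE_SMALL */
#define LOG2_PAGE_SIZE_LARGE 24   /* HV_LOG2_PAGE_SIZE_LARGE */
#define LOG2_L1_SPAN         32   /* assumed from the "32-bit chunks" comment */

int main(void)
{
        unsigned long long pgdir_size   = 1ULL << LOG2_L1_SPAN;
        unsigned long long pmd_size     = 1ULL << LOG2_PAGE_SIZE_LARGE;
        unsigned long long ptrs_per_pmd =
                1ULL << (LOG2_L1_SPAN - LOG2_PAGE_SIZE_LARGE);
        unsigned long long ptrs_per_pte =
                1ULL << (LOG2_PAGE_SIZE_LARGE - LOG2_PAGE_SIZE_SMALL);

        printf("each level-0 entry spans %llu MB; huge pages are %llu MB\n",
               pgdir_size >> 20, pmd_size >> 20);
        printf("PTRS_PER_PMD = %llu, PTRS_PER_PTE = %llu\n",
               ptrs_per_pmd, ptrs_per_pte);
        return 0;
}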
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index e6889474038a..34c1e01ffb5e 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -215,6 +215,8 @@ static inline void release_thread(struct task_struct *dead_task)
215 215
216extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 216extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
217 217
218extern int do_work_pending(struct pt_regs *regs, u32 flags);
219
218 220
219/* 221/*
220 * Return saved (kernel) PC of a blocked thread. 222 * Return saved (kernel) PC of a blocked thread.
@@ -255,10 +257,6 @@ static inline void cpu_relax(void)
255 barrier(); 257 barrier();
256} 258}
257 259
258struct siginfo;
259extern void arch_coredump_signal(struct siginfo *, struct pt_regs *);
260#define arch_coredump_signal arch_coredump_signal
261
262/* Info on this processor (see fs/proc/cpuinfo.c) */ 260/* Info on this processor (see fs/proc/cpuinfo.c) */
263struct seq_operations; 261struct seq_operations;
264extern const struct seq_operations cpuinfo_op; 262extern const struct seq_operations cpuinfo_op;
@@ -269,9 +267,6 @@ extern char chip_model[64];
269/* Data on which physical memory controller corresponds to which NUMA node. */ 267/* Data on which physical memory controller corresponds to which NUMA node. */
270extern int node_controller[]; 268extern int node_controller[];
271 269
272/* Do we dump information to the console when a user application crashes? */
273extern int show_crashinfo;
274
275#if CHIP_HAS_CBOX_HOME_MAP() 270#if CHIP_HAS_CBOX_HOME_MAP()
276/* Does the heap allocator return hash-for-home pages by default? */ 271/* Does the heap allocator return hash-for-home pages by default? */
277extern int hash_default; 272extern int hash_default;
diff --git a/arch/tile/include/asm/serial.h b/arch/tile/include/asm/serial.h
new file mode 100644
index 000000000000..a0cb0caff152
--- /dev/null
+++ b/arch/tile/include/asm/serial.h
@@ -0,0 +1 @@
#include <asm-generic/serial.h>
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index 81d92a45cd4b..1e1e616783eb 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -28,6 +28,10 @@ struct pt_regs;
28int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); 28int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
29int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); 29int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
30void do_signal(struct pt_regs *regs); 30void do_signal(struct pt_regs *regs);
31void signal_fault(const char *type, struct pt_regs *,
32 void __user *frame, int sig);
33void trace_unhandled_signal(const char *type, struct pt_regs *regs,
34 unsigned long address, int signo);
31#endif 35#endif
32 36
33#endif /* _ASM_TILE_SIGNAL_H */ 37#endif /* _ASM_TILE_SIGNAL_H */
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
new file mode 100644
index 000000000000..72be5904e020
--- /dev/null
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
15 * (the type definitions are in asm/spinlock_types.h)
16 */
17
18#ifndef _ASM_TILE_SPINLOCK_64_H
19#define _ASM_TILE_SPINLOCK_64_H
20
21/* Shifts and masks for the various fields in "lock". */
22#define __ARCH_SPIN_CURRENT_SHIFT 17
23#define __ARCH_SPIN_NEXT_MASK 0x7fff
24#define __ARCH_SPIN_NEXT_OVERFLOW 0x8000
25
26/*
27 * Return the "current" portion of a ticket lock value,
28 * i.e. the number that currently owns the lock.
29 */
30static inline int arch_spin_current(u32 val)
31{
32 return val >> __ARCH_SPIN_CURRENT_SHIFT;
33}
34
35/*
36 * Return the "next" portion of a ticket lock value,
37 * i.e. the number that the next task to try to acquire the lock will get.
38 */
39static inline int arch_spin_next(u32 val)
40{
41 return val & __ARCH_SPIN_NEXT_MASK;
42}
43
44/* The lock is locked if a task would have to wait to get it. */
45static inline int arch_spin_is_locked(arch_spinlock_t *lock)
46{
47 u32 val = lock->lock;
48 return arch_spin_current(val) != arch_spin_next(val);
49}
50
51/* Bump the current ticket so the next task owns the lock. */
52static inline void arch_spin_unlock(arch_spinlock_t *lock)
53{
54 wmb(); /* guarantee anything modified under the lock is visible */
55 __insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
56}
57
58void arch_spin_unlock_wait(arch_spinlock_t *lock);
59
60void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
61
62/* Grab the "next" ticket number and bump it atomically.
63 * If the current ticket is not ours, go to the slow path.
64 * We also take the slow path if the "next" value overflows.
65 */
66static inline void arch_spin_lock(arch_spinlock_t *lock)
67{
68 u32 val = __insn_fetchadd4(&lock->lock, 1);
69 u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
70 if (unlikely(arch_spin_current(val) != ticket))
71 arch_spin_lock_slow(lock, ticket);
72}
73
74/* Try to get the lock, and return whether we succeeded. */
75int arch_spin_trylock(arch_spinlock_t *lock);
76
77/* We cannot take an interrupt after getting a ticket, so don't enable them. */
78#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
79
80/*
81 * Read-write spinlocks, allowing multiple readers
82 * but only one writer.
83 *
84 * We use fetchadd() for readers, and fetchor() with the sign bit
85 * for writers.
86 */
87
88#define __WRITE_LOCK_BIT (1 << 31)
89
90static inline int arch_write_val_locked(int val)
91{
92 return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
93}
94
95/**
96 * read_can_lock - would read_trylock() succeed?
97 * @lock: the rwlock in question.
98 */
99static inline int arch_read_can_lock(arch_rwlock_t *rw)
100{
101 return !arch_write_val_locked(rw->lock);
102}
103
104/**
105 * write_can_lock - would write_trylock() succeed?
106 * @lock: the rwlock in question.
107 */
108static inline int arch_write_can_lock(arch_rwlock_t *rw)
109{
110 return rw->lock == 0;
111}
112
113extern void __read_lock_failed(arch_rwlock_t *rw);
114
115static inline void arch_read_lock(arch_rwlock_t *rw)
116{
117 u32 val = __insn_fetchaddgez4(&rw->lock, 1);
118 if (unlikely(arch_write_val_locked(val)))
119 __read_lock_failed(rw);
120}
121
122extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
123
124static inline void arch_write_lock(arch_rwlock_t *rw)
125{
126 u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
127 if (unlikely(val != 0))
128 __write_lock_failed(rw, val);
129}
130
131static inline void arch_read_unlock(arch_rwlock_t *rw)
132{
133 __insn_mf();
134 __insn_fetchadd4(&rw->lock, -1);
135}
136
137static inline void arch_write_unlock(arch_rwlock_t *rw)
138{
139 __insn_mf();
140 rw->lock = 0;
141}
142
143static inline int arch_read_trylock(arch_rwlock_t *rw)
144{
145 return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
146}
147
148static inline int arch_write_trylock(arch_rwlock_t *rw)
149{
150 u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
151 if (likely(val == 0))
152 return 1;
153 if (!arch_write_val_locked(val))
154 __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
155 return 0;
156}
157
158#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
159#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
160
161#endif /* _ASM_TILE_SPINLOCK_64_H */
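To make the ticket layout above concrete: the "next" ticket lives in the low bits (mask 0x7fff, overflow bit 0x8000), the "current" owner sits at shift 17, and unlock is a fetchadd of 1 << 17. A single-threaded sketch of that bookkeeping, with plain variables standing in for the fetchadd4 atomics:

#include <stdio.h>
#include <stdint.h>

#define CURRENT_SHIFT 17        /* __ARCH_SPIN_CURRENT_SHIFT */
#define NEXT_MASK     0x7fff    /* __ARCH_SPIN_NEXT_MASK */

static int lock_current(uint32_t val) { return val >> CURRENT_SHIFT; }
static int lock_next(uint32_t val)    { return val & NEXT_MASK; }

int main(void)
{
        uint32_t lock = 0;
        uint32_t a, b;

        /* Two lockers take tickets; the real code uses fetchadd4(&lock, 1). */
        a = lock;
        lock += 1;
        b = lock;
        lock += 1;
        printf("A holds ticket %d, B holds ticket %d, current owner %d\n",
               lock_next(a), lock_next(b), lock_current(lock));

        /* Unlock by A: bump "current" so that B's ticket now matches. */
        lock += 1U << CURRENT_SHIFT;
        printf("after unlock: current %d, B's ticket %d\n",
               lock_current(lock), lock_next(b));
        return 0;
}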
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index b16e5db8f0e7..c0db34d56be3 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1,4 +1,4 @@
1#ifdef CONFIG_COMPAT 1#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
2#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */ 2#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
3#endif 3#endif
4#include <asm-generic/stat.h> 4#include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/swab.h b/arch/tile/include/asm/swab.h
index 25c686a00f1d..7c37b38f6c8d 100644
--- a/arch/tile/include/asm/swab.h
+++ b/arch/tile/include/asm/swab.h
@@ -18,12 +18,6 @@
18/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */ 18/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */
19#define __arch_swab32(x) __builtin_bswap32(x) 19#define __arch_swab32(x) __builtin_bswap32(x)
20#define __arch_swab64(x) __builtin_bswap64(x) 20#define __arch_swab64(x) __builtin_bswap64(x)
21
22/* Use the variant that is natural for the wordsize. */
23#ifdef CONFIG_64BIT
24#define __arch_swab16(x) (__builtin_bswap64(x) >> 48)
25#else
26#define __arch_swab16(x) (__builtin_bswap32(x) >> 16) 21#define __arch_swab16(x) (__builtin_bswap32(x) >> 16)
27#endif
28 22
29#endif /* _ASM_TILE_SWAB_H */ 23#endif /* _ASM_TILE_SWAB_H */
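The retained 16-bit swap works on either word size because byte-swapping the zero-extended 32-bit value and shifting right by 16 leaves exactly the swapped half-word, which is presumably why the CONFIG_64BIT special case could go. A quick standalone check, relying on the same GCC __builtin_bswap32 the header uses:

#include <stdio.h>
#include <stdint.h>

static uint16_t arch_swab16(uint16_t x)
{
        /* bswap32(0x0000ABCD) == 0xCDAB0000; shifting by 16 keeps 0xCDAB. */
        return (uint16_t)(__builtin_bswap32(x) >> 16);
}

int main(void)
{
        printf("%#x -> %#x\n", 0x1234, arch_swab16(0x1234));   /* 0x3412 */
        return 0;
}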
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 3405b52853b8..bc4f562bd459 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -125,6 +125,7 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
125#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ 125#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
126#define TIF_SECCOMP 6 /* secure computing */ 126#define TIF_SECCOMP 6 /* secure computing */
127#define TIF_MEMDIE 7 /* OOM killer at work */ 127#define TIF_MEMDIE 7 /* OOM killer at work */
128#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
128 129
129#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 130#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
130#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 131#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -134,10 +135,12 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
134#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 135#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
135#define _TIF_SECCOMP (1<<TIF_SECCOMP) 136#define _TIF_SECCOMP (1<<TIF_SECCOMP)
136#define _TIF_MEMDIE (1<<TIF_MEMDIE) 137#define _TIF_MEMDIE (1<<TIF_MEMDIE)
138#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
137 139
138/* Work to do on any return to user space. */ 140/* Work to do on any return to user space. */
139#define _TIF_ALLWORK_MASK \ 141#define _TIF_ALLWORK_MASK \
140 (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB) 142 (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
143 _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
141 144
142/* 145/*
143 * Thread-synchronous status. 146 * Thread-synchronous status.
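A small sketch of how the new TIF_NOTIFY_RESUME bit (bit 8, from the hunk above) folds into _TIF_ALLWORK_MASK and gets tested on the return-to-user path; the other bit positions below are illustrative assumptions, not taken from this patch:

#include <stdio.h>

#define TIF_SIGPENDING    0    /* assumed for illustration */
#define TIF_NEED_RESCHED  1    /* assumed for illustration */
#define TIF_SINGLESTEP    2    /* assumed for illustration */
#define TIF_ASYNC_TLB     3    /* assumed for illustration */
#define TIF_NOTIFY_RESUME 8    /* from the hunk above */

#define _TIF_ALLWORK_MASK \
        ((1 << TIF_SIGPENDING) | (1 << TIF_NEED_RESCHED) | \
         (1 << TIF_SINGLESTEP) | (1 << TIF_ASYNC_TLB) | \
         (1 << TIF_NOTIFY_RESUME))

int main(void)
{
        unsigned int flags = 1 << TIF_NOTIFY_RESUME;   /* e.g. a resume callback */

        if (flags & _TIF_ALLWORK_MASK)
                printf("work pending: flags %#x, mask %#x\n",
                       flags, (unsigned int)_TIF_ALLWORK_MASK);
        return 0;
}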
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index 343172d422a9..6fdd0c860193 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -44,25 +44,64 @@ static inline const struct cpumask *cpumask_of_node(int node)
44/* For now, use numa node -1 for global allocation. */ 44/* For now, use numa node -1 for global allocation. */
45#define pcibus_to_node(bus) ((void)(bus), -1) 45#define pcibus_to_node(bus) ((void)(bus), -1)
46 46
47/*
48 * The TILE architecture has many cores integrated in one processor, so we need
49 * to set up a bigger balance_interval for both CPU/NODE scheduling domains to
50 * reduce process scheduling costs.
51 */
52
53/* sched_domains SD_CPU_INIT for TILE architecture */
54#define SD_CPU_INIT (struct sched_domain) { \
55 .min_interval = 4, \
56 .max_interval = 128, \
57 .busy_factor = 64, \
58 .imbalance_pct = 125, \
59 .cache_nice_tries = 1, \
60 .busy_idx = 2, \
61 .idle_idx = 1, \
62 .newidle_idx = 0, \
63 .wake_idx = 0, \
64 .forkexec_idx = 0, \
65 \
66 .flags = 1*SD_LOAD_BALANCE \
67 | 1*SD_BALANCE_NEWIDLE \
68 | 1*SD_BALANCE_EXEC \
69 | 1*SD_BALANCE_FORK \
70 | 0*SD_BALANCE_WAKE \
71 | 0*SD_WAKE_AFFINE \
72 | 0*SD_PREFER_LOCAL \
73 | 0*SD_SHARE_CPUPOWER \
74 | 0*SD_SHARE_PKG_RESOURCES \
75 | 0*SD_SERIALIZE \
76 , \
77 .last_balance = jiffies, \
78 .balance_interval = 32, \
79}
80
47/* sched_domains SD_NODE_INIT for TILE architecture */ 81/* sched_domains SD_NODE_INIT for TILE architecture */
48#define SD_NODE_INIT (struct sched_domain) { \ 82#define SD_NODE_INIT (struct sched_domain) { \
49 .min_interval = 8, \ 83 .min_interval = 16, \
50 .max_interval = 32, \ 84 .max_interval = 512, \
51 .busy_factor = 32, \ 85 .busy_factor = 32, \
52 .imbalance_pct = 125, \ 86 .imbalance_pct = 125, \
53 .cache_nice_tries = 1, \ 87 .cache_nice_tries = 1, \
54 .busy_idx = 3, \ 88 .busy_idx = 3, \
55 .idle_idx = 1, \ 89 .idle_idx = 1, \
56 .newidle_idx = 2, \ 90 .newidle_idx = 2, \
57 .wake_idx = 1, \ 91 .wake_idx = 1, \
58 .flags = SD_LOAD_BALANCE \ 92 .flags = 1*SD_LOAD_BALANCE \
59 | SD_BALANCE_NEWIDLE \ 93 | 1*SD_BALANCE_NEWIDLE \
60 | SD_BALANCE_EXEC \ 94 | 1*SD_BALANCE_EXEC \
61 | SD_BALANCE_FORK \ 95 | 1*SD_BALANCE_FORK \
62 | SD_WAKE_AFFINE \ 96 | 0*SD_BALANCE_WAKE \
63 | SD_SERIALIZE, \ 97 | 0*SD_WAKE_AFFINE \
64 .last_balance = jiffies, \ 98 | 0*SD_PREFER_LOCAL \
65 .balance_interval = 1, \ 99 | 0*SD_SHARE_CPUPOWER \
100 | 0*SD_SHARE_PKG_RESOURCES \
101 | 1*SD_SERIALIZE \
102 , \
103 .last_balance = jiffies, \
104 .balance_interval = 128, \
66} 105}
67 106
68/* By definition, we create nodes based on online memory. */ 107/* By definition, we create nodes based on online memory. */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index d06e35f57201..5f20f920f932 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -15,10 +15,14 @@
15#ifndef _ASM_TILE_TRAPS_H 15#ifndef _ASM_TILE_TRAPS_H
16#define _ASM_TILE_TRAPS_H 16#define _ASM_TILE_TRAPS_H
17 17
18#include <arch/chip.h>
19
18/* mm/fault.c */ 20/* mm/fault.c */
19void do_page_fault(struct pt_regs *, int fault_num, 21void do_page_fault(struct pt_regs *, int fault_num,
20 unsigned long address, unsigned long write); 22 unsigned long address, unsigned long write);
23#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
21void do_async_page_fault(struct pt_regs *); 24void do_async_page_fault(struct pt_regs *);
25#endif
22 26
23#ifndef __tilegx__ 27#ifndef __tilegx__
24/* 28/*
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index b35c2db71199..f70bf1c541f1 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -15,7 +15,7 @@
15#if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL) 15#if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL)
16#define _ASM_TILE_UNISTD_H 16#define _ASM_TILE_UNISTD_H
17 17
18#ifndef __LP64__ 18#if !defined(__LP64__) || defined(__SYSCALL_COMPAT)
19/* Use the flavor of this syscall that matches the 32-bit API better. */ 19/* Use the flavor of this syscall that matches the 32-bit API better. */
20#define __ARCH_WANT_SYNC_FILE_RANGE2 20#define __ARCH_WANT_SYNC_FILE_RANGE2
21#endif 21#endif
diff --git a/arch/tile/include/hv/pagesize.h b/arch/tile/include/asm/vga.h
index 58bed114fedd..7b46e754d611 100644
--- a/arch/tile/include/hv/pagesize.h
+++ b/arch/tile/include/asm/vga.h
@@ -10,23 +10,30 @@
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for 11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details. 12 * more details.
13 *
14 * Access to VGA videoram.
13 */ 15 */
14 16
15/** 17#ifndef _ASM_TILE_VGA_H
16 * @file pagesize.h 18#define _ASM_TILE_VGA_H
17 */
18 19
19#ifndef _HV_PAGESIZE_H 20#include <asm/io.h>
20#define _HV_PAGESIZE_H
21 21
22/** The log2 of the size of small pages, in bytes. This value should 22#define VT_BUF_HAVE_RW
23 * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
24 */
25#define HV_LOG2_PAGE_SIZE_SMALL 16
26 23
27/** The log2 of the size of large pages, in bytes. This value should be 24static inline void scr_writew(u16 val, volatile u16 *addr)
28 * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 25{
29 */ 26 __raw_writew(val, (volatile u16 __iomem *) addr);
30#define HV_LOG2_PAGE_SIZE_LARGE 24 27}
28
29static inline u16 scr_readw(volatile const u16 *addr)
30{
31 return __raw_readw((volatile const u16 __iomem *) addr);
32}
33
34#define vga_readb(a) readb((u8 __iomem *)(a))
35#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a))
36
37#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s))
31 38
32#endif /* _HV_PAGESIZE_H */ 39#endif
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index ee41bca4c8c4..72ec1e972f15 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -22,8 +22,6 @@
22 22
23#include <arch/chip.h> 23#include <arch/chip.h>
24 24
25#include <hv/pagesize.h>
26
27/* Linux builds want unsigned long constants, but assembler wants numbers */ 25/* Linux builds want unsigned long constants, but assembler wants numbers */
28#ifdef __ASSEMBLER__ 26#ifdef __ASSEMBLER__
29/** One, for assembler */ 27/** One, for assembler */
@@ -44,11 +42,21 @@
44 */ 42 */
45#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) 43#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
46 44
45/** The log2 of the size of small pages, in bytes. This value should
46 * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
47 */
48#define HV_LOG2_PAGE_SIZE_SMALL 16
49
47/** The size of small pages, in bytes. This value should be verified 50/** The size of small pages, in bytes. This value should be verified
48 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 51 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
49 */ 52 */
50#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) 53#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
51 54
55/** The log2 of the size of large pages, in bytes. This value should be
56 * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
57 */
58#define HV_LOG2_PAGE_SIZE_LARGE 24
59
52/** The size of large pages, in bytes. This value should be verified 60/** The size of large pages, in bytes. This value should be verified
53 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 61 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
54 */ 62 */
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 55a6a74974b4..1dc71eabfc5a 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -14,19 +14,11 @@
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/string.h> 16#include <linux/string.h>
17
18#include <asm/backtrace.h> 17#include <asm/backtrace.h>
19
20#include <arch/chip.h>
21
22#include <asm/opcode-tile.h> 18#include <asm/opcode-tile.h>
19#include <arch/abi.h>
23 20
24 21#ifdef __tilegx__
25#define TREG_SP 54
26#define TREG_LR 55
27
28
29#if TILE_CHIP >= 10
30#define tile_bundle_bits tilegx_bundle_bits 22#define tile_bundle_bits tilegx_bundle_bits
31#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE 23#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
32#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES 24#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
@@ -47,7 +39,7 @@ typedef long long bt_int_reg_t;
47typedef int bt_int_reg_t; 39typedef int bt_int_reg_t;
48#endif 40#endif
49 41
50/** A decoded bundle used for backtracer analysis. */ 42/* A decoded bundle used for backtracer analysis. */
51struct BacktraceBundle { 43struct BacktraceBundle {
52 tile_bundle_bits bits; 44 tile_bundle_bits bits;
53 int num_insns; 45 int num_insns;
@@ -56,23 +48,7 @@ struct BacktraceBundle {
56}; 48};
57 49
58 50
59/* This implementation only makes sense for native tools. */ 51/* Locates an instruction inside the given bundle that
60/** Default function to read memory. */
61static bool bt_read_memory(void *result, VirtualAddress addr,
62 unsigned int size, void *extra)
63{
64 /* FIXME: this should do some horrible signal stuff to catch
65 * SEGV cleanly and fail.
66 *
67 * Or else the caller should do the setjmp for efficiency.
68 */
69
70 memcpy(result, (const void *)addr, size);
71 return true;
72}
73
74
75/** Locates an instruction inside the given bundle that
76 * has the specified mnemonic, and whose first 'num_operands_to_match' 52 * has the specified mnemonic, and whose first 'num_operands_to_match'
77 * operands exactly match those in 'operand_values'. 53 * operands exactly match those in 'operand_values'.
78 */ 54 */
@@ -107,13 +83,13 @@ static const struct tile_decoded_instruction *find_matching_insn(
107 return NULL; 83 return NULL;
108} 84}
109 85
110/** Does this bundle contain an 'iret' instruction? */ 86/* Does this bundle contain an 'iret' instruction? */
111static inline bool bt_has_iret(const struct BacktraceBundle *bundle) 87static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
112{ 88{
113 return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL; 89 return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
114} 90}
115 91
116/** Does this bundle contain an 'addi sp, sp, OFFSET' or 92/* Does this bundle contain an 'addi sp, sp, OFFSET' or
117 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET? 93 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
118 */ 94 */
119static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust) 95static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
@@ -124,7 +100,7 @@ static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
124 find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2); 100 find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
125 if (insn == NULL) 101 if (insn == NULL)
126 insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2); 102 insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
127#if TILE_CHIP >= 10 103#ifdef __tilegx__
128 if (insn == NULL) 104 if (insn == NULL)
129 insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2); 105 insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
130 if (insn == NULL) 106 if (insn == NULL)
@@ -137,7 +113,7 @@ static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
137 return true; 113 return true;
138} 114}
139 115
140/** Does this bundle contain any 'info OP' or 'infol OP' 116/* Does this bundle contain any 'info OP' or 'infol OP'
141 * instruction, and if so, what are their OP? Note that OP is interpreted 117 * instruction, and if so, what are their OP? Note that OP is interpreted
142 * as an unsigned value by this code since that's what the caller wants. 118 * as an unsigned value by this code since that's what the caller wants.
143 * Returns the number of info ops found. 119 * Returns the number of info ops found.
@@ -161,7 +137,7 @@ static int bt_get_info_ops(const struct BacktraceBundle *bundle,
161 return num_ops; 137 return num_ops;
162} 138}
163 139
164/** Does this bundle contain a jrp instruction, and if so, to which 140/* Does this bundle contain a jrp instruction, and if so, to which
165 * register is it jumping? 141 * register is it jumping?
166 */ 142 */
167static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg) 143static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
@@ -175,7 +151,7 @@ static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
175 return true; 151 return true;
176} 152}
177 153
178/** Does this bundle modify the specified register in any way? */ 154/* Does this bundle modify the specified register in any way? */
179static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg) 155static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
180{ 156{
181 int i, j; 157 int i, j;
@@ -195,34 +171,34 @@ static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
195 return false; 171 return false;
196} 172}
197 173
198/** Does this bundle modify sp? */ 174/* Does this bundle modify sp? */
199static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle) 175static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
200{ 176{
201 return bt_modifies_reg(bundle, TREG_SP); 177 return bt_modifies_reg(bundle, TREG_SP);
202} 178}
203 179
204/** Does this bundle modify lr? */ 180/* Does this bundle modify lr? */
205static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle) 181static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
206{ 182{
207 return bt_modifies_reg(bundle, TREG_LR); 183 return bt_modifies_reg(bundle, TREG_LR);
208} 184}
209 185
210/** Does this bundle contain the instruction 'move fp, sp'? */ 186/* Does this bundle contain the instruction 'move fp, sp'? */
211static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle) 187static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
212{ 188{
213 static const int vals[2] = { 52, TREG_SP }; 189 static const int vals[2] = { 52, TREG_SP };
214 return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL; 190 return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
215} 191}
216 192
217/** Does this bundle contain a store of lr to sp? */ 193/* Does this bundle contain a store of lr to sp? */
218static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle) 194static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
219{ 195{
220 static const int vals[2] = { TREG_SP, TREG_LR }; 196 static const int vals[2] = { TREG_SP, TREG_LR };
221 return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL; 197 return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
222} 198}
223 199
224#if TILE_CHIP >= 10 200#ifdef __tilegx__
225/** Track moveli values placed into registers. */ 201/* Track moveli values placed into registers. */
226static inline void bt_update_moveli(const struct BacktraceBundle *bundle, 202static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
227 int moveli_args[]) 203 int moveli_args[])
228{ 204{
@@ -238,7 +214,7 @@ static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
238 } 214 }
239} 215}
240 216
241/** Does this bundle contain an 'add sp, sp, reg' instruction 217/* Does this bundle contain an 'add sp, sp, reg' instruction
242 * from a register that we saw a moveli into, and if so, what 218 * from a register that we saw a moveli into, and if so, what
243 * is the value in the register? 219 * is the value in the register?
244 */ 220 */
@@ -260,11 +236,11 @@ static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
260} 236}
261#endif 237#endif
262 238
263/** Locates the caller's PC and SP for a program starting at the 239/* Locates the caller's PC and SP for a program starting at the
264 * given address. 240 * given address.
265 */ 241 */
266static void find_caller_pc_and_caller_sp(CallerLocation *location, 242static void find_caller_pc_and_caller_sp(CallerLocation *location,
267 const VirtualAddress start_pc, 243 const unsigned long start_pc,
268 BacktraceMemoryReader read_memory_func, 244 BacktraceMemoryReader read_memory_func,
269 void *read_memory_func_extra) 245 void *read_memory_func_extra)
270{ 246{
@@ -288,9 +264,9 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
288 tile_bundle_bits prefetched_bundles[32]; 264 tile_bundle_bits prefetched_bundles[32];
289 int num_bundles_prefetched = 0; 265 int num_bundles_prefetched = 0;
290 int next_bundle = 0; 266 int next_bundle = 0;
291 VirtualAddress pc; 267 unsigned long pc;
292 268
293#if TILE_CHIP >= 10 269#ifdef __tilegx__
294 /* Naively try to track moveli values to support addx for -m32. */ 270 /* Naively try to track moveli values to support addx for -m32. */
295 int moveli_args[TILEGX_NUM_REGISTERS] = { 0 }; 271 int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
296#endif 272#endif
@@ -369,10 +345,6 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
369 /* Weird; reserved value, ignore it. */ 345 /* Weird; reserved value, ignore it. */
370 continue; 346 continue;
371 } 347 }
372 if (info_operand & ENTRY_POINT_INFO_OP) {
373 /* This info op is ignored by the backtracer. */
374 continue;
375 }
376 348
377 /* Skip info ops which are not in the 349 /* Skip info ops which are not in the
378 * "one_ago" mode we want right now. 350 * "one_ago" mode we want right now.
@@ -453,7 +425,7 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
453 if (!sp_determined) { 425 if (!sp_determined) {
454 int adjust; 426 int adjust;
455 if (bt_has_addi_sp(&bundle, &adjust) 427 if (bt_has_addi_sp(&bundle, &adjust)
456#if TILE_CHIP >= 10 428#ifdef __tilegx__
457 || bt_has_add_sp(&bundle, &adjust, moveli_args) 429 || bt_has_add_sp(&bundle, &adjust, moveli_args)
458#endif 430#endif
459 ) { 431 ) {
@@ -504,7 +476,7 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
504 } 476 }
505 } 477 }
506 478
507#if TILE_CHIP >= 10 479#ifdef __tilegx__
508 /* Track moveli arguments for -m32 mode. */ 480 /* Track moveli arguments for -m32 mode. */
509 bt_update_moveli(&bundle, moveli_args); 481 bt_update_moveli(&bundle, moveli_args);
510#endif 482#endif
@@ -546,18 +518,26 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
546 } 518 }
547} 519}
548 520
521/* Initializes a backtracer to start from the given location.
522 *
523 * If the frame pointer cannot be determined it is set to -1.
524 *
525 * state: The state to be filled in.
526 * read_memory_func: A callback that reads memory.
527 * read_memory_func_extra: An arbitrary argument to read_memory_func.
528 * pc: The current PC.
529 * lr: The current value of the 'lr' register.
530 * sp: The current value of the 'sp' register.
531 * r52: The current value of the 'r52' register.
532 */
549void backtrace_init(BacktraceIterator *state, 533void backtrace_init(BacktraceIterator *state,
550 BacktraceMemoryReader read_memory_func, 534 BacktraceMemoryReader read_memory_func,
551 void *read_memory_func_extra, 535 void *read_memory_func_extra,
552 VirtualAddress pc, VirtualAddress lr, 536 unsigned long pc, unsigned long lr,
553 VirtualAddress sp, VirtualAddress r52) 537 unsigned long sp, unsigned long r52)
554{ 538{
555 CallerLocation location; 539 CallerLocation location;
556 VirtualAddress fp, initial_frame_caller_pc; 540 unsigned long fp, initial_frame_caller_pc;
557
558 if (read_memory_func == NULL) {
559 read_memory_func = bt_read_memory;
560 }
561 541
562 /* Find out where we are in the initial frame. */ 542 /* Find out where we are in the initial frame. */
563 find_caller_pc_and_caller_sp(&location, pc, 543 find_caller_pc_and_caller_sp(&location, pc,
@@ -630,12 +610,15 @@ void backtrace_init(BacktraceIterator *state,
630/* Handle the case where the register holds more bits than the VA. */ 610/* Handle the case where the register holds more bits than the VA. */
631static bool valid_addr_reg(bt_int_reg_t reg) 611static bool valid_addr_reg(bt_int_reg_t reg)
632{ 612{
633 return ((VirtualAddress)reg == reg); 613 return ((unsigned long)reg == reg);
634} 614}
635 615
616/* Advances the backtracing state to the calling frame, returning
617 * true iff successful.
618 */
636bool backtrace_next(BacktraceIterator *state) 619bool backtrace_next(BacktraceIterator *state)
637{ 620{
638 VirtualAddress next_fp, next_pc; 621 unsigned long next_fp, next_pc;
639 bt_int_reg_t next_frame[2]; 622 bt_int_reg_t next_frame[2];
640 623
641 if (state->fp == -1) { 624 if (state->fp == -1) {
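With the default bt_read_memory() fallback removed above, every caller of backtrace_init() now has to supply its own reader. A hedged sketch of the resulting usage; the read_kernel_memory() and dump_frames_sketch() helpers and the it.pc/it.sp field names are assumptions for illustration, not code from this series:

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/backtrace.h>

/* Trivial reader: no fault handling, so only safe for known-good addresses. */
static bool read_kernel_memory(void *result, unsigned long addr,
                               unsigned int size, void *extra)
{
        memcpy(result, (const void *)addr, size);
        return true;
}

static void dump_frames_sketch(unsigned long pc, unsigned long lr,
                               unsigned long sp, unsigned long r52)
{
        BacktraceIterator it;

        backtrace_init(&it, read_kernel_memory, NULL, pc, lr, sp, r52);
        do {
                pr_info("  frame: pc %#lx sp %#lx\n", it.pc, it.sp);
        } while (backtrace_next(&it));
}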
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index dbc213adf5e1..bf5e9d70266c 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -135,26 +135,15 @@ long tile_compat_sys_msgrcv(int msqid,
135 135
136/* Provide the compat syscall number to call mapping. */ 136/* Provide the compat syscall number to call mapping. */
137#undef __SYSCALL 137#undef __SYSCALL
138#define __SYSCALL(nr, call) [nr] = (compat_##call), 138#define __SYSCALL(nr, call) [nr] = (call),
139 139
140/* The generic versions of these don't work for Tile. */ 140/* The generic versions of these don't work for Tile. */
141#define compat_sys_msgrcv tile_compat_sys_msgrcv 141#define compat_sys_msgrcv tile_compat_sys_msgrcv
142#define compat_sys_msgsnd tile_compat_sys_msgsnd 142#define compat_sys_msgsnd tile_compat_sys_msgsnd
143 143
144/* See comments in sys.c */ 144/* See comments in sys.c */
145#define compat_sys_fadvise64 sys32_fadvise64
146#define compat_sys_fadvise64_64 sys32_fadvise64_64 145#define compat_sys_fadvise64_64 sys32_fadvise64_64
147#define compat_sys_readahead sys32_readahead 146#define compat_sys_readahead sys32_readahead
148#define compat_sys_sync_file_range compat_sys_sync_file_range2
149
150/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
151#define compat_sys_stat64 sys_stat64
152#define compat_sys_lstat64 sys_lstat64
153#define compat_sys_fstat64 sys_fstat64
154#define compat_sys_fstatat64 sys_fstatat64
155
156/* The native sys_ptrace dynamically handles compat binaries. */
157#define compat_sys_ptrace sys_ptrace
158 147
159/* Call the trampolines to manage pt_regs where necessary. */ 148/* Call the trampolines to manage pt_regs where necessary. */
160#define compat_sys_execve _compat_sys_execve 149#define compat_sys_execve _compat_sys_execve
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index dbb0dfc7bece..a7869ad62776 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -317,7 +317,7 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
317 return 0; 317 return 0;
318 318
319badframe: 319badframe:
320 force_sig(SIGSEGV, current); 320 signal_fault("bad sigreturn frame", regs, frame, 0);
321 return 0; 321 return 0;
322} 322}
323 323
@@ -431,6 +431,6 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
431 return 0; 431 return 0;
432 432
433give_sigsegv: 433give_sigsegv:
434 force_sigsegv(sig, current); 434 signal_fault("bad setup frame", regs, frame, sig);
435 return -EFAULT; 435 return -EFAULT;
436} 436}
diff --git a/arch/tile/kernel/futex_64.S b/arch/tile/kernel/futex_64.S
new file mode 100644
index 000000000000..f465d1eda20f
--- /dev/null
+++ b/arch/tile/kernel/futex_64.S
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Atomically access user memory, but use MMU to avoid propagating
15 * kernel exceptions.
16 */
17
18#include <linux/linkage.h>
19#include <asm/errno.h>
20#include <asm/futex.h>
21#include <asm/page.h>
22#include <asm/processor.h>
23
24/*
25 * Provide a set of atomic memory operations supporting <asm/futex.h>.
26 *
27 * r0: user address to manipulate
28 * r1: new value to write, or for cmpxchg, old value to compare against
29 * r2: (cmpxchg only) new value to write
30 *
31 * Return __get_user struct, r0 with value, r1 with error.
32 */
33#define FUTEX_OP(name, ...) \
34STD_ENTRY(futex_##name) \
35 __VA_ARGS__; \
36 { \
37 move r1, zero; \
38 jrp lr \
39 }; \
40 STD_ENDPROC(futex_##name); \
41 .pushsection __ex_table,"a"; \
42 .quad 1b, get_user_fault; \
43 .popsection
44
45 .pushsection .fixup,"ax"
46get_user_fault:
47 { movei r1, -EFAULT; jrp lr }
48 ENDPROC(get_user_fault)
49 .popsection
50
51FUTEX_OP(cmpxchg, mtspr CMPEXCH_VALUE, r1; 1: cmpexch4 r0, r0, r2)
52FUTEX_OP(set, 1: exch4 r0, r0, r1)
53FUTEX_OP(add, 1: fetchadd4 r0, r0, r1)
54FUTEX_OP(or, 1: fetchor4 r0, r0, r1)
55FUTEX_OP(andn, nor r1, r1, zero; 1: fetchand4 r0, r0, r1)
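The FUTEX_OP() macro above generates one entry point per operation, with r0 holding the user address, r1 the operand (or, for cmpxchg, the value to compare against), and r2 the cmpxchg replacement; results come back __get_user-style, value in r0 and 0 or -EFAULT in r1. A value-only C model of what each operation computes, with no atomicity or fault handling and all helper names hypothetical:

#include <stdio.h>
#include <stdint.h>

static uint32_t futex_set(uint32_t *uaddr, uint32_t val)
{
        uint32_t old = *uaddr; *uaddr = val; return old;
}

static uint32_t futex_add(uint32_t *uaddr, uint32_t val)
{
        uint32_t old = *uaddr; *uaddr = old + val; return old;
}

static uint32_t futex_or(uint32_t *uaddr, uint32_t val)
{
        uint32_t old = *uaddr; *uaddr = old | val; return old;
}

static uint32_t futex_andn(uint32_t *uaddr, uint32_t val)
{
        /* "nor r1, r1, zero" then fetchand4: clear the bits given in val. */
        uint32_t old = *uaddr; *uaddr = old & ~val; return old;
}

static uint32_t futex_cmpxchg(uint32_t *uaddr, uint32_t oldval, uint32_t newval)
{
        uint32_t old = *uaddr;
        if (old == oldval)
                *uaddr = newval;
        return old;
}

int main(void)
{
        uint32_t word = 0;
        uint32_t old;

        futex_set(&word, 0xf0);                     /* word = 0xf0 */
        futex_or(&word, 0x0f);                      /* word = 0xff */
        futex_add(&word, 0x100);                    /* word = 0x1ff */
        old = futex_andn(&word, 0x0f0);             /* clears bits: word = 0x10f */
        printf("andn returned %#x, word now %#x\n", old, word);
        old = futex_cmpxchg(&word, 0x10f, 0);       /* matches, so word = 0 */
        printf("cmpxchg returned %#x, word now %#x\n", old, word);
        return 0;
}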
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index e910530436e6..3bddef710de4 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -268,12 +268,10 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
268 found_processes = 0; 268 found_processes = 0;
269 list_for_each_entry(p, &rect->task_head, thread.hardwall_list) { 269 list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
270 BUG_ON(p->thread.hardwall != rect); 270 BUG_ON(p->thread.hardwall != rect);
271 if (p->sighand) { 271 if (!(p->flags & PF_EXITING)) {
272 found_processes = 1; 272 found_processes = 1;
273 pr_notice("hardwall: killing %d\n", p->pid); 273 pr_notice("hardwall: killing %d\n", p->pid);
274 spin_lock(&p->sighand->siglock); 274 do_send_sig_info(info.si_signo, &info, p, false);
275 __group_send_sig_info(info.si_signo, &info, p);
276 spin_unlock(&p->sighand->siglock);
277 } 275 }
278 } 276 }
279 if (!found_processes) 277 if (!found_processes)
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
new file mode 100644
index 000000000000..6bc3a932fe45
--- /dev/null
+++ b/arch/tile/kernel/head_64.S
@@ -0,0 +1,269 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * TILE startup code.
15 */
16
17#include <linux/linkage.h>
18#include <linux/init.h>
19#include <asm/page.h>
20#include <asm/pgtable.h>
21#include <asm/thread_info.h>
22#include <asm/processor.h>
23#include <asm/asm-offsets.h>
24#include <hv/hypervisor.h>
25#include <arch/chip.h>
26#include <arch/spr_def.h>
27
28/*
29 * This module contains the entry code for kernel images. It performs the
30 * minimal setup needed to call the generic C routines.
31 */
32
33 __HEAD
34ENTRY(_start)
35 /* Notify the hypervisor of what version of the API we want */
36 {
37 movei r1, TILE_CHIP
38 movei r2, TILE_CHIP_REV
39 }
40 {
41 moveli r0, _HV_VERSION
42 jal hv_init
43 }
44 /* Get a reasonable default ASID in r0 */
45 {
46 move r0, zero
47 jal hv_inquire_asid
48 }
49
50 /*
51 * Install the default page table. The relocation required to
52 * statically define the table is a bit too complex, so we have
53 * to plug in the pointer from the L0 to the L1 table by hand.
54 * We only do this on the first cpu to boot, though, since the
55 * other CPUs should see a properly-constructed page table.
56 */
57 {
58 v4int_l r2, zero, r0 /* ASID for hv_install_context */
59 moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
60 }
61 {
62 shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET)
63 }
64 {
65 ld r1, r4 /* access_pte for hv_install_context */
66 }
67 {
68 moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET)
69 moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET)
70 }
71 {
72 /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
73 bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
74 inv r4
75 }
76 bnez r7, .Lno_write
77 {
78 shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET)
79 shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET)
80 }
81 {
82 /* Cut off the low bits of the PT address. */
83 shrui r6, r6, HV_LOG2_PAGE_TABLE_ALIGN
84 /* Start with our access pte. */
85 move r5, r1
86 }
87 {
88 /* Stuff the address into the page table pointer slot of the PTE. */
89 bfins r5, r6, HV_PTE_INDEX_PTFN, \
90 HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
91 }
92 {
93 /* Store the L0 data PTE. */
94 st r0, r5
95 addli r6, r6, (temp_code_pmd - temp_data_pmd) >> \
96 HV_LOG2_PAGE_TABLE_ALIGN
97 }
98 {
99 addli r0, r0, .Lsv_code_pmd - .Lsv_data_pmd
100 bfins r5, r6, HV_PTE_INDEX_PTFN, \
101 HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
102 }
103 /* Store the L0 code PTE. */
104 st r0, r5
105
106.Lno_write:
107 moveli lr, hw2_last(1f)
108 {
109 shl16insli lr, lr, hw1(1f)
110 moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET)
111 }
112 {
113 shl16insli lr, lr, hw0(1f)
114 shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
115 }
116 {
117 move r3, zero
118 j hv_install_context
119 }
1201:
121
122 /* Install the interrupt base. */
123 moveli r0, hw2_last(MEM_SV_START)
124 shl16insli r0, r0, hw1(MEM_SV_START)
125 shl16insli r0, r0, hw0(MEM_SV_START)
126 mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
127
128 /*
129 * Get our processor number and save it away in SAVE_K_0.
130 * Extract stuff from the topology structure: r4 = y, r6 = x,
131 * r5 = width. FIXME: consider whether we want to just make these
132 * 64-bit values (and if so fix smp_topology write below, too).
133 */
134 jal hv_inquire_topology
135 {
136 v4int_l r5, zero, r1 /* r5 = width */
137 shrui r4, r0, 32 /* r4 = y */
138 }
139 {
140 v4int_l r6, zero, r0 /* r6 = x */
141 mul_lu_lu r4, r4, r5
142 }
143 {
144 add r4, r4, r6 /* r4 == cpu == y*width + x */
145 }
146
147#ifdef CONFIG_SMP
148 /*
149 * Load up our per-cpu offset. When the first (master) tile
150 * boots, this value is still zero, so we will load boot_pc
151 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
152 * The master tile initializes the per-cpu offset array, so that
153 * when subsequent (secondary) tiles boot, they will instead load
154 * from their per-cpu versions of boot_sp and boot_pc.
155 */
156 moveli r5, hw2_last(__per_cpu_offset)
157 shl16insli r5, r5, hw1(__per_cpu_offset)
158 shl16insli r5, r5, hw0(__per_cpu_offset)
159 shl3add r5, r4, r5
160 ld r5, r5
161 bnez r5, 1f
162
163 /*
164 * Save the width and height to the smp_topology variable
165 * for later use.
166 */
167 moveli r0, hw2_last(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
168 shl16insli r0, r0, hw1(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
169 shl16insli r0, r0, hw0(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
170 st r0, r1
1711:
172#else
173 move r5, zero
174#endif
175
176 /* Load and go with the correct pc and sp. */
177 {
178 moveli r1, hw2_last(boot_sp)
179 moveli r0, hw2_last(boot_pc)
180 }
181 {
182 shl16insli r1, r1, hw1(boot_sp)
183 shl16insli r0, r0, hw1(boot_pc)
184 }
185 {
186 shl16insli r1, r1, hw0(boot_sp)
187 shl16insli r0, r0, hw0(boot_pc)
188 }
189 {
190 add r1, r1, r5
191 add r0, r0, r5
192 }
193 ld r0, r0
194 ld sp, r1
195 or r4, sp, r4
196 mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
197 addi sp, sp, -STACK_TOP_DELTA
198 {
199 move lr, zero /* stop backtraces in the called function */
200 jr r0
201 }
202 ENDPROC(_start)
203
204__PAGE_ALIGNED_BSS
205 .align PAGE_SIZE
206ENTRY(empty_zero_page)
207 .fill PAGE_SIZE,1,0
208 END(empty_zero_page)
209
210 .macro PTE cpa, bits1
211 .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
212 HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
213 (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
214 .endm
215
216__PAGE_ALIGNED_DATA
217 .align PAGE_SIZE
218ENTRY(swapper_pg_dir)
219 .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
220.Lsv_data_pmd:
221 .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
222 .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
223.Lsv_code_pmd:
224 .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
225 .org swapper_pg_dir + HV_L0_SIZE
226 END(swapper_pg_dir)
227
228 .align HV_PAGE_TABLE_ALIGN
229ENTRY(temp_data_pmd)
230 /*
231 * We fill the PAGE_OFFSET pmd with huge pages with
232 * VA = PA + PAGE_OFFSET. We remap things with more precise access
233 * permissions later.
234 */
235 .set addr, 0
236 .rept HV_L1_ENTRIES
237 PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
238 .set addr, addr + HV_PAGE_SIZE_LARGE
239 .endr
240 .org temp_data_pmd + HV_L1_SIZE
241 END(temp_data_pmd)
242
243 .align HV_PAGE_TABLE_ALIGN
244ENTRY(temp_code_pmd)
245 /*
246 * We fill the MEM_SV_START pmd with huge pages with
247 * VA = PA + PAGE_OFFSET. We remap things with more precise access
248 * permissions later.
249 */
250 .set addr, 0
251 .rept HV_L1_ENTRIES
252 PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
253 .set addr, addr + HV_PAGE_SIZE_LARGE
254 .endr
255 .org temp_code_pmd + HV_L1_SIZE
256 END(temp_code_pmd)
257
258 /*
259 * Isolate swapper_pgprot to its own cache line, since each cpu
260 * starting up will read it using VA-is-PA and local homing.
261 * This would otherwise likely conflict with other data on the cache
262 * line, once we have set its permanent home in the page tables.
263 */
264 __INITDATA
265 .align CHIP_L2_LINE_SIZE()
266ENTRY(swapper_pgprot)
267 .quad HV_PTE_PRESENT | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
268 .align CHIP_L2_LINE_SIZE()
269 END(swapper_pgprot)
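The topology handling in _start reduces to linearizing the (x, y) coordinate pair returned by hv_inquire_topology() into a cpu number, cpu = y * width + x, which is then used to index the per-cpu boot_sp/boot_pc values. A trivial standalone model of that arithmetic (the 8x8 mesh is just an example):

#include <stdio.h>

static int linearize_cpu(int x, int y, int width)
{
        return y * width + x;
}

int main(void)
{
        /* Example only: an 8x8 mesh, tile at column 3, row 2. */
        printf("cpu = %d\n", linearize_cpu(3, 2, 8));   /* prints "cpu = 19" */
        return 0;
}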
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fffcfa6b3a62..72ade79b621b 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -851,14 +851,27 @@ STD_ENTRY(interrupt_return)
851 /* Check to see if there is any work to do before returning to user. */ 851 /* Check to see if there is any work to do before returning to user. */
852 { 852 {
853 addi r29, r32, THREAD_INFO_FLAGS_OFFSET 853 addi r29, r32, THREAD_INFO_FLAGS_OFFSET
854 moveli r28, lo16(_TIF_ALLWORK_MASK) 854 moveli r1, lo16(_TIF_ALLWORK_MASK)
855 } 855 }
856 { 856 {
857 lw r29, r29 857 lw r29, r29
858 auli r28, r28, ha16(_TIF_ALLWORK_MASK) 858 auli r1, r1, ha16(_TIF_ALLWORK_MASK)
859 } 859 }
860 and r28, r29, r28 860 and r1, r29, r1
861 bnz r28, .Lwork_pending 861 bzt r1, .Lrestore_all
862
863 /*
864 * Make sure we have all the registers saved for signal
865 * handling or single-step. Call out to C code to figure out
866 * exactly what we need to do for each flag bit, then if
867 * necessary, reload the flags and recheck.
868 */
869 push_extra_callee_saves r0
870 {
871 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
872 jal do_work_pending
873 }
874 bnz r0, .Lresume_userspace
862 875
863 /* 876 /*
864 * In the NMI case we 877 * In the NMI case we
@@ -1099,99 +1112,6 @@ STD_ENTRY(interrupt_return)
1099 pop_reg r50 1112 pop_reg r50
1100 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51) 1113 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
1101 j .Lcontinue_restore_regs 1114 j .Lcontinue_restore_regs
1102
1103.Lwork_pending:
1104 /* Mask the reschedule flag */
1105 andi r28, r29, _TIF_NEED_RESCHED
1106
1107 {
1108 /*
1109 * If the NEED_RESCHED flag is called, we call schedule(), which
1110 * may drop this context right here and go do something else.
1111 * On return, jump back to .Lresume_userspace and recheck.
1112 */
1113 bz r28, .Lasync_tlb
1114
1115 /* Mask the async-tlb flag */
1116 andi r28, r29, _TIF_ASYNC_TLB
1117 }
1118
1119 jal schedule
1120 FEEDBACK_REENTER(interrupt_return)
1121
1122 /* Reload the flags and check again */
1123 j .Lresume_userspace
1124
1125.Lasync_tlb:
1126 {
1127 bz r28, .Lneed_sigpending
1128
1129 /* Mask the sigpending flag */
1130 andi r28, r29, _TIF_SIGPENDING
1131 }
1132
1133 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1134 jal do_async_page_fault
1135 FEEDBACK_REENTER(interrupt_return)
1136
1137 /*
1138 * Go restart the "resume userspace" process. We may have
1139 * fired a signal, and we need to disable interrupts again.
1140 */
1141 j .Lresume_userspace
1142
1143.Lneed_sigpending:
1144 /*
1145 * At this point we are either doing signal handling or single-step,
1146 * so either way make sure we have all the registers saved.
1147 */
1148 push_extra_callee_saves r0
1149
1150 {
1151 /* If no signal pending, skip to singlestep check */
1152 bz r28, .Lneed_singlestep
1153
1154 /* Mask the singlestep flag */
1155 andi r28, r29, _TIF_SINGLESTEP
1156 }
1157
1158 jal do_signal
1159 FEEDBACK_REENTER(interrupt_return)
1160
1161 /* Reload the flags and check again */
1162 j .Lresume_userspace
1163
1164.Lneed_singlestep:
1165 {
1166 /* Get a pointer to the EX1 field */
1167 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
1168
1169 /* If we get here, our bit must be set. */
1170 bz r28, .Lwork_confusion
1171 }
1172 /* If we are in priv mode, don't single step */
1173 lw r28, r29
1174 andi r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
1175 bnz r28, .Lrestore_all
1176
1177 /* Allow interrupts within the single step code */
1178 TRACE_IRQS_ON /* Note: clobbers registers r0-r29 */
1179 IRQ_ENABLE(r20, r21)
1180
1181 /* try to single-step the current instruction */
1182 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1183 jal single_step_once
1184 FEEDBACK_REENTER(interrupt_return)
1185
1186 /* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */
1187 IRQ_DISABLE(r20,r21)
1188
1189 j .Lrestore_all
1190
1191.Lwork_confusion:
1192 move r0, r28
1193 panic "thread_info allwork flags unhandled on userspace resume: %#x"
1194
1195 STD_ENDPROC(interrupt_return) 1115 STD_ENDPROC(interrupt_return)
1196 1116
1197 /* 1117 /*
@@ -1550,7 +1470,10 @@ STD_ENTRY(_sys_clone)
1550 * We place it in the __HEAD section to ensure it is relatively 1470 * We place it in the __HEAD section to ensure it is relatively
1551 * near to the intvec_SWINT_1 code (reachable by a conditional branch). 1471 * near to the intvec_SWINT_1 code (reachable by a conditional branch).
1552 * 1472 *
1553 * Must match register usage in do_page_fault(). 1473 * Our use of ATOMIC_LOCK_REG here must match do_page_fault_ics().
1474 *
1475 * As we do in lib/atomic_asm_32.S, we bypass a store if the value we
1476 * would store is the same as the value we just loaded.
1554 */ 1477 */
1555 __HEAD 1478 __HEAD
1556 .align 64 1479 .align 64
@@ -1611,17 +1534,7 @@ ENTRY(sys_cmpxchg)
1611 { 1534 {
1612 shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT 1535 shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
1613 slt_u r23, r0, r23 1536 slt_u r23, r0, r23
1614 1537 lw r26, r0 /* see comment in the "#else" for the "lw r26". */
1615 /*
1616 * Ensure that the TLB is loaded before we take out the lock.
1617 * On TILEPro, this will start fetching the value all the way
1618 * into our L1 as well (and if it gets modified before we
1619 * grab the lock, it will be invalidated from our cache
1620 * before we reload it). On tile64, we'll start fetching it
1621 * into our L1 if we're the home, and if we're not, we'll
1622 * still at least start fetching it into the home's L2.
1623 */
1624 lw r26, r0
1625 } 1538 }
1626 { 1539 {
1627 s2a r21, r20, r21 1540 s2a r21, r20, r21
@@ -1637,18 +1550,9 @@ ENTRY(sys_cmpxchg)
1637 bbs r23, .Lcmpxchg64 1550 bbs r23, .Lcmpxchg64
1638 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ 1551 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
1639 } 1552 }
1640
1641 { 1553 {
1642 /*
1643 * We very carefully align the code that actually runs with
1644 * the lock held (nine bundles) so that we know it is all in
1645 * the icache when we start. This instruction (the jump) is
1646 * at the start of the first cache line, address zero mod 64;
1647 * we jump to somewhere in the second cache line to issue the
1648 * tns, then jump back to finish up.
1649 */
1650 s2a ATOMIC_LOCK_REG_NAME, r25, r21 1554 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1651 j .Lcmpxchg32_tns 1555 j .Lcmpxchg32_tns /* see comment in the #else for the jump. */
1652 } 1556 }
1653 1557
1654#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 1558#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
@@ -1713,24 +1617,25 @@ ENTRY(sys_cmpxchg)
1713 { 1617 {
1714 /* 1618 /*
1715 * We very carefully align the code that actually runs with 1619 * We very carefully align the code that actually runs with
1716 * the lock held (nine bundles) so that we know it is all in 1620 * the lock held (twelve bundles) so that we know it is all in
1717 * the icache when we start. This instruction (the jump) is 1621 * the icache when we start. This instruction (the jump) is
1718 * at the start of the first cache line, address zero mod 64; 1622 * at the start of the first cache line, address zero mod 64;
1719 * we jump to somewhere in the second cache line to issue the 1623 * we jump to the very end of the second cache line to get that
1720 * tns, then jump back to finish up. 1624 * line loaded in the icache, then fall through to issue the tns
1625 * in the third cache line, at which point it's all cached.
 1626            * Note that this is for performance, not correctness.
1721 */ 1627 */
1722 j .Lcmpxchg32_tns 1628 j .Lcmpxchg32_tns
1723 } 1629 }
1724 1630
1725#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 1631#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1726 1632
1727 ENTRY(__sys_cmpxchg_grab_lock) 1633/* Symbol for do_page_fault_ics() to use to compare against the PC. */
1634.global __sys_cmpxchg_grab_lock
1635__sys_cmpxchg_grab_lock:
1728 1636
1729 /* 1637 /*
1730 * Perform the actual cmpxchg or atomic_update. 1638 * Perform the actual cmpxchg or atomic_update.
1731 * Note that the system <arch/atomic.h> header relies on
1732 * atomic_update() to always perform an "mf", so don't make
1733 * it optional or conditional without modifying that code.
1734 */ 1639 */
1735.Ldo_cmpxchg32: 1640.Ldo_cmpxchg32:
1736 { 1641 {
@@ -1748,10 +1653,13 @@ ENTRY(sys_cmpxchg)
1748 } 1653 }
1749 { 1654 {
1750 mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */ 1655 mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */
1751 bbns r22, .Lcmpxchg32_mismatch 1656 bbns r22, .Lcmpxchg32_nostore
1752 } 1657 }
1658 seq r22, r24, r21 /* Are we storing the value we loaded? */
1659 bbs r22, .Lcmpxchg32_nostore
1753 sw r0, r24 1660 sw r0, r24
1754 1661
1662 /* The following instruction is the start of the second cache line. */
1755 /* Do slow mtspr here so the following "mf" waits less. */ 1663 /* Do slow mtspr here so the following "mf" waits less. */
1756 { 1664 {
1757 move sp, r27 1665 move sp, r27
@@ -1759,7 +1667,6 @@ ENTRY(sys_cmpxchg)
1759 } 1667 }
1760 mf 1668 mf
1761 1669
1762 /* The following instruction is the start of the second cache line. */
1763 { 1670 {
1764 move r0, r21 1671 move r0, r21
1765 sw ATOMIC_LOCK_REG_NAME, zero 1672 sw ATOMIC_LOCK_REG_NAME, zero
@@ -1767,7 +1674,7 @@ ENTRY(sys_cmpxchg)
1767 iret 1674 iret
1768 1675
1769 /* Duplicated code here in the case where we don't overlap "mf" */ 1676 /* Duplicated code here in the case where we don't overlap "mf" */
1770.Lcmpxchg32_mismatch: 1677.Lcmpxchg32_nostore:
1771 { 1678 {
1772 move r0, r21 1679 move r0, r21
1773 sw ATOMIC_LOCK_REG_NAME, zero 1680 sw ATOMIC_LOCK_REG_NAME, zero
@@ -1783,8 +1690,6 @@ ENTRY(sys_cmpxchg)
1783 * and for 64-bit cmpxchg. We provide it as a macro and put 1690 * and for 64-bit cmpxchg. We provide it as a macro and put
1784 * it into both versions. We can't share the code literally 1691 * it into both versions. We can't share the code literally
1785 * since it depends on having the right branch-back address. 1692 * since it depends on having the right branch-back address.
1786 * Note that the first few instructions should share the cache
1787 * line with the second half of the actual locked code.
1788 */ 1693 */
1789 .macro cmpxchg_lock, bitwidth 1694 .macro cmpxchg_lock, bitwidth
1790 1695
@@ -1810,7 +1715,7 @@ ENTRY(sys_cmpxchg)
1810 } 1715 }
1811 /* 1716 /*
1812 * The preceding instruction is the last thing that must be 1717 * The preceding instruction is the last thing that must be
1813 * on the second cache line. 1718 * hot in the icache before we do the "tns" above.
1814 */ 1719 */
1815 1720
1816#ifdef CONFIG_SMP 1721#ifdef CONFIG_SMP
@@ -1841,6 +1746,12 @@ ENTRY(sys_cmpxchg)
1841 .endm 1746 .endm
1842 1747
1843.Lcmpxchg32_tns: 1748.Lcmpxchg32_tns:
1749 /*
1750 * This is the last instruction on the second cache line.
1751 * The nop here loads the second line, then we fall through
1752 * to the tns to load the third line before we take the lock.
1753 */
1754 nop
1844 cmpxchg_lock 32 1755 cmpxchg_lock 32
1845 1756
1846 /* 1757 /*
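
The intvec_32.S hunks above make two related changes: the old .Lwork_pending assembly is collapsed into a single call out to do_work_pending() in C, and the cmpxchg/atomic_update fast path (renamed from .Lcmpxchg32_mismatch to .Lcmpxchg32_nostore) now skips the store entirely when it would write back the value it just loaded. A rough C model of that store-bypass logic follows; it is a sketch of the intent only, not of the locked sequence itself, and the register names in the comments refer to the assembly above.

    #include <stdint.h>

    /*
     * Model of the tweaked 32-bit fast path: return the old value, and only
     * store when the comparison hits AND the value to be stored differs from
     * what was just loaded ("seq r22, r24, r21" / "bbs r22, .Lcmpxchg32_nostore").
     */
    static uint32_t cmpxchg32_sketch(uint32_t *p, uint32_t oldval, uint32_t newval)
    {
            uint32_t loaded = *p;           /* the value held in r21 */

            if (loaded != oldval)           /* mismatch: nothing to store */
                    return loaded;
            if (newval == loaded)           /* would store what we loaded: skip it */
                    return loaded;

            *p = newval;                    /* the "sw r0, r24" that is now bypassed
                                               in the no-change case */
            return loaded;
    }

Presumably the point of bypassing the redundant store is to avoid dirtying (and remotely invalidating) the cache line when the atomic operation turns out to be a no-op.
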
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
new file mode 100644
index 000000000000..79c93e10ba27
--- /dev/null
+++ b/arch/tile/kernel/intvec_64.S
@@ -0,0 +1,1231 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Linux interrupt vectors.
15 */
16
17#include <linux/linkage.h>
18#include <linux/errno.h>
19#include <linux/unistd.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/irqflags.h>
23#include <asm/asm-offsets.h>
24#include <asm/types.h>
25#include <hv/hypervisor.h>
26#include <arch/abi.h>
27#include <arch/interrupts.h>
28#include <arch/spr_def.h>
29
30#ifdef CONFIG_PREEMPT
31# error "No support for kernel preemption currently"
32#endif
33
34#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
35
36#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
37
38
39 .macro push_reg reg, ptr=sp, delta=-8
40 {
41 st \ptr, \reg
42 addli \ptr, \ptr, \delta
43 }
44 .endm
45
46 .macro pop_reg reg, ptr=sp, delta=8
47 {
48 ld \reg, \ptr
49 addli \ptr, \ptr, \delta
50 }
51 .endm
52
53 .macro pop_reg_zero reg, zreg, ptr=sp, delta=8
54 {
55 move \zreg, zero
56 ld \reg, \ptr
57 addi \ptr, \ptr, \delta
58 }
59 .endm
60
61 .macro push_extra_callee_saves reg
62 PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
63 push_reg r51, \reg
64 push_reg r50, \reg
65 push_reg r49, \reg
66 push_reg r48, \reg
67 push_reg r47, \reg
68 push_reg r46, \reg
69 push_reg r45, \reg
70 push_reg r44, \reg
71 push_reg r43, \reg
72 push_reg r42, \reg
73 push_reg r41, \reg
74 push_reg r40, \reg
75 push_reg r39, \reg
76 push_reg r38, \reg
77 push_reg r37, \reg
78 push_reg r36, \reg
79 push_reg r35, \reg
80 push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
81 .endm
82
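The push_reg/pop_reg/pop_reg_zero bundles above store (or load) through the pointer and adjust it by a delta in the same bundle, defaulting to -8 on push and +8 on pop, which is how push_extra_callee_saves walks the pt_regs slots from r51 down to r34. A C-level model of the two primitives (sketch only; the real versions issue both operations in one bundle):

    #include <stdint.h>

    /* st \ptr, \reg ; addli \ptr, \ptr, \delta   (delta defaults to -8) */
    static void push_reg_model(uint64_t **ptr, uint64_t reg, long delta)
    {
            **ptr = reg;
            *ptr = (uint64_t *)((char *)*ptr + delta);
    }

    /* ld \reg, \ptr ; addli \ptr, \ptr, \delta   (delta defaults to +8) */
    static uint64_t pop_reg_model(uint64_t **ptr, long delta)
    {
            uint64_t reg = **ptr;
            *ptr = (uint64_t *)((char *)*ptr + delta);
            return reg;
    }
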
83 .macro panic str
84 .pushsection .rodata, "a"
851:
86 .asciz "\str"
87 .popsection
88 {
89 moveli r0, hw2_last(1b)
90 }
91 {
92 shl16insli r0, r0, hw1(1b)
93 }
94 {
95 shl16insli r0, r0, hw0(1b)
96 jal panic
97 }
98 .endm
99
100
101#ifdef __COLLECT_LINKER_FEEDBACK__
102 .pushsection .text.intvec_feedback,"ax"
103intvec_feedback:
104 .popsection
105#endif
106
107 /*
108 * Default interrupt handler.
109 *
110 * vecnum is where we'll put this code.
111 * c_routine is the C routine we'll call.
112 *
113 * The C routine is passed two arguments:
114 * - A pointer to the pt_regs state.
115 * - The interrupt vector number.
116 *
117 * The "processing" argument specifies the code for processing
118 * the interrupt. Defaults to "handle_interrupt".
119 */
120 .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
121 .org (\vecnum << 8)
122intvec_\vecname:
123 /* Temporarily save a register so we have somewhere to work. */
124
125 mtspr SPR_SYSTEM_SAVE_K_1, r0
126 mfspr r0, SPR_EX_CONTEXT_K_1
127
128 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
129
130 .ifc \vecnum, INT_DOUBLE_FAULT
131 /*
132 * For double-faults from user-space, fall through to the normal
133 * register save and stack setup path. Otherwise, it's the
134 * hypervisor giving us one last chance to dump diagnostics, and we
135 * branch to the kernel_double_fault routine to do so.
136 */
137 beqz r0, 1f
138 j _kernel_double_fault
1391:
140 .else
141 /*
142 * If we're coming from user-space, then set sp to the top of
143 * the kernel stack. Otherwise, assume sp is already valid.
144 */
145 {
146 bnez r0, 0f
147 move r0, sp
148 }
149 .endif
150
151 .ifc \c_routine, do_page_fault
152 /*
153 * The page_fault handler may be downcalled directly by the
154 * hypervisor even when Linux is running and has ICS set.
155 *
156 * In this case the contents of EX_CONTEXT_K_1 reflect the
157 * previous fault and can't be relied on to choose whether or
158 * not to reinitialize the stack pointer. So we add a test
159 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
160 * and if so we don't reinitialize sp, since we must be coming
161 * from Linux. (In fact the precise case is !(val & ~1),
162 * but any Linux PC has to have the high bit set.)
163 *
164 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
165 * any path that turns into a downcall to one of our TLB handlers.
166 *
167 * FIXME: if we end up never using this path, perhaps we should
168 * prevent the hypervisor from generating downcalls in this case.
169 * The advantage of getting a downcall is we can panic in Linux.
170 */
171 mfspr r0, SPR_SYSTEM_SAVE_K_2
172 {
173 bltz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
174 move r0, sp
175 }
176 .endif
177
178
179 /*
180 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
181 * the current stack top in the higher bits. So we recover
182 * our stack top by just masking off the low bits, then
183 * point sp at the top aligned address on the actual stack page.
184 */
185 mfspr r0, SPR_SYSTEM_SAVE_K_0
186 mm r0, zero, LOG2_THREAD_SIZE, 63
187
1880:
189 /*
190 * Align the stack mod 64 so we can properly predict what
191 * cache lines we need to write-hint to reduce memory fetch
192 * latency as we enter the kernel. The layout of memory is
193 * as follows, with cache line 0 at the lowest VA, and cache
194 * line 8 just below the r0 value this "andi" computes.
195 * Note that we never write to cache line 8, and we skip
196 * cache lines 1-3 for syscalls.
197 *
198 * cache line 8: ptregs padding (two words)
199 * cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
200 * cache line 6: r46...r53 (tp)
201 * cache line 5: r38...r45
202 * cache line 4: r30...r37
203 * cache line 3: r22...r29
204 * cache line 2: r14...r21
205 * cache line 1: r6...r13
206 * cache line 0: 2 x frame, r0..r5
207 */
208 andi r0, r0, -64
209
210 /*
211 * Push the first four registers on the stack, so that we can set
212 * them to vector-unique values before we jump to the common code.
213 *
214 * Registers are pushed on the stack as a struct pt_regs,
215 * with the sp initially just above the struct, and when we're
216 * done, sp points to the base of the struct, minus
217 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
218 *
219 * This routine saves just the first four registers, plus the
220 * stack context so we can do proper backtracing right away,
221 * and defers to handle_interrupt to save the rest.
222 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
223 */
224 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
225 wh64 r0 /* cache line 7 */
226 {
227 st r0, lr
228 addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
229 }
230 {
231 st r0, sp
232 addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
233 }
234 wh64 sp /* cache line 6 */
235 {
236 st sp, r52
237 addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
238 }
239 wh64 sp /* cache line 0 */
240 {
241 st sp, r1
242 addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
243 }
244 {
245 st sp, r2
246 addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
247 }
248 {
249 st sp, r3
250 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
251 }
252 mfspr r0, SPR_EX_CONTEXT_K_0
253 .ifc \processing,handle_syscall
254 /*
255 * Bump the saved PC by one bundle so that when we return, we won't
256 * execute the same swint instruction again. We need to do this while
257 * we're in the critical section.
258 */
259 addi r0, r0, 8
260 .endif
261 {
262 st sp, r0
263 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
264 }
265 mfspr r0, SPR_EX_CONTEXT_K_1
266 {
267 st sp, r0
268 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
269 /*
270 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
271 * so that it gets passed through unchanged to the handler routine.
272 * Note that the .if conditional confusingly spans bundles.
273 */
274 .ifc \processing,handle_syscall
275 movei r0, \vecnum
276 }
277 {
278 st sp, r0
279 .else
280 movei r1, \vecnum
281 }
282 {
283 st sp, r1
284 .endif
285 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
286 }
287 mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
288 {
289 st sp, r0
290 addi sp, sp, -PTREGS_OFFSET_REG(0) - 8
291 }
292 {
293 st sp, zero /* write zero into "Next SP" frame pointer */
294 addi sp, sp, -8 /* leave SP pointing at bottom of frame */
295 }
296 .ifc \processing,handle_syscall
297 j handle_syscall
298 .else
299 /* Capture per-interrupt SPR context to registers. */
300 .ifc \c_routine, do_page_fault
301 mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
302 mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
303 .else
304 .ifc \vecnum, INT_ILL_TRANS
305 mfspr r2, ILL_TRANS_REASON
306 .else
307 .ifc \vecnum, INT_DOUBLE_FAULT
308 mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
309 .else
310 .ifc \c_routine, do_trap
311 mfspr r2, GPV_REASON
312 .else
313 .ifc \c_routine, op_handle_perf_interrupt
314 mfspr r2, PERF_COUNT_STS
315#if CHIP_HAS_AUX_PERF_COUNTERS()
316 .else
317 .ifc \c_routine, op_handle_aux_perf_interrupt
318 mfspr r2, AUX_PERF_COUNT_STS
319 .endif
320#endif
321 .endif
322 .endif
323 .endif
324 .endif
325 .endif
326 /* Put function pointer in r0 */
327 moveli r0, hw2_last(\c_routine)
328 shl16insli r0, r0, hw1(\c_routine)
329 {
330 shl16insli r0, r0, hw0(\c_routine)
331 j \processing
332 }
333 .endif
334 ENDPROC(intvec_\vecname)
335
336#ifdef __COLLECT_LINKER_FEEDBACK__
337 .pushsection .text.intvec_feedback,"ax"
338 .org (\vecnum << 5)
339 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
340 jrp lr
341 .popsection
342#endif
343
344 .endm
345
346
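Per the header comment of int_hand, the macro leaves the handler's address in r0 and jumps to the common \processing code, which later calls the handler with the pt_regs pointer and the vector number; for do_page_fault vectors it also captures SPR_SYSTEM_SAVE_K_3 (fault address) and SPR_SYSTEM_SAVE_K_2 (fault info) into r2/r3 first. A hedged sketch of the C-side prototypes this dispatch implies (the handler names and the exact types of the extra arguments are illustrative, not taken from this patch):

    struct pt_regs;

    /* Ordinary vector: the regs pointer plus the interrupt vector number. */
    void example_handle_trap(struct pt_regs *regs, int fault_num);

    /* do_page_fault-style vector: two extra SPR-derived arguments in r2/r3. */
    void example_handle_page_fault(struct pt_regs *regs, int fault_num,
                                   unsigned long address, unsigned long info);
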
347 /*
348 * Save the rest of the registers that we didn't save in the actual
349 * vector itself. We can't use r0-r10 inclusive here.
350 */
351 .macro finish_interrupt_save, function
352
353 /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
354 PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
355 {
356 .ifc \function,handle_syscall
357 st r52, r0
358 .else
359 st r52, zero
360 .endif
361 PTREGS_PTR(r52, PTREGS_OFFSET_TP)
362 }
363 st r52, tp
364 {
365 mfspr tp, CMPEXCH_VALUE
366 PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)
367 }
368
369 /*
370 * For ordinary syscalls, we save neither caller- nor callee-
371 * save registers, since the syscall invoker doesn't expect the
372 * caller-saves to be saved, and the called kernel functions will
373 * take care of saving the callee-saves for us.
374 *
375 * For interrupts we save just the caller-save registers. Saving
376 * them is required (since the "caller" can't save them). Again,
377 * the called kernel functions will restore the callee-save
378 * registers for us appropriately.
379 *
380 * On return, we normally restore nothing special for syscalls,
381 * and just the caller-save registers for interrupts.
382 *
383 * However, there are some important caveats to all this:
384 *
385 * - We always save a few callee-save registers to give us
386 * some scratchpad registers to carry across function calls.
387 *
388 * - fork/vfork/etc require us to save all the callee-save
389 * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
390 *
391 * - We always save r0..r5 and r10 for syscalls, since we need
392 * to reload them a bit later for the actual kernel call, and
393 * since we might need them for -ERESTARTNOINTR, etc.
394 *
395 * - Before invoking a signal handler, we save the unsaved
396 * callee-save registers so they are visible to the
397 * signal handler or any ptracer.
398 *
399 * - If the unsaved callee-save registers are modified, we set
400 * a bit in pt_regs so we know to reload them from pt_regs
401 * and not just rely on the kernel function unwinding.
402 * (Done for ptrace register writes and SA_SIGINFO handler.)
403 */
404 {
405 st r52, tp
406 PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
407 }
408 wh64 r52 /* cache line 4 */
409 push_reg r33, r52
410 push_reg r32, r52
411 push_reg r31, r52
412 .ifc \function,handle_syscall
413 push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
414 push_reg TREG_SYSCALL_NR_NAME, r52, \
415 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
416 .else
417
418 push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
419 wh64 r52 /* cache line 3 */
420 push_reg r29, r52
421 push_reg r28, r52
422 push_reg r27, r52
423 push_reg r26, r52
424 push_reg r25, r52
425 push_reg r24, r52
426 push_reg r23, r52
427 push_reg r22, r52
428 wh64 r52 /* cache line 2 */
429 push_reg r21, r52
430 push_reg r20, r52
431 push_reg r19, r52
432 push_reg r18, r52
433 push_reg r17, r52
434 push_reg r16, r52
435 push_reg r15, r52
436 push_reg r14, r52
437 wh64 r52 /* cache line 1 */
438 push_reg r13, r52
439 push_reg r12, r52
440 push_reg r11, r52
441 push_reg r10, r52
442 push_reg r9, r52
443 push_reg r8, r52
444 push_reg r7, r52
445 push_reg r6, r52
446
447 .endif
448
449 push_reg r5, r52
450 st r52, r4
451
452 /* Load tp with our per-cpu offset. */
453#ifdef CONFIG_SMP
454 {
455 mfspr r20, SPR_SYSTEM_SAVE_K_0
456 moveli r21, hw2_last(__per_cpu_offset)
457 }
458 {
459 shl16insli r21, r21, hw1(__per_cpu_offset)
460 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
461 }
462 shl16insli r21, r21, hw0(__per_cpu_offset)
463 shl3add r20, r20, r21
464 ld tp, r20
465#else
466 move tp, zero
467#endif
468
469 /*
470 * If we will be returning to the kernel, we will need to
471 * reset the interrupt masks to the state they had before.
472 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
473 */
474 mfspr r32, SPR_EX_CONTEXT_K_1
475 {
476 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
477 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
478 }
479 beqzt r32, 1f /* zero if from user space */
480 IRQS_DISABLED(r32) /* zero if irqs enabled */
481#if PT_FLAGS_DISABLE_IRQ != 1
482# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
483#endif
4841:
485 .ifnc \function,handle_syscall
486 /* Record the fact that we saved the caller-save registers above. */
487 ori r32, r32, PT_FLAGS_CALLER_SAVES
488 .endif
489 st r21, r32
490
491#ifdef __COLLECT_LINKER_FEEDBACK__
492 /*
493 * Notify the feedback routines that we were in the
494 * appropriate fixed interrupt vector area. Note that we
495 * still have ICS set at this point, so we can't invoke any
496 * atomic operations or we will panic. The feedback
497 * routines internally preserve r0..r10 and r30 up.
498 */
499 .ifnc \function,handle_syscall
500 shli r20, r1, 5
501 .else
502 moveli r20, INT_SWINT_1 << 5
503 .endif
504 moveli r21, hw2_last(intvec_feedback)
505 shl16insli r21, r21, hw1(intvec_feedback)
506 shl16insli r21, r21, hw0(intvec_feedback)
507 add r20, r20, r21
508 jalr r20
509
510 /* And now notify the feedback routines that we are here. */
511 FEEDBACK_ENTER(\function)
512#endif
513
514 /*
515 * we've captured enough state to the stack (including in
516 * particular our EX_CONTEXT state) that we can now release
517 * the interrupt critical section and replace it with our
518 * standard "interrupts disabled" mask value. This allows
519 * synchronous interrupts (and profile interrupts) to punch
520 * through from this point onwards.
521 */
522 .ifc \function,handle_nmi
523 IRQ_DISABLE_ALL(r20)
524 .else
525 IRQ_DISABLE(r20, r21)
526 .endif
527 mtspr INTERRUPT_CRITICAL_SECTION, zero
528
529 /*
530 * Prepare the first 256 stack bytes to be rapidly accessible
531 * without having to fetch the background data.
532 */
533 addi r52, sp, -64
534 {
535 wh64 r52
536 addi r52, r52, -64
537 }
538 {
539 wh64 r52
540 addi r52, r52, -64
541 }
542 {
543 wh64 r52
544 addi r52, r52, -64
545 }
546 wh64 r52
547
548#ifdef CONFIG_TRACE_IRQFLAGS
549 .ifnc \function,handle_nmi
550 /*
551 * We finally have enough state set up to notify the irq
552 * tracing code that irqs were disabled on entry to the handler.
553 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
554 * For syscalls, we already have the register state saved away
555 * on the stack, so we don't bother to do any register saves here,
556 * and later we pop the registers back off the kernel stack.
557 * For interrupt handlers, save r0-r3 in callee-saved registers.
558 */
559 .ifnc \function,handle_syscall
560 { move r30, r0; move r31, r1 }
561 { move r32, r2; move r33, r3 }
562 .endif
563 TRACE_IRQS_OFF
564 .ifnc \function,handle_syscall
565 { move r0, r30; move r1, r31 }
566 { move r2, r32; move r3, r33 }
567 .endif
568 .endif
569#endif
570
571 .endm
572
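Within finish_interrupt_save, the CONFIG_SMP block rebuilds tp from scratch: the cpu number sits in the low LOG2_THREAD_SIZE bits of SPR_SYSTEM_SAVE_K_0 (extracted with bfextu), and shl3add then indexes the 64-bit __per_cpu_offset[] array to load this cpu's offset. In C the sequence is roughly as follows (sketch; the SPR value and LOG2_THREAD_SIZE are passed in rather than read from hardware):

    #include <stdint.h>

    extern unsigned long __per_cpu_offset[];        /* one 8-byte entry per cpu */

    static unsigned long load_percpu_tp(uint64_t system_save_k_0,
                                        unsigned log2_thread_size)
    {
            /* bfextu r20, r20, 0, LOG2_THREAD_SIZE-1: keep only the cpu bits */
            uint64_t cpu = system_save_k_0 & ((1UL << log2_thread_size) - 1);

            /* shl3add r20, r20, r21 ; ld tp, r20 */
            return __per_cpu_offset[cpu];
    }
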
573 /*
574 * Redispatch a downcall.
575 */
576 .macro dc_dispatch vecnum, vecname
577 .org (\vecnum << 8)
578intvec_\vecname:
579 j hv_downcall_dispatch
580 ENDPROC(intvec_\vecname)
581 .endm
582
583 /*
584 * Common code for most interrupts. The C function we're eventually
585 * going to is in r0, and the faultnum is in r1; the original
586 * values for those registers are on the stack.
587 */
588 .pushsection .text.handle_interrupt,"ax"
589handle_interrupt:
590 finish_interrupt_save handle_interrupt
591
592 /* Jump to the C routine; it should enable irqs as soon as possible. */
593 {
594 jalr r0
595 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
596 }
597 FEEDBACK_REENTER(handle_interrupt)
598 {
599 movei r30, 0 /* not an NMI */
600 j interrupt_return
601 }
602 STD_ENDPROC(handle_interrupt)
603
604/*
605 * This routine takes a boolean in r30 indicating if this is an NMI.
606 * If so, we also expect a boolean in r31 indicating whether to
607 * re-enable the oprofile interrupts.
608 */
609STD_ENTRY(interrupt_return)
610 /* If we're resuming to kernel space, don't check thread flags. */
611 {
612 bnez r30, .Lrestore_all /* NMIs don't special-case user-space */
613 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
614 }
615 ld r29, r29
616 andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
617 {
618 beqzt r29, .Lresume_userspace
619 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
620 }
621
622 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
623 moveli r27, hw2_last(_cpu_idle_nap)
624 {
625 ld r28, r29
626 shl16insli r27, r27, hw1(_cpu_idle_nap)
627 }
628 {
629 shl16insli r27, r27, hw0(_cpu_idle_nap)
630 }
631 {
632 cmpeq r27, r27, r28
633 }
634 {
635 blbc r27, .Lrestore_all
636 addi r28, r28, 8
637 }
638 st r29, r28
639 j .Lrestore_all
640
641.Lresume_userspace:
642 FEEDBACK_REENTER(interrupt_return)
643
644 /*
645 * Disable interrupts so as to make sure we don't
646 * miss an interrupt that sets any of the thread flags (like
647 * need_resched or sigpending) between sampling and the iret.
648 * Routines like schedule() or do_signal() may re-enable
649 * interrupts before returning.
650 */
651 IRQ_DISABLE(r20, r21)
652 TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
653
654 /* Get base of stack in r32; note r30/31 are used as arguments here. */
655 GET_THREAD_INFO(r32)
656
657
658 /* Check to see if there is any work to do before returning to user. */
659 {
660 addi r29, r32, THREAD_INFO_FLAGS_OFFSET
661 moveli r1, hw1_last(_TIF_ALLWORK_MASK)
662 }
663 {
664 ld r29, r29
665 shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
666 }
667 and r1, r29, r1
668 beqzt r1, .Lrestore_all
669
670 /*
671 * Make sure we have all the registers saved for signal
672 * handling or single-step. Call out to C code to figure out
673 * exactly what we need to do for each flag bit, then if
674 * necessary, reload the flags and recheck.
675 */
676 push_extra_callee_saves r0
677 {
678 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
679 jal do_work_pending
680 }
681 bnez r0, .Lresume_userspace
682
683 /*
684 * In the NMI case we
685 * omit the call to single_process_check_nohz, which normally checks
686 * to see if we should start or stop the scheduler tick, because
687 * we can't call arbitrary Linux code from an NMI context.
688 * We always call the homecache TLB deferral code to re-trigger
689 * the deferral mechanism.
690 *
691 * The other chunk of responsibility this code has is to reset the
692 * interrupt masks appropriately to reset irqs and NMIs. We have
693 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
694 * lockdep-type stuff, but we can't set ICS until afterwards, since
695 * ICS can only be used in very tight chunks of code to avoid
696 * tripping over various assertions that it is off.
697 */
698.Lrestore_all:
699 PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
700 {
701 ld r0, r0
702 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
703 }
704 {
705 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
706 ld r32, r32
707 }
708 bnez r0, 1f
709 j 2f
710#if PT_FLAGS_DISABLE_IRQ != 1
711# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
712#endif
7131: blbct r32, 2f
714 IRQ_DISABLE(r20,r21)
715 TRACE_IRQS_OFF
716 movei r0, 1
717 mtspr INTERRUPT_CRITICAL_SECTION, r0
718 beqzt r30, .Lrestore_regs
719 j 3f
7202: TRACE_IRQS_ON
721 movei r0, 1
722 mtspr INTERRUPT_CRITICAL_SECTION, r0
723 IRQ_ENABLE(r20, r21)
724 beqzt r30, .Lrestore_regs
7253:
726
727
728 /*
729 * We now commit to returning from this interrupt, since we will be
730 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
731 * frame. No calls should be made to any other code after this point.
732 * This code should only be entered with ICS set.
733 * r32 must still be set to ptregs.flags.
734 * We launch loads to each cache line separately first, so we can
735 * get some parallelism out of the memory subsystem.
736 * We start zeroing caller-saved registers throughout, since
737 * that will save some cycles if this turns out to be a syscall.
738 */
739.Lrestore_regs:
740 FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
741
742 /*
743 * Rotate so we have one high bit and one low bit to test.
744 * - low bit says whether to restore all the callee-saved registers,
745 * or just r30-r33, and r52 up.
746 * - high bit (i.e. sign bit) says whether to restore all the
747 * caller-saved registers, or just r0.
748 */
749#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
750# error Rotate trick does not work :-)
751#endif
752 {
753 rotli r20, r32, 62
754 PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
755 }
756
757 /*
758 * Load cache lines 0, 4, 6 and 7, in that order, then use
759 * the last loaded value, which makes it likely that the other
760 * cache lines have also loaded, at which point we should be
761 * able to safely read all the remaining words on those cache
762 * lines without waiting for the memory subsystem.
763 */
764 pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
765 pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
766 pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
767 pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
768 pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
769 {
770 mtspr CMPEXCH_VALUE, r21
771 move r4, zero
772 }
773 pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
774 {
775 mtspr SPR_EX_CONTEXT_K_1, lr
776 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
777 }
778 {
779 mtspr SPR_EX_CONTEXT_K_0, r21
780 move r5, zero
781 }
782
783 /* Restore callee-saveds that we actually use. */
784 pop_reg_zero r31, r6
785 pop_reg_zero r32, r7
786 pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
787
788 /*
789 * If we modified other callee-saveds, restore them now.
790 * This is rare, but could be via ptrace or signal handler.
791 */
792 {
793 move r9, zero
794 blbs r20, .Lrestore_callees
795 }
796.Lcontinue_restore_regs:
797
798 /* Check if we're returning from a syscall. */
799 {
800 move r10, zero
 801             bltzt r20, 1f  /* no, so go restore caller-save registers */
802 }
803
804 /*
805 * Check if we're returning to userspace.
806 * Note that if we're not, we don't worry about zeroing everything.
807 */
808 {
809 addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
810 bnez lr, .Lkernel_return
811 }
812
813 /*
814 * On return from syscall, we've restored r0 from pt_regs, but we
815 * clear the remainder of the caller-saved registers. We could
816 * restore the syscall arguments, but there's not much point,
 817             * and clearing them ensures user programs aren't relying on
 818             * the caller-saves, as well as avoiding leaking kernel
 819             * pointers into userspace.
820 */
821 pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
822 pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
823 {
824 ld sp, sp
825 move r13, zero
826 move r14, zero
827 }
828 { move r15, zero; move r16, zero }
829 { move r17, zero; move r18, zero }
830 { move r19, zero; move r20, zero }
831 { move r21, zero; move r22, zero }
832 { move r23, zero; move r24, zero }
833 { move r25, zero; move r26, zero }
834
835 /* Set r1 to errno if we are returning an error, otherwise zero. */
836 {
837 moveli r29, 4096
838 sub r1, zero, r0
839 }
840 {
841 move r28, zero
842 cmpltu r29, r1, r29
843 }
844 {
845 mnz r1, r29, r1
846 move r29, zero
847 }
848 iret
849
850 /*
851 * Not a syscall, so restore caller-saved registers.
852 * First kick off loads for cache lines 1-3, which we're touching
853 * for the first time here.
854 */
855 .align 64
8561: pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
857 pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
858 pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
859 pop_reg r1
860 pop_reg r2
861 pop_reg r3
862 pop_reg r4
863 pop_reg r5
864 pop_reg r6
865 pop_reg r7
866 pop_reg r8
867 pop_reg r9
868 pop_reg r10
869 pop_reg r11
870 pop_reg r12, sp, 16
871 /* r13 already restored above */
872 pop_reg r14
873 pop_reg r15
874 pop_reg r16
875 pop_reg r17
876 pop_reg r18
877 pop_reg r19
878 pop_reg r20, sp, 16
879 /* r21 already restored above */
880 pop_reg r22
881 pop_reg r23
882 pop_reg r24
883 pop_reg r25
884 pop_reg r26
885 pop_reg r27
886 pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
887 /* r29 already restored above */
888 bnez lr, .Lkernel_return
889 pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
890 pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
891 ld sp, sp
892 iret
893
894 /*
895 * We can't restore tp when in kernel mode, since a thread might
896 * have migrated from another cpu and brought a stale tp value.
897 */
898.Lkernel_return:
899 pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
900 ld sp, sp
901 iret
902
903 /* Restore callee-saved registers from r34 to r51. */
904.Lrestore_callees:
905 addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
906 pop_reg r34
907 pop_reg r35
908 pop_reg r36
909 pop_reg r37
910 pop_reg r38
911 pop_reg r39
912 pop_reg r40
913 pop_reg r41
914 pop_reg r42
915 pop_reg r43
916 pop_reg r44
917 pop_reg r45
918 pop_reg r46
919 pop_reg r47
920 pop_reg r48
921 pop_reg r49
922 pop_reg r50
923 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
924 j .Lcontinue_restore_regs
925 STD_ENDPROC(interrupt_return)
926
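The .Lrestore_regs path above folds two restore decisions into one rotate: given PT_FLAGS_CALLER_SAVES == 2 and PT_FLAGS_RESTORE_REGS == 4 (guaranteed by the #error check), "rotli r20, r32, 62" lands the restore-extra-callee-saves flag in the low bit (tested with blbs) and the caller-saves flag in the sign bit (tested with bltzt). A small host-side C check of that bit arithmetic, using the constants from the #error guard:

    #include <assert.h>
    #include <stdint.h>

    #define PT_FLAGS_CALLER_SAVES   2
    #define PT_FLAGS_RESTORE_REGS   4

    /* rotate left by 62 bits == rotate right by 2 bits */
    static uint64_t rotli62(uint64_t x)
    {
            return (x << 62) | (x >> 2);
    }

    int main(void)
    {
            uint64_t r = rotli62(PT_FLAGS_CALLER_SAVES | PT_FLAGS_RESTORE_REGS);

            assert(r & 1);                  /* low bit: restore the extra callee-saves */
            assert(r >> 63);                /* sign bit: restore all caller-saves */
            assert(rotli62(0) == 0);        /* neither flag set: skip both restores */
            return 0;
    }
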
927 /*
928 * "NMI" interrupts mask ALL interrupts before calling the
929 * handler, and don't check thread flags, etc., on the way
930 * back out. In general, the only things we do here for NMIs
931 * are register save/restore and dataplane kernel-TLB management.
932 * We don't (for example) deal with start/stop of the sched tick.
933 */
934 .pushsection .text.handle_nmi,"ax"
935handle_nmi:
936 finish_interrupt_save handle_nmi
937 {
938 jalr r0
939 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
940 }
941 FEEDBACK_REENTER(handle_nmi)
942 {
943 movei r30, 1
944 move r31, r0
945 }
946 j interrupt_return
947 STD_ENDPROC(handle_nmi)
948
949 /*
950 * Parallel code for syscalls to handle_interrupt.
951 */
952 .pushsection .text.handle_syscall,"ax"
953handle_syscall:
954 finish_interrupt_save handle_syscall
955
956 /* Enable irqs. */
957 TRACE_IRQS_ON
958 IRQ_ENABLE(r20, r21)
959
960 /* Bump the counter for syscalls made on this tile. */
961 moveli r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
962 shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
963 shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
964 add r20, r20, tp
965 ld4s r21, r20
966 addi r21, r21, 1
967 st4 r20, r21
968
969 /* Trace syscalls, if requested. */
970 GET_THREAD_INFO(r31)
971 addi r31, r31, THREAD_INFO_FLAGS_OFFSET
972 ld r30, r31
973 andi r30, r30, _TIF_SYSCALL_TRACE
974 {
975 addi r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
976 beqzt r30, .Lrestore_syscall_regs
977 }
978 jal do_syscall_trace
979 FEEDBACK_REENTER(handle_syscall)
980
981 /*
982 * We always reload our registers from the stack at this
983 * point. They might be valid, if we didn't build with
984 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
985 * doing syscall tracing, but there are enough cases now that it
986 * seems simplest just to do the reload unconditionally.
987 */
988.Lrestore_syscall_regs:
989 {
990 ld r30, r30
991 PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
992 }
993 pop_reg r0, r11
994 pop_reg r1, r11
995 pop_reg r2, r11
996 pop_reg r3, r11
997 pop_reg r4, r11
998 pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
999 {
1000 ld TREG_SYSCALL_NR_NAME, r11
1001 moveli r21, __NR_syscalls
1002 }
1003
1004 /* Ensure that the syscall number is within the legal range. */
1005 {
1006 moveli r20, hw2(sys_call_table)
1007 blbs r30, .Lcompat_syscall
1008 }
1009 {
1010 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
1011 shl16insli r20, r20, hw1(sys_call_table)
1012 }
1013 {
1014 blbc r21, .Linvalid_syscall
1015 shl16insli r20, r20, hw0(sys_call_table)
1016 }
1017.Lload_syscall_pointer:
1018 shl3add r20, TREG_SYSCALL_NR_NAME, r20
1019 ld r20, r20
1020
1021 /* Jump to syscall handler. */
1022 jalr r20
1023.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
1024
1025 /*
1026 * Write our r0 onto the stack so it gets restored instead
1027 * of whatever the user had there before.
1028 * In compat mode, sign-extend r0 before storing it.
1029 */
1030 {
1031 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1032 blbct r30, 1f
1033 }
1034 addxi r0, r0, 0
10351: st r29, r0
1036
1037.Lsyscall_sigreturn_skip:
1038 FEEDBACK_REENTER(handle_syscall)
1039
1040 /* Do syscall trace again, if requested. */
1041 ld r30, r31
1042 andi r30, r30, _TIF_SYSCALL_TRACE
1043 beqzt r30, 1f
1044 jal do_syscall_trace
1045 FEEDBACK_REENTER(handle_syscall)
10461: j .Lresume_userspace /* jump into middle of interrupt_return */
1047
1048.Lcompat_syscall:
1049 /*
1050 * Load the base of the compat syscall table in r20, and
1051 * range-check the syscall number (duplicated from 64-bit path).
1052 * Sign-extend all the user's passed arguments to make them consistent.
1053 * Also save the original "r(n)" values away in "r(11+n)" in
1054 * case the syscall table entry wants to validate them.
1055 */
1056 moveli r20, hw2(compat_sys_call_table)
1057 {
1058 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
1059 shl16insli r20, r20, hw1(compat_sys_call_table)
1060 }
1061 {
1062 blbc r21, .Linvalid_syscall
1063 shl16insli r20, r20, hw0(compat_sys_call_table)
1064 }
1065 { move r11, r0; addxi r0, r0, 0 }
1066 { move r12, r1; addxi r1, r1, 0 }
1067 { move r13, r2; addxi r2, r2, 0 }
1068 { move r14, r3; addxi r3, r3, 0 }
1069 { move r15, r4; addxi r4, r4, 0 }
1070 { move r16, r5; addxi r5, r5, 0 }
1071 j .Lload_syscall_pointer
1072
1073.Linvalid_syscall:
1074 /* Report an invalid syscall back to the user program */
1075 {
1076 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1077 movei r28, -ENOSYS
1078 }
1079 st r29, r28
1080 j .Lresume_userspace /* jump into middle of interrupt_return */
1081 STD_ENDPROC(handle_syscall)
1082
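handle_syscall above range-checks the syscall number, picks sys_call_table or (when the low bit of the thread_info status word is set) compat_sys_call_table, sign-extends the six argument registers with addxi in the compat case, and finally writes the sign-extended result over the saved r0. Compressed into C, the dispatch looks roughly like the following; the table-entry type is an assumption, since the real typedef is not part of this patch:

    #include <errno.h>
    #include <stdint.h>

    typedef long (*syscall_fn)(long, long, long, long, long, long);  /* assumed shape */

    extern syscall_fn sys_call_table[];
    extern syscall_fn compat_sys_call_table[];
    extern unsigned long nr_syscalls;               /* stands in for __NR_syscalls */

    static long dispatch_sketch(unsigned long nr, long a[6], int is_compat)
    {
            long ret;

            if (nr >= nr_syscalls)                  /* blbc r21, .Linvalid_syscall */
                    return -ENOSYS;

            if (is_compat) {
                    /* "{ move r1N, rN; addxi rN, rN, 0 }": sign-extend 32-bit args */
                    for (int i = 0; i < 6; i++)
                            a[i] = (int32_t)a[i];
                    ret = compat_sys_call_table[nr](a[0], a[1], a[2], a[3], a[4], a[5]);
                    ret = (int32_t)ret;             /* "addxi r0, r0, 0" before the store */
            } else {
                    ret = sys_call_table[nr](a[0], a[1], a[2], a[3], a[4], a[5]);
            }
            return ret;                             /* "st r29, r0" over the saved r0 */
    }
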
1083 /* Return the address for oprofile to suppress in backtraces. */
1084STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
1085 lnk r0
1086 {
1087 addli r0, r0, .Lhandle_syscall_link - .
1088 jrp lr
1089 }
1090 STD_ENDPROC(handle_syscall_link_address)
1091
1092STD_ENTRY(ret_from_fork)
1093 jal sim_notify_fork
1094 jal schedule_tail
1095 FEEDBACK_REENTER(ret_from_fork)
1096 j .Lresume_userspace
1097 STD_ENDPROC(ret_from_fork)
1098
1099/* Various stub interrupt handlers and syscall handlers */
1100
1101STD_ENTRY_LOCAL(_kernel_double_fault)
1102 mfspr r1, SPR_EX_CONTEXT_K_0
1103 move r2, lr
1104 move r3, sp
1105 move r4, r52
1106 addi sp, sp, -C_ABI_SAVE_AREA_SIZE
1107 j kernel_double_fault
1108 STD_ENDPROC(_kernel_double_fault)
1109
1110STD_ENTRY_LOCAL(bad_intr)
1111 mfspr r2, SPR_EX_CONTEXT_K_0
1112 panic "Unhandled interrupt %#x: PC %#lx"
1113 STD_ENDPROC(bad_intr)
1114
1115/* Put address of pt_regs in reg and jump. */
1116#define PTREGS_SYSCALL(x, reg) \
1117 STD_ENTRY(_##x); \
1118 { \
1119 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
1120 j x \
1121 }; \
1122 STD_ENDPROC(_##x)
1123
1124/*
1125 * Special-case sigreturn to not write r0 to the stack on return.
1126 * This is technically more efficient, but it also avoids difficulties
1127 * in the 64-bit OS when handling 32-bit compat code, since we must not
1128 * sign-extend r0 for the sigreturn return-value case.
1129 */
1130#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
1131 STD_ENTRY(_##x); \
1132 addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
1133 { \
1134 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
1135 j x \
1136 }; \
1137 STD_ENDPROC(_##x)
1138
1139PTREGS_SYSCALL(sys_execve, r3)
1140PTREGS_SYSCALL(sys_sigaltstack, r2)
1141PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
1142#ifdef CONFIG_COMPAT
1143PTREGS_SYSCALL(compat_sys_execve, r3)
1144PTREGS_SYSCALL(compat_sys_sigaltstack, r2)
1145PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
1146#endif
1147
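PTREGS_SYSCALL(x, reg) above just drops the pt_regs pointer into the named register and tail-jumps to the real handler, so the register picks the argument slot in which that handler receives regs: r3 is the fourth argument, r2 the third, r0 the first. The C-side prototypes this implies look roughly as follows (the non-regs parameters are illustrative, not taken from this patch):

    struct pt_regs;

    /* PTREGS_SYSCALL(sys_execve, r3): regs arrives as the fourth argument. */
    long sys_execve(const char *path, char *const argv[], char *const envp[],
                    struct pt_regs *regs);

    /* PTREGS_SYSCALL(sys_sigaltstack, r2): regs arrives as the third argument. */
    long sys_sigaltstack(const void *uss, void *uoss, struct pt_regs *regs);

    /* PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0): regs is the first argument. */
    long sys_rt_sigreturn(struct pt_regs *regs);

The sigreturn variant additionally retargets lr from .Lhandle_syscall_link to .Lsyscall_sigreturn_skip so that the common return path skips writing r0 back to the stack.
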
1148/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
1149STD_ENTRY(_sys_clone)
1150 push_extra_callee_saves r4
1151 j sys_clone
1152 STD_ENDPROC(_sys_clone)
1153
1154/* The single-step support may need to read all the registers. */
1155int_unalign:
1156 push_extra_callee_saves r0
1157 j do_trap
1158
1159/* Include .intrpt1 array of interrupt vectors */
1160 .section ".intrpt1", "ax"
1161
1162#define op_handle_perf_interrupt bad_intr
1163#define op_handle_aux_perf_interrupt bad_intr
1164
1165#ifndef CONFIG_HARDWALL
1166#define do_hardwall_trap bad_intr
1167#endif
1168
1169 int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
1170 int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
1171#if CONFIG_KERNEL_PL == 2
1172 int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
1173 int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
1174#else
1175 int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
1176 int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
1177#endif
1178 int_hand INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
1179 int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
1180 int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
1181 int_hand INT_ITLB_MISS, ITLB_MISS, do_page_fault
1182 int_hand INT_ILL, ILL, do_trap
1183 int_hand INT_GPV, GPV, do_trap
1184 int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
1185 int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
1186 int_hand INT_SWINT_3, SWINT_3, do_trap
1187 int_hand INT_SWINT_2, SWINT_2, do_trap
1188 int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
1189 int_hand INT_SWINT_0, SWINT_0, do_trap
1190 int_hand INT_ILL_TRANS, ILL_TRANS, do_trap
1191 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1192 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1193 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1194 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
1195 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1196 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1197 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
1198 int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
1199 int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
1200 int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
1201 int_hand INT_IPI_3, IPI_3, bad_intr
1202#if CONFIG_KERNEL_PL == 2
1203 int_hand INT_IPI_2, IPI_2, tile_dev_intr
1204 int_hand INT_IPI_1, IPI_1, bad_intr
1205#else
1206 int_hand INT_IPI_2, IPI_2, bad_intr
1207 int_hand INT_IPI_1, IPI_1, tile_dev_intr
1208#endif
1209 int_hand INT_IPI_0, IPI_0, bad_intr
1210 int_hand INT_PERF_COUNT, PERF_COUNT, \
1211 op_handle_perf_interrupt, handle_nmi
1212 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
1213 op_handle_perf_interrupt, handle_nmi
1214 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
1215#if CONFIG_KERNEL_PL == 2
1216 dc_dispatch INT_INTCTRL_2, INTCTRL_2
1217 int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
1218#else
1219 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
1220 dc_dispatch INT_INTCTRL_1, INTCTRL_1
1221#endif
1222 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
1223 int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
1224 hv_message_intr
1225 int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
1226 int_hand INT_I_ASID, I_ASID, bad_intr
1227 int_hand INT_D_ASID, D_ASID, bad_intr
1228 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
1229
1230 /* Synthetic interrupt delivered only by the simulator */
1231 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
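
Each int_hand/dc_dispatch instance above opens with ".org (\vecnum << 8)", so every vector owns a fixed 256-byte slot inside the .intrpt1 section, and the feedback stubs get 32-byte slots via ".org (\vecnum << 5)". A trivial sanity check of the slot arithmetic (the section base is assumed, since only the linker knows it):

    #include <stdio.h>

    int main(void)
    {
            unsigned long intrpt1_base = 0;   /* assumed: the real base comes from the linker */

            for (unsigned vec = 0; vec < 4; vec++)
                    printf("vector %u: handler slot %#lx (256 bytes), feedback slot +%#x\n",
                           vec, intrpt1_base + ((unsigned long)vec << 8), vec << 5);
            return 0;
    }
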
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index e2ab82b7c7e7..f68df69f1f67 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -22,6 +22,7 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <asm/opcode-tile.h> 23#include <asm/opcode-tile.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/homecache.h>
25 26
26#ifdef __tilegx__ 27#ifdef __tilegx__
27# define Elf_Rela Elf64_Rela 28# define Elf_Rela Elf64_Rela
@@ -86,8 +87,13 @@ error:
86void module_free(struct module *mod, void *module_region) 87void module_free(struct module *mod, void *module_region)
87{ 88{
88 vfree(module_region); 89 vfree(module_region);
90
91 /* Globally flush the L1 icache. */
92 flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
93 0, 0, 0, NULL, NULL, 0);
94
89 /* 95 /*
90 * FIXME: If module_region == mod->init_region, trim exception 96 * FIXME: If module_region == mod->module_init, trim exception
91 * table entries. 97 * table entries.
92 */ 98 */
93} 99}
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 658752b2835e..658f2ce426a4 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(dma_sync_single_range_for_device);
244 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no 244 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
245 * need to do any flushing here. 245 * need to do any flushing here.
246 */ 246 */
247void dma_cache_sync(void *vaddr, size_t size, 247void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
248 enum dma_data_direction direction) 248 enum dma_data_direction direction)
249{ 249{
250} 250}
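
The pci-dma.c hunk only aligns tile's dma_cache_sync() with the generic prototype, which takes the device as its first parameter; the body stays empty because, as the comment notes, dma_alloc_noncoherent() already returns non-cacheable memory here. An illustrative caller under the new signature:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical driver helper: make a CPU-written buffer visible to the device. */
    static void example_push_to_device(struct device *dev, void *vaddr, size_t len)
    {
            /* A no-op on tile, but keeps the driver portable across architectures. */
            dma_cache_sync(dev, vaddr, len, DMA_TO_DEVICE);
    }
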
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index ea38f0c9ec7c..6d4cb5d7a9fd 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved. 2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -59,6 +59,7 @@ int __write_once tile_plx_gen1;
59 59
60static struct pci_controller controllers[TILE_NUM_PCIE]; 60static struct pci_controller controllers[TILE_NUM_PCIE];
61static int num_controllers; 61static int num_controllers;
62static int pci_scan_flags[TILE_NUM_PCIE];
62 63
63static struct pci_ops tile_cfg_ops; 64static struct pci_ops tile_cfg_ops;
64 65
@@ -79,7 +80,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
79 * controller_id is the controller number, config type is 0 or 1 for 80 * controller_id is the controller number, config type is 0 or 1 for
80 * config0 or config1 operations. 81 * config0 or config1 operations.
81 */ 82 */
82static int __init tile_pcie_open(int controller_id, int config_type) 83static int __devinit tile_pcie_open(int controller_id, int config_type)
83{ 84{
84 char filename[32]; 85 char filename[32];
85 int fd; 86 int fd;
@@ -95,7 +96,7 @@ static int __init tile_pcie_open(int controller_id, int config_type)
95/* 96/*
96 * Get the IRQ numbers from the HV and set up the handlers for them. 97 * Get the IRQ numbers from the HV and set up the handlers for them.
97 */ 98 */
98static int __init tile_init_irqs(int controller_id, 99static int __devinit tile_init_irqs(int controller_id,
99 struct pci_controller *controller) 100 struct pci_controller *controller)
100{ 101{
101 char filename[32]; 102 char filename[32];
@@ -139,71 +140,74 @@ static int __init tile_init_irqs(int controller_id,
139 * 140 *
140 * Returns the number of controllers discovered. 141 * Returns the number of controllers discovered.
141 */ 142 */
142int __init tile_pci_init(void) 143int __devinit tile_pci_init(void)
143{ 144{
144 int i; 145 int i;
145 146
146 pr_info("PCI: Searching for controllers...\n"); 147 pr_info("PCI: Searching for controllers...\n");
147 148
149 /* Re-init number of PCIe controllers to support hot-plug feature. */
150 num_controllers = 0;
151
148 /* Do any configuration we need before using the PCIe */ 152 /* Do any configuration we need before using the PCIe */
149 153
150 for (i = 0; i < TILE_NUM_PCIE; i++) { 154 for (i = 0; i < TILE_NUM_PCIE; i++) {
151 int hv_cfg_fd0 = -1;
152 int hv_cfg_fd1 = -1;
153 int hv_mem_fd = -1;
154 char name[32];
155 struct pci_controller *controller;
156
157 /* 155 /*
158 * Open the fd to the HV. If it fails then this 156 * To see whether we need a real config op based on
159 * device doesn't exist. 157 * the results of pcibios_init(), to support PCIe hot-plug.
160 */ 158 */
161 hv_cfg_fd0 = tile_pcie_open(i, 0); 159 if (pci_scan_flags[i] == 0) {
162 if (hv_cfg_fd0 < 0) 160 int hv_cfg_fd0 = -1;
163 continue; 161 int hv_cfg_fd1 = -1;
164 hv_cfg_fd1 = tile_pcie_open(i, 1); 162 int hv_mem_fd = -1;
165 if (hv_cfg_fd1 < 0) { 163 char name[32];
166 pr_err("PCI: Couldn't open config fd to HV " 164 struct pci_controller *controller;
167 "for controller %d\n", i); 165
168 goto err_cont; 166 /*
169 } 167 * Open the fd to the HV. If it fails then this
170 168 * device doesn't exist.
171 sprintf(name, "pcie/%d/mem", i); 169 */
172 hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0); 170 hv_cfg_fd0 = tile_pcie_open(i, 0);
173 if (hv_mem_fd < 0) { 171 if (hv_cfg_fd0 < 0)
174 pr_err("PCI: Could not open mem fd to HV!\n"); 172 continue;
175 goto err_cont; 173 hv_cfg_fd1 = tile_pcie_open(i, 1);
176 } 174 if (hv_cfg_fd1 < 0) {
175 pr_err("PCI: Couldn't open config fd to HV "
176 "for controller %d\n", i);
177 goto err_cont;
178 }
177 179
178 pr_info("PCI: Found PCI controller #%d\n", i); 180 sprintf(name, "pcie/%d/mem", i);
181 hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
182 if (hv_mem_fd < 0) {
183 pr_err("PCI: Could not open mem fd to HV!\n");
184 goto err_cont;
185 }
179 186
180 controller = &controllers[num_controllers]; 187 pr_info("PCI: Found PCI controller #%d\n", i);
181 188
182 if (tile_init_irqs(i, controller)) { 189 controller = &controllers[i];
183 pr_err("PCI: Could not initialize "
184 "IRQs, aborting.\n");
185 goto err_cont;
186 }
187 190
188 controller->index = num_controllers; 191 controller->index = i;
189 controller->hv_cfg_fd[0] = hv_cfg_fd0; 192 controller->hv_cfg_fd[0] = hv_cfg_fd0;
190 controller->hv_cfg_fd[1] = hv_cfg_fd1; 193 controller->hv_cfg_fd[1] = hv_cfg_fd1;
191 controller->hv_mem_fd = hv_mem_fd; 194 controller->hv_mem_fd = hv_mem_fd;
192 controller->first_busno = 0; 195 controller->first_busno = 0;
193 controller->last_busno = 0xff; 196 controller->last_busno = 0xff;
194 controller->ops = &tile_cfg_ops; 197 controller->ops = &tile_cfg_ops;
195 198
196 num_controllers++; 199 num_controllers++;
197 continue; 200 continue;
198 201
199err_cont: 202err_cont:
200 if (hv_cfg_fd0 >= 0) 203 if (hv_cfg_fd0 >= 0)
201 hv_dev_close(hv_cfg_fd0); 204 hv_dev_close(hv_cfg_fd0);
202 if (hv_cfg_fd1 >= 0) 205 if (hv_cfg_fd1 >= 0)
203 hv_dev_close(hv_cfg_fd1); 206 hv_dev_close(hv_cfg_fd1);
204 if (hv_mem_fd >= 0) 207 if (hv_mem_fd >= 0)
205 hv_dev_close(hv_mem_fd); 208 hv_dev_close(hv_mem_fd);
206 continue; 209 continue;
210 }
207 } 211 }
208 212
209 /* 213 /*
@@ -232,7 +236,7 @@ static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
232} 236}
233 237
234 238
235static void __init fixup_read_and_payload_sizes(void) 239static void __devinit fixup_read_and_payload_sizes(void)
236{ 240{
237 struct pci_dev *dev = NULL; 241 struct pci_dev *dev = NULL;
238 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ 242 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
@@ -282,7 +286,7 @@ static void __init fixup_read_and_payload_sizes(void)
282 * The controllers have been set up by the time we get here, by a call to 286 * The controllers have been set up by the time we get here, by a call to
283 * tile_pci_init. 287 * tile_pci_init.
284 */ 288 */
285static int __init pcibios_init(void) 289int __devinit pcibios_init(void)
286{ 290{
287 int i; 291 int i;
288 292
@@ -296,25 +300,36 @@ static int __init pcibios_init(void)
296 mdelay(250); 300 mdelay(250);
297 301
298 /* Scan all of the recorded PCI controllers. */ 302 /* Scan all of the recorded PCI controllers. */
299 for (i = 0; i < num_controllers; i++) { 303 for (i = 0; i < TILE_NUM_PCIE; i++) {
300 struct pci_controller *controller = &controllers[i];
301 struct pci_bus *bus;
302
303 pr_info("PCI: initializing controller #%d\n", i);
304
305 /* 304 /*
306 * This comes from the generic Linux PCI driver. 305 * Do real pcibios init ops if the controller is initialized
307 * 306 * by tile_pci_init() successfully and not initialized by
308 * It reads the PCI tree for this bus into the Linux 307 * pcibios_init() yet to support PCIe hot-plug.
309 * data structures.
310 *
311 * This is inlined in linux/pci.h and calls into
312 * pci_scan_bus_parented() in probe.c.
313 */ 308 */
314 bus = pci_scan_bus(0, controller->ops, controller); 309 if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
315 controller->root_bus = bus; 310 struct pci_controller *controller = &controllers[i];
316 controller->last_busno = bus->subordinate; 311 struct pci_bus *bus;
317 312
313 if (tile_init_irqs(i, controller)) {
314 pr_err("PCI: Could not initialize IRQs\n");
315 continue;
316 }
317
318 pr_info("PCI: initializing controller #%d\n", i);
319
320 /*
321 * This comes from the generic Linux PCI driver.
322 *
323 * It reads the PCI tree for this bus into the Linux
324 * data structures.
325 *
326 * This is inlined in linux/pci.h and calls into
327 * pci_scan_bus_parented() in probe.c.
328 */
329 bus = pci_scan_bus(0, controller->ops, controller);
330 controller->root_bus = bus;
331 controller->last_busno = bus->subordinate;
332 }
318 } 333 }
319 334
320 /* Do machine dependent PCI interrupt routing */ 335 /* Do machine dependent PCI interrupt routing */
@@ -326,34 +341,45 @@ static int __init pcibios_init(void)
326 * It allocates all of the resources (I/O memory, etc) 341 * It allocates all of the resources (I/O memory, etc)
327 * associated with the devices read in above. 342 * associated with the devices read in above.
328 */ 343 */
329
330 pci_assign_unassigned_resources(); 344 pci_assign_unassigned_resources();
331 345
332 /* Configure the max_read_size and max_payload_size values. */ 346 /* Configure the max_read_size and max_payload_size values. */
333 fixup_read_and_payload_sizes(); 347 fixup_read_and_payload_sizes();
334 348
335 /* Record the I/O resources in the PCI controller structure. */ 349 /* Record the I/O resources in the PCI controller structure. */
336 for (i = 0; i < num_controllers; i++) { 350 for (i = 0; i < TILE_NUM_PCIE; i++) {
337 struct pci_bus *root_bus = controllers[i].root_bus; 351 /*
338 struct pci_bus *next_bus; 352 * Do real pcibios init ops if the controller is initialized
339 struct pci_dev *dev; 353 * by tile_pci_init() successfully and not initialized by
340 354 * pcibios_init() yet to support PCIe hot-plug.
341 list_for_each_entry(dev, &root_bus->devices, bus_list) { 355 */
342 /* Find the PCI host controller, ie. the 1st bridge. */ 356 if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
343 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && 357 struct pci_bus *root_bus = controllers[i].root_bus;
344 (PCI_SLOT(dev->devfn) == 0)) { 358 struct pci_bus *next_bus;
345 next_bus = dev->subordinate; 359 struct pci_dev *dev;
346 controllers[i].mem_resources[0] = 360
347 *next_bus->resource[0]; 361 list_for_each_entry(dev, &root_bus->devices, bus_list) {
348 controllers[i].mem_resources[1] = 362 /*
349 *next_bus->resource[1]; 363 * Find the PCI host controller, ie. the 1st
350 controllers[i].mem_resources[2] = 364 * bridge.
351 *next_bus->resource[2]; 365 */
352 366 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
353 break; 367 (PCI_SLOT(dev->devfn) == 0)) {
368 next_bus = dev->subordinate;
369 controllers[i].mem_resources[0] =
370 *next_bus->resource[0];
371 controllers[i].mem_resources[1] =
372 *next_bus->resource[1];
373 controllers[i].mem_resources[2] =
374 *next_bus->resource[2];
375
376 /* Setup flags. */
377 pci_scan_flags[i] = 1;
378
379 break;
380 }
354 } 381 }
355 } 382 }
356
357 } 383 }
358 384
359 return 0; 385 return 0;
@@ -381,7 +407,7 @@ char __devinit *pcibios_setup(char *str)
381/* 407/*
382 * This is called from the generic Linux layer. 408 * This is called from the generic Linux layer.
383 */ 409 */
384void __init pcibios_update_irq(struct pci_dev *dev, int irq) 410void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
385{ 411{
386 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); 412 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
387} 413}
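
The pci.c hunk above changes pcibios_init() from scanning each recorded controller exactly once to looping over all TILE_NUM_PCIE slots and skipping any controller that either was not set up by tile_pci_init() or has already been scanned (pci_scan_flags[i] != 0), so the routine can be re-entered for PCIe hot-plug. A minimal standalone sketch of that scan-once guard, assuming invented names (NUM_CTRL, struct controller, fake_scan(), rescan_all()) rather than the real kernel interfaces:

#include <stdio.h>

#define NUM_CTRL 4

struct controller { int ready; };                /* stands in for controllers[i].ops != NULL */

static struct controller controllers[NUM_CTRL] = { {1}, {0}, {1}, {1} };
static int scan_flags[NUM_CTRL];                 /* stands in for pci_scan_flags[] */

static void fake_scan(int i)
{
	printf("scanning controller #%d\n", i);
}

static void rescan_all(void)
{
	for (int i = 0; i < NUM_CTRL; i++) {
		/* Only scan controllers that are set up and not yet scanned. */
		if (scan_flags[i] == 0 && controllers[i].ready) {
			fake_scan(i);
			scan_flags[i] = 1;       /* never scanned a second time */
		}
	}
}

int main(void)
{
	rescan_all();                 /* first pass: scans #0, #2, #3 */
	controllers[1].ready = 1;     /* controller #1 comes up later (hot-plug) */
	rescan_all();                 /* second pass: scans only #1 */
	return 0;
}

In the real patch the flag is set in the second loop of pcibios_init(), after the controller's I/O resources have been recorded.
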
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index d0065103eb7b..9c45d8bbdf57 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -25,10 +25,13 @@
25#include <linux/hardirq.h> 25#include <linux/hardirq.h>
26#include <linux/syscalls.h> 26#include <linux/syscalls.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/tracehook.h>
29#include <linux/signal.h>
28#include <asm/system.h> 30#include <asm/system.h>
29#include <asm/stack.h> 31#include <asm/stack.h>
30#include <asm/homecache.h> 32#include <asm/homecache.h>
31#include <asm/syscalls.h> 33#include <asm/syscalls.h>
34#include <asm/traps.h>
32#ifdef CONFIG_HARDWALL 35#ifdef CONFIG_HARDWALL
33#include <asm/hardwall.h> 36#include <asm/hardwall.h>
34#endif 37#endif
@@ -546,6 +549,51 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
546 return __switch_to(prev, next, next_current_ksp0(next)); 549 return __switch_to(prev, next, next_current_ksp0(next));
547} 550}
548 551
552/*
553 * This routine is called on return from interrupt if any of the
554 * TIF_WORK_MASK flags are set in thread_info->flags. It is
555 * entered with interrupts disabled so we don't miss an event
556 * that modified the thread_info flags. If any flag is set, we
557 * handle it and return, and the calling assembly code will
558 * re-disable interrupts, reload the thread flags, and call back
559 * if more flags need to be handled.
560 *
561 * We return whether we need to check the thread_info flags again
562 * or not. Note that we don't clear TIF_SINGLESTEP here, so it's
563 * important that it be tested last, and then claim that we don't
564 * need to recheck the flags.
565 */
566int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
567{
568 if (thread_info_flags & _TIF_NEED_RESCHED) {
569 schedule();
570 return 1;
571 }
572#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
573 if (thread_info_flags & _TIF_ASYNC_TLB) {
574 do_async_page_fault(regs);
575 return 1;
576 }
577#endif
578 if (thread_info_flags & _TIF_SIGPENDING) {
579 do_signal(regs);
580 return 1;
581 }
582 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
583 clear_thread_flag(TIF_NOTIFY_RESUME);
584 tracehook_notify_resume(regs);
585 if (current->replacement_session_keyring)
586 key_replace_session_keyring();
587 return 1;
588 }
589 if (thread_info_flags & _TIF_SINGLESTEP) {
590 if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
591 single_step_once(regs);
592 return 0;
593 }
594 panic("work_pending: bad flags %#x\n", thread_info_flags);
595}
596
549/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */ 597/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
550SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 598SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
551 void __user *, parent_tidptr, void __user *, child_tidptr, 599 void __user *, parent_tidptr, void __user *, child_tidptr,
@@ -582,8 +630,8 @@ out:
582 630
583#ifdef CONFIG_COMPAT 631#ifdef CONFIG_COMPAT
584long compat_sys_execve(const char __user *path, 632long compat_sys_execve(const char __user *path,
585 const compat_uptr_t __user *argv, 633 compat_uptr_t __user *argv,
586 const compat_uptr_t __user *envp, 634 compat_uptr_t __user *envp,
587 struct pt_regs *regs) 635 struct pt_regs *regs)
588{ 636{
589 long error; 637 long error;
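
The comment added to process.c above spells out the contract of do_work_pending(): it is entered with interrupts disabled, handles at most one flag per call, and returns non-zero when the assembly caller should reload the thread flags and call back, with TIF_SINGLESTEP tested last because it is not cleared. A standalone sketch of that one-flag-at-a-time loop, with made-up flag values and printf handlers in place of the real scheduler, signal and single-step code:

#include <stdio.h>

#define TIF_NEED_RESCHED (1u << 0)
#define TIF_SIGPENDING   (1u << 1)
#define TIF_SINGLESTEP   (1u << 2)

static unsigned int thread_flags = TIF_NEED_RESCHED | TIF_SIGPENDING | TIF_SINGLESTEP;

/* Handle at most one flag; return 1 if the caller must re-check the flags. */
static int do_work_pending_sketch(unsigned int flags)
{
	if (flags & TIF_NEED_RESCHED) {
		printf("reschedule\n");
		thread_flags &= ~TIF_NEED_RESCHED;
		return 1;
	}
	if (flags & TIF_SIGPENDING) {
		printf("deliver pending signal\n");
		thread_flags &= ~TIF_SIGPENDING;
		return 1;
	}
	if (flags & TIF_SINGLESTEP) {
		printf("arm single-step\n");     /* tested last; flag deliberately left set */
		return 0;
	}
	return 0;
}

int main(void)
{
	/* Stands in for the assembly loop: reload the flags and call back until done. */
	while (do_work_pending_sketch(thread_flags))
		;
	return 0;
}
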
diff --git a/arch/tile/kernel/regs_64.S b/arch/tile/kernel/regs_64.S
new file mode 100644
index 000000000000..f748c1e85285
--- /dev/null
+++ b/arch/tile/kernel/regs_64.S
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/linkage.h>
16#include <asm/system.h>
17#include <asm/ptrace.h>
18#include <asm/asm-offsets.h>
19#include <arch/spr_def.h>
20#include <asm/processor.h>
21
22/*
23 * See <asm/system.h>; called with prev and next task_struct pointers.
24 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
25 *
26 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
27 * We also need to save all the callee-saved registers on the stack.
28 *
29 * Intel enables/disables access to the hardware cycle counter in
30 * seccomp (secure computing) environments if necessary, based on
31 * has_secure_computing(). We might want to do this at some point,
32 * though it would require virtualizing the other SPRs under WORLD_ACCESS.
33 *
34 * Since we're saving to the stack, we omit sp from this list.
35 * And for parallels with other architectures, we save lr separately,
36 * in the thread_struct itself (as the "pc" field).
37 *
38 * This code also needs to be aligned with process.c copy_thread()
39 */
40
41#if CALLEE_SAVED_REGS_COUNT != 24
42# error Mismatch between <asm/system.h> and kernel/entry.S
43#endif
44#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8)
45
46#define SAVE_REG(r) { st r12, r; addi r12, r12, 8 }
47#define LOAD_REG(r) { ld r, r12; addi r12, r12, 8 }
48#define FOR_EACH_CALLEE_SAVED_REG(f) \
49 f(r30); f(r31); \
50 f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
51 f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
52 f(r48); f(r49); f(r50); f(r51); f(r52);
53
54STD_ENTRY_SECTION(__switch_to, .sched.text)
55 {
56 move r10, sp
57 st sp, lr
58 }
59 {
60 addli r11, sp, -FRAME_SIZE + 8
61 addli sp, sp, -FRAME_SIZE
62 }
63 {
64 st r11, r10
65 addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
66 }
67 {
68 ld r13, r4 /* Load new sp to a temp register early. */
69 addi r12, sp, 16
70 }
71 FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
72 addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
73 {
74 st r3, sp
75 addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
76 }
77 {
78 st r3, lr
79 addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
80 }
81 {
82 ld lr, r4
83 addi r12, r13, 16
84 }
85 {
86 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
87 move sp, r13
88 mtspr SPR_SYSTEM_SAVE_K_0, r2
89 }
90 FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
91.L__switch_to_pc:
92 {
93 addli sp, sp, FRAME_SIZE
94 jrp lr /* r0 is still valid here, so return it */
95 }
96 STD_ENDPROC(__switch_to)
97
98/* Return a suitable address for the backtracer for suspended threads */
99STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
100 lnk r0
101 {
102 addli r0, r0, .L__switch_to_pc - .
103 jrp lr
104 }
105 STD_ENDPROC(get_switch_to_pc)
106
107STD_ENTRY(get_pt_regs)
108 .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
109 r8, r9, r10, r11, r12, r13, r14, r15, \
110 r16, r17, r18, r19, r20, r21, r22, r23, \
111 r24, r25, r26, r27, r28, r29, r30, r31, \
112 r32, r33, r34, r35, r36, r37, r38, r39, \
113 r40, r41, r42, r43, r44, r45, r46, r47, \
114 r48, r49, r50, r51, r52, tp, sp
115 {
116 st r0, \reg
117 addi r0, r0, 8
118 }
119 .endr
120 {
121 st r0, lr
122 addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
123 }
124 lnk r1
125 {
126 st r0, r1
127 addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
128 }
129 mfspr r1, INTERRUPT_CRITICAL_SECTION
130 shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
131 ori r1, r1, KERNEL_PL
132 {
133 st r0, r1
134 addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
135 }
136 {
137 st r0, zero /* clear faultnum */
138 addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
139 }
140 {
141 st r0, zero /* clear orig_r0 */
142 addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */
143 }
144 jrp lr
145 STD_ENDPROC(get_pt_regs)
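
regs_64.S above generates its register save and restore sequences by applying SAVE_REG()/LOAD_REG() to a single FOR_EACH_CALLEE_SAVED_REG list, so the two sequences cannot drift apart and the CALLEE_SAVED_REGS_COUNT check catches a mismatched frame size. A standalone C sketch of the same list-macro idea, with a shortened register list and invented slot_/vals names:

#include <stdio.h>

#define FOR_EACH_REG(f) f(r30) f(r31) f(r32) f(r33)

/* One expansion declares the storage slots... */
#define DECLARE_SLOT(r) static long slot_##r;
FOR_EACH_REG(DECLARE_SLOT)

/* ...and two more expansions generate matching save and restore code. */
#define SAVE(r)    slot_##r = vals[n++];
#define RESTORE(r) printf(#r " = %ld\n", slot_##r);

int main(void)
{
	long vals[] = { 1, 2, 3, 4 };
	int n = 0;

	FOR_EACH_REG(SAVE)
	FOR_EACH_REG(RESTORE)
	return 0;
}
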
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 3696b1832566..6cdc9ba55fe0 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -912,6 +912,8 @@ void __cpuinit setup_cpu(int boot)
912#endif 912#endif
913} 913}
914 914
915#ifdef CONFIG_BLK_DEV_INITRD
916
915static int __initdata set_initramfs_file; 917static int __initdata set_initramfs_file;
916static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; 918static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
917 919
@@ -969,6 +971,10 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
969 free_bootmem(__pa(begin), end - begin); 971 free_bootmem(__pa(begin), end - begin);
970} 972}
971 973
974#else
975static inline void load_hv_initrd(void) {}
976#endif /* CONFIG_BLK_DEV_INITRD */
977
972static void __init validate_hv(void) 978static void __init validate_hv(void)
973{ 979{
974 /* 980 /*
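
The setup.c hunk above wraps the initramfs code in #ifdef CONFIG_BLK_DEV_INITRD and supplies an empty inline load_hv_initrd() stub when the option is off, so callers need no #ifdefs of their own. A standalone sketch of that stub pattern, using an invented CONFIG_DEMO_FEATURE/load_demo() pair:

#include <stdio.h>

/* #define CONFIG_DEMO_FEATURE 1 */

#ifdef CONFIG_DEMO_FEATURE
static void load_demo(void)
{
	printf("feature loaded\n");
}
#else
/* Configured out: same name, empty body, so call sites stay unconditional. */
static inline void load_demo(void) {}
#endif

int main(void)
{
	load_demo();      /* compiles and links either way */
	return 0;
}
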
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 1260321155f1..bedaf4e9f3a7 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -39,7 +39,6 @@
39 39
40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
41 41
42
43SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss, 42SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
44 stack_t __user *, uoss, struct pt_regs *, regs) 43 stack_t __user *, uoss, struct pt_regs *, regs)
45{ 44{
@@ -78,6 +77,13 @@ int restore_sigcontext(struct pt_regs *regs,
78 return err; 77 return err;
79} 78}
80 79
80void signal_fault(const char *type, struct pt_regs *regs,
81 void __user *frame, int sig)
82{
83 trace_unhandled_signal(type, regs, (unsigned long)frame, SIGSEGV);
84 force_sigsegv(sig, current);
85}
86
81/* The assembly shim for this function arranges to ignore the return value. */ 87/* The assembly shim for this function arranges to ignore the return value. */
82SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs) 88SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
83{ 89{
@@ -105,7 +111,7 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
105 return 0; 111 return 0;
106 112
107badframe: 113badframe:
108 force_sig(SIGSEGV, current); 114 signal_fault("bad sigreturn frame", regs, frame, 0);
109 return 0; 115 return 0;
110} 116}
111 117
@@ -231,7 +237,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
231 return 0; 237 return 0;
232 238
233give_sigsegv: 239give_sigsegv:
234 force_sigsegv(sig, current); 240 signal_fault("bad setup frame", regs, frame, sig);
235 return -EFAULT; 241 return -EFAULT;
236} 242}
237 243
@@ -245,7 +251,6 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
245{ 251{
246 int ret; 252 int ret;
247 253
248
249 /* Are we from a system call? */ 254 /* Are we from a system call? */
250 if (regs->faultnum == INT_SWINT_1) { 255 if (regs->faultnum == INT_SWINT_1) {
251 /* If so, check system call restarting.. */ 256 /* If so, check system call restarting.. */
@@ -363,3 +368,118 @@ done:
363 /* Avoid double syscall restart if there are nested signals. */ 368 /* Avoid double syscall restart if there are nested signals. */
364 regs->faultnum = INT_SWINT_1_SIGRETURN; 369 regs->faultnum = INT_SWINT_1_SIGRETURN;
365} 370}
371
372int show_unhandled_signals = 1;
373
374static int __init crashinfo(char *str)
375{
376 unsigned long val;
377 const char *word;
378
379 if (*str == '\0')
380 val = 2;
381 else if (*str != '=' || strict_strtoul(++str, 0, &val) != 0)
382 return 0;
383 show_unhandled_signals = val;
384 switch (show_unhandled_signals) {
385 case 0:
386 word = "No";
387 break;
388 case 1:
389 word = "One-line";
390 break;
391 default:
392 word = "Detailed";
393 break;
394 }
395 pr_info("%s crash reports will be generated on the console\n", word);
396 return 1;
397}
398__setup("crashinfo", crashinfo);
399
400static void dump_mem(void __user *address)
401{
402 void __user *addr;
403 enum { region_size = 256, bytes_per_line = 16 };
404 int i, j, k;
405 int found_readable_mem = 0;
406
407 pr_err("\n");
408 if (!access_ok(VERIFY_READ, address, 1)) {
409 pr_err("Not dumping at address 0x%lx (kernel address)\n",
410 (unsigned long)address);
411 return;
412 }
413
414 addr = (void __user *)
415 (((unsigned long)address & -bytes_per_line) - region_size/2);
416 if (addr > address)
417 addr = NULL;
418 for (i = 0; i < region_size;
419 addr += bytes_per_line, i += bytes_per_line) {
420 unsigned char buf[bytes_per_line];
421 char line[100];
422 if (copy_from_user(buf, addr, bytes_per_line))
423 continue;
424 if (!found_readable_mem) {
425 pr_err("Dumping memory around address 0x%lx:\n",
426 (unsigned long)address);
427 found_readable_mem = 1;
428 }
429 j = sprintf(line, REGFMT":", (unsigned long)addr);
430 for (k = 0; k < bytes_per_line; ++k)
431 j += sprintf(&line[j], " %02x", buf[k]);
432 pr_err("%s\n", line);
433 }
434 if (!found_readable_mem)
435 pr_err("No readable memory around address 0x%lx\n",
436 (unsigned long)address);
437}
438
439void trace_unhandled_signal(const char *type, struct pt_regs *regs,
440 unsigned long address, int sig)
441{
442 struct task_struct *tsk = current;
443
444 if (show_unhandled_signals == 0)
445 return;
446
447 /* If the signal is handled, don't show it here. */
448 if (!is_global_init(tsk)) {
449 void __user *handler =
450 tsk->sighand->action[sig-1].sa.sa_handler;
451 if (handler != SIG_IGN && handler != SIG_DFL)
452 return;
453 }
454
455 /* Rate-limit the one-line output, not the detailed output. */
456 if (show_unhandled_signals <= 1 && !printk_ratelimit())
457 return;
458
459 printk("%s%s[%d]: %s at %lx pc "REGFMT" signal %d",
460 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
461 tsk->comm, task_pid_nr(tsk), type, address, regs->pc, sig);
462
463 print_vma_addr(KERN_CONT " in ", regs->pc);
464
465 printk(KERN_CONT "\n");
466
467 if (show_unhandled_signals > 1) {
468 switch (sig) {
469 case SIGILL:
470 case SIGFPE:
471 case SIGSEGV:
472 case SIGBUS:
473 pr_err("User crash: signal %d,"
474 " trap %ld, address 0x%lx\n",
475 sig, regs->faultnum, address);
476 show_regs(regs);
477 dump_mem((void __user *)address);
478 break;
479 default:
480 pr_err("User crash: signal %d, trap %ld\n",
481 sig, regs->faultnum);
482 break;
483 }
484 }
485}
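
dump_mem() in the signal.c hunk above rounds the faulting address down to a 16-byte line, backs up half of a 256-byte window, and prints 16 hex bytes per line, skipping lines that cannot be read. A standalone sketch of that layout which reads an ordinary buffer, so the access_ok()/copy_from_user() checks of the real function are left out; region_size is shrunk here and dump_mem_sketch() is an invented name:

#include <stdio.h>
#include <stddef.h>

enum { region_size = 64, bytes_per_line = 16 };

static void dump_mem_sketch(const unsigned char *base, size_t len, size_t fault_off)
{
	/* Align down to a line, then back up half the window (clamped to 0). */
	size_t start = fault_off & ~(size_t)(bytes_per_line - 1);

	start = start > region_size / 2 ? start - region_size / 2 : 0;
	for (size_t i = 0; i < region_size && start + i < len; i += bytes_per_line) {
		printf("%04zx:", start + i);
		for (size_t k = 0; k < bytes_per_line && start + i + k < len; k++)
			printf(" %02x", base[start + i + k]);
		printf("\n");
	}
}

int main(void)
{
	unsigned char buf[256];

	for (size_t i = 0; i < sizeof(buf); i++)
		buf[i] = (unsigned char)i;
	dump_mem_sketch(buf, sizeof(buf), 0x80);   /* pretend the fault was at offset 0x80 */
	return 0;
}
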
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 84a729e06ec4..4032ca8e51b6 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -186,6 +186,8 @@ static tile_bundle_bits rewrite_load_store_unaligned(
186 .si_code = SEGV_MAPERR, 186 .si_code = SEGV_MAPERR,
187 .si_addr = addr 187 .si_addr = addr
188 }; 188 };
189 trace_unhandled_signal("segfault", regs,
190 (unsigned long)addr, SIGSEGV);
189 force_sig_info(info.si_signo, &info, current); 191 force_sig_info(info.si_signo, &info, current);
190 return (tile_bundle_bits) 0; 192 return (tile_bundle_bits) 0;
191 } 193 }
@@ -196,6 +198,8 @@ static tile_bundle_bits rewrite_load_store_unaligned(
196 .si_code = BUS_ADRALN, 198 .si_code = BUS_ADRALN,
197 .si_addr = addr 199 .si_addr = addr
198 }; 200 };
201 trace_unhandled_signal("unaligned trap", regs,
202 (unsigned long)addr, SIGBUS);
199 force_sig_info(info.si_signo, &info, current); 203 force_sig_info(info.si_signo, &info, current);
200 return (tile_bundle_bits) 0; 204 return (tile_bundle_bits) 0;
201 } 205 }
@@ -318,6 +322,14 @@ void single_step_once(struct pt_regs *regs)
318" .popsection\n" 322" .popsection\n"
319 ); 323 );
320 324
325 /*
326 * Enable interrupts here to allow touching userspace and the like.
327 * The callers expect this: do_trap() already has interrupts
328 * enabled, and do_work_pending() handles functions that enable
329 * interrupts internally.
330 */
331 local_irq_enable();
332
321 if (state == NULL) { 333 if (state == NULL) {
322 /* allocate a page of writable, executable memory */ 334 /* allocate a page of writable, executable memory */
323 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL); 335 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index dd81713a90dc..37ee4d037e0b 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -36,7 +36,7 @@
36#define KBT_LOOP 3 /* Backtrace entered a loop */ 36#define KBT_LOOP 3 /* Backtrace entered a loop */
37 37
38/* Is address on the specified kernel stack? */ 38/* Is address on the specified kernel stack? */
39static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp) 39static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
40{ 40{
41 ulong kstack_base = (ulong) kbt->task->stack; 41 ulong kstack_base = (ulong) kbt->task->stack;
42 if (kstack_base == 0) /* corrupt task pointer; just follow stack... */ 42 if (kstack_base == 0) /* corrupt task pointer; just follow stack... */
@@ -45,7 +45,7 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
45} 45}
46 46
47/* Is address valid for reading? */ 47/* Is address valid for reading? */
48static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address) 48static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
49{ 49{
50 HV_PTE *l1_pgtable = kbt->pgtable; 50 HV_PTE *l1_pgtable = kbt->pgtable;
51 HV_PTE *l2_pgtable; 51 HV_PTE *l2_pgtable;
@@ -97,7 +97,7 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
97} 97}
98 98
99/* Callback for backtracer; basically a glorified memcpy */ 99/* Callback for backtracer; basically a glorified memcpy */
100static bool read_memory_func(void *result, VirtualAddress address, 100static bool read_memory_func(void *result, unsigned long address,
101 unsigned int size, void *vkbt) 101 unsigned int size, void *vkbt)
102{ 102{
103 int retval; 103 int retval;
@@ -124,7 +124,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
124{ 124{
125 const char *fault = NULL; /* happy compiler */ 125 const char *fault = NULL; /* happy compiler */
126 char fault_buf[64]; 126 char fault_buf[64];
127 VirtualAddress sp = kbt->it.sp; 127 unsigned long sp = kbt->it.sp;
128 struct pt_regs *p; 128 struct pt_regs *p;
129 129
130 if (!in_kernel_stack(kbt, sp)) 130 if (!in_kernel_stack(kbt, sp))
@@ -163,7 +163,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
163} 163}
164 164
165/* Is the pc pointing to a sigreturn trampoline? */ 165/* Is the pc pointing to a sigreturn trampoline? */
166static int is_sigreturn(VirtualAddress pc) 166static int is_sigreturn(unsigned long pc)
167{ 167{
168 return (pc == VDSO_BASE); 168 return (pc == VDSO_BASE);
169} 169}
@@ -260,7 +260,7 @@ static void validate_stack(struct pt_regs *regs)
260void KBacktraceIterator_init(struct KBacktraceIterator *kbt, 260void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
261 struct task_struct *t, struct pt_regs *regs) 261 struct task_struct *t, struct pt_regs *regs)
262{ 262{
263 VirtualAddress pc, lr, sp, r52; 263 unsigned long pc, lr, sp, r52;
264 int is_current; 264 int is_current;
265 265
266 /* 266 /*
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(KBacktraceIterator_end);
331 331
332void KBacktraceIterator_next(struct KBacktraceIterator *kbt) 332void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
333{ 333{
334 VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp; 334 unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
335 kbt->new_context = 0; 335 kbt->new_context = 0;
336 if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) { 336 if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
337 kbt->end = KBT_DONE; 337 kbt->end = KBT_DONE;
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index e2187d24a9b4..cb44ba7ccd2d 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -56,13 +56,6 @@ ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count)
56 return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count); 56 return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count);
57} 57}
58 58
59long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
60 u32 len, int advice)
61{
62 return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
63 len, advice);
64}
65
66int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, 59int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
67 u32 len_lo, u32 len_hi, int advice) 60 u32 len_lo, u32 len_hi, int advice)
68{ 61{
@@ -103,10 +96,8 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
103 96
104#ifndef __tilegx__ 97#ifndef __tilegx__
105/* See comments at the top of the file. */ 98/* See comments at the top of the file. */
106#define sys_fadvise64 sys32_fadvise64
107#define sys_fadvise64_64 sys32_fadvise64_64 99#define sys_fadvise64_64 sys32_fadvise64_64
108#define sys_readahead sys32_readahead 100#define sys_readahead sys32_readahead
109#define sys_sync_file_range sys_sync_file_range2
110#endif 101#endif
111 102
112/* Call the trampolines to manage pt_regs where necessary. */ 103/* Call the trampolines to manage pt_regs where necessary. */
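
The sys.c hunk above drops the now-unneeded sys32_fadvise64() wrapper but keeps the convention that 32-bit callers pass a 64-bit offset as two registers which the wrapper recombines as ((loff_t)hi << 32) | lo. A standalone sketch of that recombination, with demo_readahead()/demo_readahead32() standing in for the real syscalls and a local loff_t typedef:

#include <stdio.h>
#include <stdint.h>

typedef int64_t loff_t_sketch;

static long demo_readahead(int fd, loff_t_sketch offset, uint32_t count)
{
	printf("fd=%d offset=0x%llx count=%u\n", fd, (unsigned long long)offset, count);
	return 0;
}

/* 32-bit entry point: the 64-bit offset arrives as two 32-bit halves. */
static long demo_readahead32(int fd, uint32_t offset_lo, uint32_t offset_hi, uint32_t count)
{
	return demo_readahead(fd, ((loff_t_sketch)offset_hi << 32) | offset_lo, count);
}

int main(void)
{
	/* Offset 0x100000200 split across two registers. */
	return (int)demo_readahead32(3, 0x200u, 0x1u, 4096u);
}
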
diff --git a/arch/tile/kernel/tile-desc_32.c b/arch/tile/kernel/tile-desc_32.c
index 69af0e150f78..7e31a1285788 100644
--- a/arch/tile/kernel/tile-desc_32.c
+++ b/arch/tile/kernel/tile-desc_32.c
@@ -2413,12 +2413,13 @@ const struct tile_operand tile_operands[43] =
2413 2413
2414 2414
2415 2415
2416/* Given a set of bundle bits and the lookup FSM for a specific pipe, 2416/* Given a set of bundle bits and a specific pipe, returns which
2417 * returns which instruction the bundle contains in that pipe. 2417 * instruction the bundle contains in that pipe.
2418 */ 2418 */
2419static const struct tile_opcode * 2419const struct tile_opcode *
2420find_opcode(tile_bundle_bits bits, const unsigned short *table) 2420find_opcode(tile_bundle_bits bits, tile_pipeline pipe)
2421{ 2421{
2422 const unsigned short *table = tile_bundle_decoder_fsms[pipe];
2422 int index = 0; 2423 int index = 0;
2423 2424
2424 while (1) 2425 while (1)
@@ -2465,7 +2466,7 @@ parse_insn_tile(tile_bundle_bits bits,
2465 int i; 2466 int i;
2466 2467
2467 d = &decoded[num_instructions++]; 2468 d = &decoded[num_instructions++];
2468 opc = find_opcode (bits, tile_bundle_decoder_fsms[pipe]); 2469 opc = find_opcode (bits, (tile_pipeline)pipe);
2469 d->opcode = opc; 2470 d->opcode = opc;
2470 2471
2471 /* Decode each operand, sign extending, etc. as appropriate. */ 2472 /* Decode each operand, sign extending, etc. as appropriate. */
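
The tile-desc_32.c hunk above changes find_opcode() to take the pipe number instead of a pre-selected FSM table, moving the tile_bundle_decoder_fsms[] lookup inside the function. A standalone sketch of that interface change, with dummy per-pipe tables and a trivial index computation in place of the real decoder FSM:

#include <stdio.h>

enum pipe_sketch { PIPE_X0, PIPE_X1, NUM_PIPES };

static const unsigned short fsm_x0[] = { 10, 11, 12 };
static const unsigned short fsm_x1[] = { 20, 21, 22 };
static const unsigned short *const decoder_fsms[NUM_PIPES] = { fsm_x0, fsm_x1 };

/* New-style interface: the caller names the pipe, not the decode table. */
static unsigned short find_opcode_sketch(unsigned long bits, enum pipe_sketch pipe)
{
	const unsigned short *table = decoder_fsms[pipe];

	return table[bits % 3];   /* stands in for walking the real decoder FSM */
}

int main(void)
{
	printf("%u\n", find_opcode_sketch(7, PIPE_X1));   /* prints 21 */
	return 0;
}
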
diff --git a/arch/tile/kernel/tile-desc_64.c b/arch/tile/kernel/tile-desc_64.c
new file mode 100644
index 000000000000..d57007bed77f
--- /dev/null
+++ b/arch/tile/kernel/tile-desc_64.c
@@ -0,0 +1,2200 @@
1/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
2#define BFD_RELOC(x) -1
3
4/* Special registers. */
5#define TREG_LR 55
6#define TREG_SN 56
7#define TREG_ZERO 63
8
9/* FIXME: Rename this. */
10#include <asm/opcode-tile_64.h>
11
12#include <linux/stddef.h>
13
14const struct tilegx_opcode tilegx_opcodes[334] =
15{
16 { "bpt", TILEGX_OPC_BPT, 0x2, 0, TREG_ZERO, 0,
17 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
18 },
19 { "info", TILEGX_OPC_INFO, 0xf, 1, TREG_ZERO, 1,
20 { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } },
21 },
22 { "infol", TILEGX_OPC_INFOL, 0x3, 1, TREG_ZERO, 1,
23 { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } },
24 },
25 { "move", TILEGX_OPC_MOVE, 0xf, 2, TREG_ZERO, 1,
26 { { 6, 7 }, { 8, 9 }, { 10, 11 }, { 12, 13 }, { 0, } },
27 },
28 { "movei", TILEGX_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1,
29 { { 6, 0 }, { 8, 1 }, { 10, 2 }, { 12, 3 }, { 0, } },
30 },
31 { "moveli", TILEGX_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1,
32 { { 6, 4 }, { 8, 5 }, { 0, }, { 0, }, { 0, } },
33 },
34 { "prefetch", TILEGX_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1,
35 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
36 },
37 { "prefetch_add_l1", TILEGX_OPC_PREFETCH_ADD_L1, 0x2, 2, TREG_ZERO, 1,
38 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
39 },
40 { "prefetch_add_l1_fault", TILEGX_OPC_PREFETCH_ADD_L1_FAULT, 0x2, 2, TREG_ZERO, 1,
41 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
42 },
43 { "prefetch_add_l2", TILEGX_OPC_PREFETCH_ADD_L2, 0x2, 2, TREG_ZERO, 1,
44 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
45 },
46 { "prefetch_add_l2_fault", TILEGX_OPC_PREFETCH_ADD_L2_FAULT, 0x2, 2, TREG_ZERO, 1,
47 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
48 },
49 { "prefetch_add_l3", TILEGX_OPC_PREFETCH_ADD_L3, 0x2, 2, TREG_ZERO, 1,
50 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
51 },
52 { "prefetch_add_l3_fault", TILEGX_OPC_PREFETCH_ADD_L3_FAULT, 0x2, 2, TREG_ZERO, 1,
53 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
54 },
55 { "prefetch_l1", TILEGX_OPC_PREFETCH_L1, 0x12, 1, TREG_ZERO, 1,
56 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
57 },
58 { "prefetch_l1_fault", TILEGX_OPC_PREFETCH_L1_FAULT, 0x12, 1, TREG_ZERO, 1,
59 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
60 },
61 { "prefetch_l2", TILEGX_OPC_PREFETCH_L2, 0x12, 1, TREG_ZERO, 1,
62 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
63 },
64 { "prefetch_l2_fault", TILEGX_OPC_PREFETCH_L2_FAULT, 0x12, 1, TREG_ZERO, 1,
65 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
66 },
67 { "prefetch_l3", TILEGX_OPC_PREFETCH_L3, 0x12, 1, TREG_ZERO, 1,
68 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
69 },
70 { "prefetch_l3_fault", TILEGX_OPC_PREFETCH_L3_FAULT, 0x12, 1, TREG_ZERO, 1,
71 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
72 },
73 { "raise", TILEGX_OPC_RAISE, 0x2, 0, TREG_ZERO, 1,
74 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
75 },
76 { "add", TILEGX_OPC_ADD, 0xf, 3, TREG_ZERO, 1,
77 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
78 },
79 { "addi", TILEGX_OPC_ADDI, 0xf, 3, TREG_ZERO, 1,
80 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
81 },
82 { "addli", TILEGX_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1,
83 { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
84 },
85 { "addx", TILEGX_OPC_ADDX, 0xf, 3, TREG_ZERO, 1,
86 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
87 },
88 { "addxi", TILEGX_OPC_ADDXI, 0xf, 3, TREG_ZERO, 1,
89 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
90 },
91 { "addxli", TILEGX_OPC_ADDXLI, 0x3, 3, TREG_ZERO, 1,
92 { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
93 },
94 { "addxsc", TILEGX_OPC_ADDXSC, 0x3, 3, TREG_ZERO, 1,
95 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
96 },
97 { "and", TILEGX_OPC_AND, 0xf, 3, TREG_ZERO, 1,
98 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
99 },
100 { "andi", TILEGX_OPC_ANDI, 0xf, 3, TREG_ZERO, 1,
101 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
102 },
103 { "beqz", TILEGX_OPC_BEQZ, 0x2, 2, TREG_ZERO, 1,
104 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
105 },
106 { "beqzt", TILEGX_OPC_BEQZT, 0x2, 2, TREG_ZERO, 1,
107 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
108 },
109 { "bfexts", TILEGX_OPC_BFEXTS, 0x1, 4, TREG_ZERO, 1,
110 { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
111 },
112 { "bfextu", TILEGX_OPC_BFEXTU, 0x1, 4, TREG_ZERO, 1,
113 { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
114 },
115 { "bfins", TILEGX_OPC_BFINS, 0x1, 4, TREG_ZERO, 1,
116 { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
117 },
118 { "bgez", TILEGX_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1,
119 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
120 },
121 { "bgezt", TILEGX_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1,
122 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
123 },
124 { "bgtz", TILEGX_OPC_BGTZ, 0x2, 2, TREG_ZERO, 1,
125 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
126 },
127 { "bgtzt", TILEGX_OPC_BGTZT, 0x2, 2, TREG_ZERO, 1,
128 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
129 },
130 { "blbc", TILEGX_OPC_BLBC, 0x2, 2, TREG_ZERO, 1,
131 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
132 },
133 { "blbct", TILEGX_OPC_BLBCT, 0x2, 2, TREG_ZERO, 1,
134 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
135 },
136 { "blbs", TILEGX_OPC_BLBS, 0x2, 2, TREG_ZERO, 1,
137 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
138 },
139 { "blbst", TILEGX_OPC_BLBST, 0x2, 2, TREG_ZERO, 1,
140 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
141 },
142 { "blez", TILEGX_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1,
143 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
144 },
145 { "blezt", TILEGX_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1,
146 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
147 },
148 { "bltz", TILEGX_OPC_BLTZ, 0x2, 2, TREG_ZERO, 1,
149 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
150 },
151 { "bltzt", TILEGX_OPC_BLTZT, 0x2, 2, TREG_ZERO, 1,
152 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
153 },
154 { "bnez", TILEGX_OPC_BNEZ, 0x2, 2, TREG_ZERO, 1,
155 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
156 },
157 { "bnezt", TILEGX_OPC_BNEZT, 0x2, 2, TREG_ZERO, 1,
158 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
159 },
160 { "clz", TILEGX_OPC_CLZ, 0x5, 2, TREG_ZERO, 1,
161 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
162 },
163 { "cmoveqz", TILEGX_OPC_CMOVEQZ, 0x5, 3, TREG_ZERO, 1,
164 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
165 },
166 { "cmovnez", TILEGX_OPC_CMOVNEZ, 0x5, 3, TREG_ZERO, 1,
167 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
168 },
169 { "cmpeq", TILEGX_OPC_CMPEQ, 0xf, 3, TREG_ZERO, 1,
170 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
171 },
172 { "cmpeqi", TILEGX_OPC_CMPEQI, 0xf, 3, TREG_ZERO, 1,
173 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
174 },
175 { "cmpexch", TILEGX_OPC_CMPEXCH, 0x2, 3, TREG_ZERO, 1,
176 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
177 },
178 { "cmpexch4", TILEGX_OPC_CMPEXCH4, 0x2, 3, TREG_ZERO, 1,
179 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
180 },
181 { "cmples", TILEGX_OPC_CMPLES, 0xf, 3, TREG_ZERO, 1,
182 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
183 },
184 { "cmpleu", TILEGX_OPC_CMPLEU, 0xf, 3, TREG_ZERO, 1,
185 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
186 },
187 { "cmplts", TILEGX_OPC_CMPLTS, 0xf, 3, TREG_ZERO, 1,
188 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
189 },
190 { "cmpltsi", TILEGX_OPC_CMPLTSI, 0xf, 3, TREG_ZERO, 1,
191 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
192 },
193 { "cmpltu", TILEGX_OPC_CMPLTU, 0xf, 3, TREG_ZERO, 1,
194 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
195 },
196 { "cmpltui", TILEGX_OPC_CMPLTUI, 0x3, 3, TREG_ZERO, 1,
197 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
198 },
199 { "cmpne", TILEGX_OPC_CMPNE, 0xf, 3, TREG_ZERO, 1,
200 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
201 },
202 { "cmul", TILEGX_OPC_CMUL, 0x1, 3, TREG_ZERO, 1,
203 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
204 },
205 { "cmula", TILEGX_OPC_CMULA, 0x1, 3, TREG_ZERO, 1,
206 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
207 },
208 { "cmulaf", TILEGX_OPC_CMULAF, 0x1, 3, TREG_ZERO, 1,
209 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
210 },
211 { "cmulf", TILEGX_OPC_CMULF, 0x1, 3, TREG_ZERO, 1,
212 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
213 },
214 { "cmulfr", TILEGX_OPC_CMULFR, 0x1, 3, TREG_ZERO, 1,
215 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
216 },
217 { "cmulh", TILEGX_OPC_CMULH, 0x1, 3, TREG_ZERO, 1,
218 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
219 },
220 { "cmulhr", TILEGX_OPC_CMULHR, 0x1, 3, TREG_ZERO, 1,
221 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
222 },
223 { "crc32_32", TILEGX_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1,
224 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
225 },
226 { "crc32_8", TILEGX_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1,
227 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
228 },
229 { "ctz", TILEGX_OPC_CTZ, 0x5, 2, TREG_ZERO, 1,
230 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
231 },
232 { "dblalign", TILEGX_OPC_DBLALIGN, 0x1, 3, TREG_ZERO, 1,
233 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
234 },
235 { "dblalign2", TILEGX_OPC_DBLALIGN2, 0x3, 3, TREG_ZERO, 1,
236 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
237 },
238 { "dblalign4", TILEGX_OPC_DBLALIGN4, 0x3, 3, TREG_ZERO, 1,
239 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
240 },
241 { "dblalign6", TILEGX_OPC_DBLALIGN6, 0x3, 3, TREG_ZERO, 1,
242 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
243 },
244 { "drain", TILEGX_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0,
245 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
246 },
247 { "dtlbpr", TILEGX_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1,
248 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
249 },
250 { "exch", TILEGX_OPC_EXCH, 0x2, 3, TREG_ZERO, 1,
251 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
252 },
253 { "exch4", TILEGX_OPC_EXCH4, 0x2, 3, TREG_ZERO, 1,
254 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
255 },
256 { "fdouble_add_flags", TILEGX_OPC_FDOUBLE_ADD_FLAGS, 0x1, 3, TREG_ZERO, 1,
257 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
258 },
259 { "fdouble_addsub", TILEGX_OPC_FDOUBLE_ADDSUB, 0x1, 3, TREG_ZERO, 1,
260 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
261 },
262 { "fdouble_mul_flags", TILEGX_OPC_FDOUBLE_MUL_FLAGS, 0x1, 3, TREG_ZERO, 1,
263 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
264 },
265 { "fdouble_pack1", TILEGX_OPC_FDOUBLE_PACK1, 0x1, 3, TREG_ZERO, 1,
266 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
267 },
268 { "fdouble_pack2", TILEGX_OPC_FDOUBLE_PACK2, 0x1, 3, TREG_ZERO, 1,
269 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
270 },
271 { "fdouble_sub_flags", TILEGX_OPC_FDOUBLE_SUB_FLAGS, 0x1, 3, TREG_ZERO, 1,
272 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
273 },
274 { "fdouble_unpack_max", TILEGX_OPC_FDOUBLE_UNPACK_MAX, 0x1, 3, TREG_ZERO, 1,
275 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
276 },
277 { "fdouble_unpack_min", TILEGX_OPC_FDOUBLE_UNPACK_MIN, 0x1, 3, TREG_ZERO, 1,
278 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
279 },
280 { "fetchadd", TILEGX_OPC_FETCHADD, 0x2, 3, TREG_ZERO, 1,
281 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
282 },
283 { "fetchadd4", TILEGX_OPC_FETCHADD4, 0x2, 3, TREG_ZERO, 1,
284 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
285 },
286 { "fetchaddgez", TILEGX_OPC_FETCHADDGEZ, 0x2, 3, TREG_ZERO, 1,
287 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
288 },
289 { "fetchaddgez4", TILEGX_OPC_FETCHADDGEZ4, 0x2, 3, TREG_ZERO, 1,
290 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
291 },
292 { "fetchand", TILEGX_OPC_FETCHAND, 0x2, 3, TREG_ZERO, 1,
293 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
294 },
295 { "fetchand4", TILEGX_OPC_FETCHAND4, 0x2, 3, TREG_ZERO, 1,
296 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
297 },
298 { "fetchor", TILEGX_OPC_FETCHOR, 0x2, 3, TREG_ZERO, 1,
299 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
300 },
301 { "fetchor4", TILEGX_OPC_FETCHOR4, 0x2, 3, TREG_ZERO, 1,
302 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
303 },
304 { "finv", TILEGX_OPC_FINV, 0x2, 1, TREG_ZERO, 1,
305 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
306 },
307 { "flush", TILEGX_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1,
308 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
309 },
310 { "flushwb", TILEGX_OPC_FLUSHWB, 0x2, 0, TREG_ZERO, 1,
311 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
312 },
313 { "fnop", TILEGX_OPC_FNOP, 0xf, 0, TREG_ZERO, 1,
314 { { }, { }, { }, { }, { 0, } },
315 },
316 { "fsingle_add1", TILEGX_OPC_FSINGLE_ADD1, 0x1, 3, TREG_ZERO, 1,
317 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
318 },
319 { "fsingle_addsub2", TILEGX_OPC_FSINGLE_ADDSUB2, 0x1, 3, TREG_ZERO, 1,
320 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
321 },
322 { "fsingle_mul1", TILEGX_OPC_FSINGLE_MUL1, 0x1, 3, TREG_ZERO, 1,
323 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
324 },
325 { "fsingle_mul2", TILEGX_OPC_FSINGLE_MUL2, 0x1, 3, TREG_ZERO, 1,
326 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
327 },
328 { "fsingle_pack1", TILEGX_OPC_FSINGLE_PACK1, 0x5, 2, TREG_ZERO, 1,
329 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
330 },
331 { "fsingle_pack2", TILEGX_OPC_FSINGLE_PACK2, 0x1, 3, TREG_ZERO, 1,
332 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
333 },
334 { "fsingle_sub1", TILEGX_OPC_FSINGLE_SUB1, 0x1, 3, TREG_ZERO, 1,
335 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
336 },
337 { "icoh", TILEGX_OPC_ICOH, 0x2, 1, TREG_ZERO, 1,
338 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
339 },
340 { "ill", TILEGX_OPC_ILL, 0xa, 0, TREG_ZERO, 1,
341 { { 0, }, { }, { 0, }, { }, { 0, } },
342 },
343 { "inv", TILEGX_OPC_INV, 0x2, 1, TREG_ZERO, 1,
344 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
345 },
346 { "iret", TILEGX_OPC_IRET, 0x2, 0, TREG_ZERO, 1,
347 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
348 },
349 { "j", TILEGX_OPC_J, 0x2, 1, TREG_ZERO, 1,
350 { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
351 },
352 { "jal", TILEGX_OPC_JAL, 0x2, 1, TREG_LR, 1,
353 { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
354 },
355 { "jalr", TILEGX_OPC_JALR, 0xa, 1, TREG_LR, 1,
356 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
357 },
358 { "jalrp", TILEGX_OPC_JALRP, 0xa, 1, TREG_LR, 1,
359 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
360 },
361 { "jr", TILEGX_OPC_JR, 0xa, 1, TREG_ZERO, 1,
362 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
363 },
364 { "jrp", TILEGX_OPC_JRP, 0xa, 1, TREG_ZERO, 1,
365 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
366 },
367 { "ld", TILEGX_OPC_LD, 0x12, 2, TREG_ZERO, 1,
368 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
369 },
370 { "ld1s", TILEGX_OPC_LD1S, 0x12, 2, TREG_ZERO, 1,
371 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
372 },
373 { "ld1s_add", TILEGX_OPC_LD1S_ADD, 0x2, 3, TREG_ZERO, 1,
374 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
375 },
376 { "ld1u", TILEGX_OPC_LD1U, 0x12, 2, TREG_ZERO, 1,
377 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
378 },
379 { "ld1u_add", TILEGX_OPC_LD1U_ADD, 0x2, 3, TREG_ZERO, 1,
380 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
381 },
382 { "ld2s", TILEGX_OPC_LD2S, 0x12, 2, TREG_ZERO, 1,
383 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
384 },
385 { "ld2s_add", TILEGX_OPC_LD2S_ADD, 0x2, 3, TREG_ZERO, 1,
386 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
387 },
388 { "ld2u", TILEGX_OPC_LD2U, 0x12, 2, TREG_ZERO, 1,
389 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
390 },
391 { "ld2u_add", TILEGX_OPC_LD2U_ADD, 0x2, 3, TREG_ZERO, 1,
392 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
393 },
394 { "ld4s", TILEGX_OPC_LD4S, 0x12, 2, TREG_ZERO, 1,
395 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
396 },
397 { "ld4s_add", TILEGX_OPC_LD4S_ADD, 0x2, 3, TREG_ZERO, 1,
398 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
399 },
400 { "ld4u", TILEGX_OPC_LD4U, 0x12, 2, TREG_ZERO, 1,
401 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
402 },
403 { "ld4u_add", TILEGX_OPC_LD4U_ADD, 0x2, 3, TREG_ZERO, 1,
404 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
405 },
406 { "ld_add", TILEGX_OPC_LD_ADD, 0x2, 3, TREG_ZERO, 1,
407 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
408 },
409 { "ldna", TILEGX_OPC_LDNA, 0x2, 2, TREG_ZERO, 1,
410 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
411 },
412 { "ldna_add", TILEGX_OPC_LDNA_ADD, 0x2, 3, TREG_ZERO, 1,
413 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
414 },
415 { "ldnt", TILEGX_OPC_LDNT, 0x2, 2, TREG_ZERO, 1,
416 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
417 },
418 { "ldnt1s", TILEGX_OPC_LDNT1S, 0x2, 2, TREG_ZERO, 1,
419 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
420 },
421 { "ldnt1s_add", TILEGX_OPC_LDNT1S_ADD, 0x2, 3, TREG_ZERO, 1,
422 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
423 },
424 { "ldnt1u", TILEGX_OPC_LDNT1U, 0x2, 2, TREG_ZERO, 1,
425 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
426 },
427 { "ldnt1u_add", TILEGX_OPC_LDNT1U_ADD, 0x2, 3, TREG_ZERO, 1,
428 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
429 },
430 { "ldnt2s", TILEGX_OPC_LDNT2S, 0x2, 2, TREG_ZERO, 1,
431 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
432 },
433 { "ldnt2s_add", TILEGX_OPC_LDNT2S_ADD, 0x2, 3, TREG_ZERO, 1,
434 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
435 },
436 { "ldnt2u", TILEGX_OPC_LDNT2U, 0x2, 2, TREG_ZERO, 1,
437 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
438 },
439 { "ldnt2u_add", TILEGX_OPC_LDNT2U_ADD, 0x2, 3, TREG_ZERO, 1,
440 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
441 },
442 { "ldnt4s", TILEGX_OPC_LDNT4S, 0x2, 2, TREG_ZERO, 1,
443 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
444 },
445 { "ldnt4s_add", TILEGX_OPC_LDNT4S_ADD, 0x2, 3, TREG_ZERO, 1,
446 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
447 },
448 { "ldnt4u", TILEGX_OPC_LDNT4U, 0x2, 2, TREG_ZERO, 1,
449 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
450 },
451 { "ldnt4u_add", TILEGX_OPC_LDNT4U_ADD, 0x2, 3, TREG_ZERO, 1,
452 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
453 },
454 { "ldnt_add", TILEGX_OPC_LDNT_ADD, 0x2, 3, TREG_ZERO, 1,
455 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
456 },
457 { "lnk", TILEGX_OPC_LNK, 0xa, 1, TREG_ZERO, 1,
458 { { 0, }, { 8 }, { 0, }, { 12 }, { 0, } },
459 },
460 { "mf", TILEGX_OPC_MF, 0x2, 0, TREG_ZERO, 1,
461 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
462 },
463 { "mfspr", TILEGX_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1,
464 { { 0, }, { 8, 27 }, { 0, }, { 0, }, { 0, } },
465 },
466 { "mm", TILEGX_OPC_MM, 0x1, 4, TREG_ZERO, 1,
467 { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
468 },
469 { "mnz", TILEGX_OPC_MNZ, 0xf, 3, TREG_ZERO, 1,
470 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
471 },
472 { "mtspr", TILEGX_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1,
473 { { 0, }, { 28, 9 }, { 0, }, { 0, }, { 0, } },
474 },
475 { "mul_hs_hs", TILEGX_OPC_MUL_HS_HS, 0x5, 3, TREG_ZERO, 1,
476 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
477 },
478 { "mul_hs_hu", TILEGX_OPC_MUL_HS_HU, 0x1, 3, TREG_ZERO, 1,
479 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
480 },
481 { "mul_hs_ls", TILEGX_OPC_MUL_HS_LS, 0x1, 3, TREG_ZERO, 1,
482 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
483 },
484 { "mul_hs_lu", TILEGX_OPC_MUL_HS_LU, 0x1, 3, TREG_ZERO, 1,
485 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
486 },
487 { "mul_hu_hu", TILEGX_OPC_MUL_HU_HU, 0x5, 3, TREG_ZERO, 1,
488 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
489 },
490 { "mul_hu_ls", TILEGX_OPC_MUL_HU_LS, 0x1, 3, TREG_ZERO, 1,
491 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
492 },
493 { "mul_hu_lu", TILEGX_OPC_MUL_HU_LU, 0x1, 3, TREG_ZERO, 1,
494 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
495 },
496 { "mul_ls_ls", TILEGX_OPC_MUL_LS_LS, 0x5, 3, TREG_ZERO, 1,
497 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
498 },
499 { "mul_ls_lu", TILEGX_OPC_MUL_LS_LU, 0x1, 3, TREG_ZERO, 1,
500 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
501 },
502 { "mul_lu_lu", TILEGX_OPC_MUL_LU_LU, 0x5, 3, TREG_ZERO, 1,
503 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
504 },
505 { "mula_hs_hs", TILEGX_OPC_MULA_HS_HS, 0x5, 3, TREG_ZERO, 1,
506 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
507 },
508 { "mula_hs_hu", TILEGX_OPC_MULA_HS_HU, 0x1, 3, TREG_ZERO, 1,
509 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
510 },
511 { "mula_hs_ls", TILEGX_OPC_MULA_HS_LS, 0x1, 3, TREG_ZERO, 1,
512 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
513 },
514 { "mula_hs_lu", TILEGX_OPC_MULA_HS_LU, 0x1, 3, TREG_ZERO, 1,
515 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
516 },
517 { "mula_hu_hu", TILEGX_OPC_MULA_HU_HU, 0x5, 3, TREG_ZERO, 1,
518 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
519 },
520 { "mula_hu_ls", TILEGX_OPC_MULA_HU_LS, 0x1, 3, TREG_ZERO, 1,
521 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
522 },
523 { "mula_hu_lu", TILEGX_OPC_MULA_HU_LU, 0x1, 3, TREG_ZERO, 1,
524 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
525 },
526 { "mula_ls_ls", TILEGX_OPC_MULA_LS_LS, 0x5, 3, TREG_ZERO, 1,
527 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
528 },
529 { "mula_ls_lu", TILEGX_OPC_MULA_LS_LU, 0x1, 3, TREG_ZERO, 1,
530 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
531 },
532 { "mula_lu_lu", TILEGX_OPC_MULA_LU_LU, 0x5, 3, TREG_ZERO, 1,
533 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
534 },
535 { "mulax", TILEGX_OPC_MULAX, 0x5, 3, TREG_ZERO, 1,
536 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
537 },
538 { "mulx", TILEGX_OPC_MULX, 0x5, 3, TREG_ZERO, 1,
539 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
540 },
541 { "mz", TILEGX_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
542 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
543 },
544 { "nap", TILEGX_OPC_NAP, 0x2, 0, TREG_ZERO, 0,
545 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
546 },
547 { "nop", TILEGX_OPC_NOP, 0xf, 0, TREG_ZERO, 1,
548 { { }, { }, { }, { }, { 0, } },
549 },
550 { "nor", TILEGX_OPC_NOR, 0xf, 3, TREG_ZERO, 1,
551 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
552 },
553 { "or", TILEGX_OPC_OR, 0xf, 3, TREG_ZERO, 1,
554 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
555 },
556 { "ori", TILEGX_OPC_ORI, 0x3, 3, TREG_ZERO, 1,
557 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
558 },
559 { "pcnt", TILEGX_OPC_PCNT, 0x5, 2, TREG_ZERO, 1,
560 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
561 },
562 { "revbits", TILEGX_OPC_REVBITS, 0x5, 2, TREG_ZERO, 1,
563 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
564 },
565 { "revbytes", TILEGX_OPC_REVBYTES, 0x5, 2, TREG_ZERO, 1,
566 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
567 },
568 { "rotl", TILEGX_OPC_ROTL, 0xf, 3, TREG_ZERO, 1,
569 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
570 },
571 { "rotli", TILEGX_OPC_ROTLI, 0xf, 3, TREG_ZERO, 1,
572 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
573 },
574 { "shl", TILEGX_OPC_SHL, 0xf, 3, TREG_ZERO, 1,
575 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
576 },
577 { "shl16insli", TILEGX_OPC_SHL16INSLI, 0x3, 3, TREG_ZERO, 1,
578 { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
579 },
580 { "shl1add", TILEGX_OPC_SHL1ADD, 0xf, 3, TREG_ZERO, 1,
581 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
582 },
583 { "shl1addx", TILEGX_OPC_SHL1ADDX, 0xf, 3, TREG_ZERO, 1,
584 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
585 },
586 { "shl2add", TILEGX_OPC_SHL2ADD, 0xf, 3, TREG_ZERO, 1,
587 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
588 },
589 { "shl2addx", TILEGX_OPC_SHL2ADDX, 0xf, 3, TREG_ZERO, 1,
590 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
591 },
592 { "shl3add", TILEGX_OPC_SHL3ADD, 0xf, 3, TREG_ZERO, 1,
593 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
594 },
595 { "shl3addx", TILEGX_OPC_SHL3ADDX, 0xf, 3, TREG_ZERO, 1,
596 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
597 },
598 { "shli", TILEGX_OPC_SHLI, 0xf, 3, TREG_ZERO, 1,
599 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
600 },
601 { "shlx", TILEGX_OPC_SHLX, 0x3, 3, TREG_ZERO, 1,
602 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
603 },
604 { "shlxi", TILEGX_OPC_SHLXI, 0x3, 3, TREG_ZERO, 1,
605 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
606 },
607 { "shrs", TILEGX_OPC_SHRS, 0xf, 3, TREG_ZERO, 1,
608 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
609 },
610 { "shrsi", TILEGX_OPC_SHRSI, 0xf, 3, TREG_ZERO, 1,
611 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
612 },
613 { "shru", TILEGX_OPC_SHRU, 0xf, 3, TREG_ZERO, 1,
614 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
615 },
616 { "shrui", TILEGX_OPC_SHRUI, 0xf, 3, TREG_ZERO, 1,
617 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
618 },
619 { "shrux", TILEGX_OPC_SHRUX, 0x3, 3, TREG_ZERO, 1,
620 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
621 },
622 { "shruxi", TILEGX_OPC_SHRUXI, 0x3, 3, TREG_ZERO, 1,
623 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
624 },
625 { "shufflebytes", TILEGX_OPC_SHUFFLEBYTES, 0x1, 3, TREG_ZERO, 1,
626 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
627 },
628 { "st", TILEGX_OPC_ST, 0x12, 2, TREG_ZERO, 1,
629 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
630 },
631 { "st1", TILEGX_OPC_ST1, 0x12, 2, TREG_ZERO, 1,
632 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
633 },
634 { "st1_add", TILEGX_OPC_ST1_ADD, 0x2, 3, TREG_ZERO, 1,
635 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
636 },
637 { "st2", TILEGX_OPC_ST2, 0x12, 2, TREG_ZERO, 1,
638 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
639 },
640 { "st2_add", TILEGX_OPC_ST2_ADD, 0x2, 3, TREG_ZERO, 1,
641 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
642 },
643 { "st4", TILEGX_OPC_ST4, 0x12, 2, TREG_ZERO, 1,
644 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
645 },
646 { "st4_add", TILEGX_OPC_ST4_ADD, 0x2, 3, TREG_ZERO, 1,
647 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
648 },
649 { "st_add", TILEGX_OPC_ST_ADD, 0x2, 3, TREG_ZERO, 1,
650 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
651 },
652 { "stnt", TILEGX_OPC_STNT, 0x2, 2, TREG_ZERO, 1,
653 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
654 },
655 { "stnt1", TILEGX_OPC_STNT1, 0x2, 2, TREG_ZERO, 1,
656 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
657 },
658 { "stnt1_add", TILEGX_OPC_STNT1_ADD, 0x2, 3, TREG_ZERO, 1,
659 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
660 },
661 { "stnt2", TILEGX_OPC_STNT2, 0x2, 2, TREG_ZERO, 1,
662 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
663 },
664 { "stnt2_add", TILEGX_OPC_STNT2_ADD, 0x2, 3, TREG_ZERO, 1,
665 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
666 },
667 { "stnt4", TILEGX_OPC_STNT4, 0x2, 2, TREG_ZERO, 1,
668 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
669 },
670 { "stnt4_add", TILEGX_OPC_STNT4_ADD, 0x2, 3, TREG_ZERO, 1,
671 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
672 },
673 { "stnt_add", TILEGX_OPC_STNT_ADD, 0x2, 3, TREG_ZERO, 1,
674 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
675 },
676 { "sub", TILEGX_OPC_SUB, 0xf, 3, TREG_ZERO, 1,
677 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
678 },
679 { "subx", TILEGX_OPC_SUBX, 0xf, 3, TREG_ZERO, 1,
680 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
681 },
682 { "subxsc", TILEGX_OPC_SUBXSC, 0x3, 3, TREG_ZERO, 1,
683 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
684 },
685 { "swint0", TILEGX_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0,
686 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
687 },
688 { "swint1", TILEGX_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0,
689 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
690 },
691 { "swint2", TILEGX_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0,
692 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
693 },
694 { "swint3", TILEGX_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0,
695 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
696 },
697 { "tblidxb0", TILEGX_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1,
698 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
699 },
700 { "tblidxb1", TILEGX_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1,
701 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
702 },
703 { "tblidxb2", TILEGX_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1,
704 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
705 },
706 { "tblidxb3", TILEGX_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1,
707 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
708 },
709 { "v1add", TILEGX_OPC_V1ADD, 0x3, 3, TREG_ZERO, 1,
710 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
711 },
712 { "v1addi", TILEGX_OPC_V1ADDI, 0x3, 3, TREG_ZERO, 1,
713 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
714 },
715 { "v1adduc", TILEGX_OPC_V1ADDUC, 0x3, 3, TREG_ZERO, 1,
716 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
717 },
718 { "v1adiffu", TILEGX_OPC_V1ADIFFU, 0x1, 3, TREG_ZERO, 1,
719 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
720 },
721 { "v1avgu", TILEGX_OPC_V1AVGU, 0x1, 3, TREG_ZERO, 1,
722 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
723 },
724 { "v1cmpeq", TILEGX_OPC_V1CMPEQ, 0x3, 3, TREG_ZERO, 1,
725 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
726 },
727 { "v1cmpeqi", TILEGX_OPC_V1CMPEQI, 0x3, 3, TREG_ZERO, 1,
728 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
729 },
730 { "v1cmples", TILEGX_OPC_V1CMPLES, 0x3, 3, TREG_ZERO, 1,
731 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
732 },
733 { "v1cmpleu", TILEGX_OPC_V1CMPLEU, 0x3, 3, TREG_ZERO, 1,
734 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
735 },
736 { "v1cmplts", TILEGX_OPC_V1CMPLTS, 0x3, 3, TREG_ZERO, 1,
737 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
738 },
739 { "v1cmpltsi", TILEGX_OPC_V1CMPLTSI, 0x3, 3, TREG_ZERO, 1,
740 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
741 },
742 { "v1cmpltu", TILEGX_OPC_V1CMPLTU, 0x3, 3, TREG_ZERO, 1,
743 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
744 },
745 { "v1cmpltui", TILEGX_OPC_V1CMPLTUI, 0x3, 3, TREG_ZERO, 1,
746 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
747 },
748 { "v1cmpne", TILEGX_OPC_V1CMPNE, 0x3, 3, TREG_ZERO, 1,
749 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
750 },
751 { "v1ddotpu", TILEGX_OPC_V1DDOTPU, 0x1, 3, TREG_ZERO, 1,
752 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
753 },
754 { "v1ddotpua", TILEGX_OPC_V1DDOTPUA, 0x1, 3, TREG_ZERO, 1,
755 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
756 },
757 { "v1ddotpus", TILEGX_OPC_V1DDOTPUS, 0x1, 3, TREG_ZERO, 1,
758 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
759 },
760 { "v1ddotpusa", TILEGX_OPC_V1DDOTPUSA, 0x1, 3, TREG_ZERO, 1,
761 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
762 },
763 { "v1dotp", TILEGX_OPC_V1DOTP, 0x1, 3, TREG_ZERO, 1,
764 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
765 },
766 { "v1dotpa", TILEGX_OPC_V1DOTPA, 0x1, 3, TREG_ZERO, 1,
767 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
768 },
769 { "v1dotpu", TILEGX_OPC_V1DOTPU, 0x1, 3, TREG_ZERO, 1,
770 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
771 },
772 { "v1dotpua", TILEGX_OPC_V1DOTPUA, 0x1, 3, TREG_ZERO, 1,
773 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
774 },
775 { "v1dotpus", TILEGX_OPC_V1DOTPUS, 0x1, 3, TREG_ZERO, 1,
776 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
777 },
778 { "v1dotpusa", TILEGX_OPC_V1DOTPUSA, 0x1, 3, TREG_ZERO, 1,
779 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
780 },
781 { "v1int_h", TILEGX_OPC_V1INT_H, 0x3, 3, TREG_ZERO, 1,
782 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
783 },
784 { "v1int_l", TILEGX_OPC_V1INT_L, 0x3, 3, TREG_ZERO, 1,
785 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
786 },
787 { "v1maxu", TILEGX_OPC_V1MAXU, 0x3, 3, TREG_ZERO, 1,
788 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
789 },
790 { "v1maxui", TILEGX_OPC_V1MAXUI, 0x3, 3, TREG_ZERO, 1,
791 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
792 },
793 { "v1minu", TILEGX_OPC_V1MINU, 0x3, 3, TREG_ZERO, 1,
794 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
795 },
796 { "v1minui", TILEGX_OPC_V1MINUI, 0x3, 3, TREG_ZERO, 1,
797 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
798 },
799 { "v1mnz", TILEGX_OPC_V1MNZ, 0x3, 3, TREG_ZERO, 1,
800 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
801 },
802 { "v1multu", TILEGX_OPC_V1MULTU, 0x1, 3, TREG_ZERO, 1,
803 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
804 },
805 { "v1mulu", TILEGX_OPC_V1MULU, 0x1, 3, TREG_ZERO, 1,
806 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
807 },
808 { "v1mulus", TILEGX_OPC_V1MULUS, 0x1, 3, TREG_ZERO, 1,
809 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
810 },
811 { "v1mz", TILEGX_OPC_V1MZ, 0x3, 3, TREG_ZERO, 1,
812 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
813 },
814 { "v1sadau", TILEGX_OPC_V1SADAU, 0x1, 3, TREG_ZERO, 1,
815 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
816 },
817 { "v1sadu", TILEGX_OPC_V1SADU, 0x1, 3, TREG_ZERO, 1,
818 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
819 },
820 { "v1shl", TILEGX_OPC_V1SHL, 0x3, 3, TREG_ZERO, 1,
821 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
822 },
823 { "v1shli", TILEGX_OPC_V1SHLI, 0x3, 3, TREG_ZERO, 1,
824 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
825 },
826 { "v1shrs", TILEGX_OPC_V1SHRS, 0x3, 3, TREG_ZERO, 1,
827 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
828 },
829 { "v1shrsi", TILEGX_OPC_V1SHRSI, 0x3, 3, TREG_ZERO, 1,
830 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
831 },
832 { "v1shru", TILEGX_OPC_V1SHRU, 0x3, 3, TREG_ZERO, 1,
833 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
834 },
835 { "v1shrui", TILEGX_OPC_V1SHRUI, 0x3, 3, TREG_ZERO, 1,
836 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
837 },
838 { "v1sub", TILEGX_OPC_V1SUB, 0x3, 3, TREG_ZERO, 1,
839 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
840 },
841 { "v1subuc", TILEGX_OPC_V1SUBUC, 0x3, 3, TREG_ZERO, 1,
842 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
843 },
844 { "v2add", TILEGX_OPC_V2ADD, 0x3, 3, TREG_ZERO, 1,
845 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
846 },
847 { "v2addi", TILEGX_OPC_V2ADDI, 0x3, 3, TREG_ZERO, 1,
848 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
849 },
850 { "v2addsc", TILEGX_OPC_V2ADDSC, 0x3, 3, TREG_ZERO, 1,
851 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
852 },
853 { "v2adiffs", TILEGX_OPC_V2ADIFFS, 0x1, 3, TREG_ZERO, 1,
854 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
855 },
856 { "v2avgs", TILEGX_OPC_V2AVGS, 0x1, 3, TREG_ZERO, 1,
857 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
858 },
859 { "v2cmpeq", TILEGX_OPC_V2CMPEQ, 0x3, 3, TREG_ZERO, 1,
860 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
861 },
862 { "v2cmpeqi", TILEGX_OPC_V2CMPEQI, 0x3, 3, TREG_ZERO, 1,
863 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
864 },
865 { "v2cmples", TILEGX_OPC_V2CMPLES, 0x3, 3, TREG_ZERO, 1,
866 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
867 },
868 { "v2cmpleu", TILEGX_OPC_V2CMPLEU, 0x3, 3, TREG_ZERO, 1,
869 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
870 },
871 { "v2cmplts", TILEGX_OPC_V2CMPLTS, 0x3, 3, TREG_ZERO, 1,
872 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
873 },
874 { "v2cmpltsi", TILEGX_OPC_V2CMPLTSI, 0x3, 3, TREG_ZERO, 1,
875 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
876 },
877 { "v2cmpltu", TILEGX_OPC_V2CMPLTU, 0x3, 3, TREG_ZERO, 1,
878 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
879 },
880 { "v2cmpltui", TILEGX_OPC_V2CMPLTUI, 0x3, 3, TREG_ZERO, 1,
881 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
882 },
883 { "v2cmpne", TILEGX_OPC_V2CMPNE, 0x3, 3, TREG_ZERO, 1,
884 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
885 },
886 { "v2dotp", TILEGX_OPC_V2DOTP, 0x1, 3, TREG_ZERO, 1,
887 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
888 },
889 { "v2dotpa", TILEGX_OPC_V2DOTPA, 0x1, 3, TREG_ZERO, 1,
890 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
891 },
892 { "v2int_h", TILEGX_OPC_V2INT_H, 0x3, 3, TREG_ZERO, 1,
893 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
894 },
895 { "v2int_l", TILEGX_OPC_V2INT_L, 0x3, 3, TREG_ZERO, 1,
896 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
897 },
898 { "v2maxs", TILEGX_OPC_V2MAXS, 0x3, 3, TREG_ZERO, 1,
899 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
900 },
901 { "v2maxsi", TILEGX_OPC_V2MAXSI, 0x3, 3, TREG_ZERO, 1,
902 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
903 },
904 { "v2mins", TILEGX_OPC_V2MINS, 0x3, 3, TREG_ZERO, 1,
905 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
906 },
907 { "v2minsi", TILEGX_OPC_V2MINSI, 0x3, 3, TREG_ZERO, 1,
908 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
909 },
910 { "v2mnz", TILEGX_OPC_V2MNZ, 0x3, 3, TREG_ZERO, 1,
911 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
912 },
913 { "v2mulfsc", TILEGX_OPC_V2MULFSC, 0x1, 3, TREG_ZERO, 1,
914 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
915 },
916 { "v2muls", TILEGX_OPC_V2MULS, 0x1, 3, TREG_ZERO, 1,
917 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
918 },
919 { "v2mults", TILEGX_OPC_V2MULTS, 0x1, 3, TREG_ZERO, 1,
920 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
921 },
922 { "v2mz", TILEGX_OPC_V2MZ, 0x3, 3, TREG_ZERO, 1,
923 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
924 },
925 { "v2packh", TILEGX_OPC_V2PACKH, 0x3, 3, TREG_ZERO, 1,
926 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
927 },
928 { "v2packl", TILEGX_OPC_V2PACKL, 0x3, 3, TREG_ZERO, 1,
929 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
930 },
931 { "v2packuc", TILEGX_OPC_V2PACKUC, 0x3, 3, TREG_ZERO, 1,
932 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
933 },
934 { "v2sadas", TILEGX_OPC_V2SADAS, 0x1, 3, TREG_ZERO, 1,
935 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
936 },
937 { "v2sadau", TILEGX_OPC_V2SADAU, 0x1, 3, TREG_ZERO, 1,
938 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
939 },
940 { "v2sads", TILEGX_OPC_V2SADS, 0x1, 3, TREG_ZERO, 1,
941 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
942 },
943 { "v2sadu", TILEGX_OPC_V2SADU, 0x1, 3, TREG_ZERO, 1,
944 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
945 },
946 { "v2shl", TILEGX_OPC_V2SHL, 0x3, 3, TREG_ZERO, 1,
947 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
948 },
949 { "v2shli", TILEGX_OPC_V2SHLI, 0x3, 3, TREG_ZERO, 1,
950 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
951 },
952 { "v2shlsc", TILEGX_OPC_V2SHLSC, 0x3, 3, TREG_ZERO, 1,
953 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
954 },
955 { "v2shrs", TILEGX_OPC_V2SHRS, 0x3, 3, TREG_ZERO, 1,
956 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
957 },
958 { "v2shrsi", TILEGX_OPC_V2SHRSI, 0x3, 3, TREG_ZERO, 1,
959 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
960 },
961 { "v2shru", TILEGX_OPC_V2SHRU, 0x3, 3, TREG_ZERO, 1,
962 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
963 },
964 { "v2shrui", TILEGX_OPC_V2SHRUI, 0x3, 3, TREG_ZERO, 1,
965 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
966 },
967 { "v2sub", TILEGX_OPC_V2SUB, 0x3, 3, TREG_ZERO, 1,
968 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
969 },
970 { "v2subsc", TILEGX_OPC_V2SUBSC, 0x3, 3, TREG_ZERO, 1,
971 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
972 },
973 { "v4add", TILEGX_OPC_V4ADD, 0x3, 3, TREG_ZERO, 1,
974 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
975 },
976 { "v4addsc", TILEGX_OPC_V4ADDSC, 0x3, 3, TREG_ZERO, 1,
977 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
978 },
979 { "v4int_h", TILEGX_OPC_V4INT_H, 0x3, 3, TREG_ZERO, 1,
980 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
981 },
982 { "v4int_l", TILEGX_OPC_V4INT_L, 0x3, 3, TREG_ZERO, 1,
983 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
984 },
985 { "v4packsc", TILEGX_OPC_V4PACKSC, 0x3, 3, TREG_ZERO, 1,
986 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
987 },
988 { "v4shl", TILEGX_OPC_V4SHL, 0x3, 3, TREG_ZERO, 1,
989 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
990 },
991 { "v4shlsc", TILEGX_OPC_V4SHLSC, 0x3, 3, TREG_ZERO, 1,
992 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
993 },
994 { "v4shrs", TILEGX_OPC_V4SHRS, 0x3, 3, TREG_ZERO, 1,
995 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
996 },
997 { "v4shru", TILEGX_OPC_V4SHRU, 0x3, 3, TREG_ZERO, 1,
998 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
999 },
1000 { "v4sub", TILEGX_OPC_V4SUB, 0x3, 3, TREG_ZERO, 1,
1001 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
1002 },
1003 { "v4subsc", TILEGX_OPC_V4SUBSC, 0x3, 3, TREG_ZERO, 1,
1004 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
1005 },
1006 { "wh64", TILEGX_OPC_WH64, 0x2, 1, TREG_ZERO, 1,
1007 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
1008 },
1009 { "xor", TILEGX_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
1010 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
1011 },
1012 { "xori", TILEGX_OPC_XORI, 0x3, 3, TREG_ZERO, 1,
1013 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
1014 },
1015 { NULL, TILEGX_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } },
1016 }
1017};
1018#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
1019#define CHILD(array_index) (TILEGX_OPC_NONE + (array_index))
1020
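/*
 * Editorial note, not part of the generated file: the decode_*_fsm arrays
 * below are decision trees over instruction-bundle bits.  Each node begins
 * with a BITFIELD() word (field start bit in bits 5:0, a size-bit value
 * mask in bits 6 and up), followed by one entry per possible field value.
 * An entry at or below TILEGX_OPC_NONE is a final opcode; a CHILD() entry
 * names another node to descend into.  A minimal walk could look like the
 * sketch below; the function name 'decode_walk' and its parameters are
 * illustrative assumptions only, not identifiers from this file.
 */
static int decode_walk(unsigned long long bits, const unsigned short *table)
{
	int index = 0;

	for (;;) {
		unsigned short bitspec = table[index];
		/* Extract the field selected by BITFIELD(start, size). */
		unsigned int field =
			(unsigned int)(bits >> (bitspec & 63)) & (bitspec >> 6);
		unsigned short next = table[index + 1 + field];

		if (next <= TILEGX_OPC_NONE)
			return next;            /* terminal: an opcode number */
		index = next - TILEGX_OPC_NONE; /* CHILD(): descend further */
	}
}
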
1021static const unsigned short decode_X0_fsm[936] =
1022{
1023 BITFIELD(22, 9) /* index 0 */,
1024 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1025 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1026 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1027 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1028 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1029 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1030 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1031 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1032 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1033 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1034 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1035 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1036 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1037 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1038 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1039 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1040 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1041 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1042 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1043 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1044 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1045 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1046 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1047 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1048 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1049 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1050 CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
1051 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1052 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1053 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1054 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1055 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1056 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1057 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1058 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1059 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1060 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1061 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1062 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1063 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1064 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1065 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1066 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
1067 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1068 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1069 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1070 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BFEXTS,
1071 TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTU,
1072 TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFINS,
1073 TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_MM,
1074 TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_NONE,
1075 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1076 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1077 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1078 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1079 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1080 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1081 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1082 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(528), CHILD(578),
1083 CHILD(583), CHILD(588), CHILD(593), CHILD(598), TILEGX_OPC_NONE,
1084 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1085 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1086 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1087 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1088 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1089 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1090 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1091 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1092 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1093 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1094 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1095 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1096 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1097 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1098 TILEGX_OPC_NONE, CHILD(603), CHILD(620), CHILD(637), CHILD(654), CHILD(671),
1099 CHILD(703), CHILD(797), CHILD(814), CHILD(831), CHILD(848), CHILD(865),
1100 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1101 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1102 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1103 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1104 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1105 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1106 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1107 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1108 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1109 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1110 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1111 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1112 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1113 TILEGX_OPC_NONE, CHILD(889), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1114 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1115 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1116 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1117 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1118 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1119 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1120 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1121 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1122 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1123 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1124 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1125 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1126 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1127 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1128 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1129 TILEGX_OPC_NONE, CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1130 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1131 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1132 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1133 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1134 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1135 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1136 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1137 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1138 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1139 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1140 BITFIELD(6, 2) /* index 513 */,
1141 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
1142 BITFIELD(8, 2) /* index 518 */,
1143 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
1144 BITFIELD(10, 2) /* index 523 */,
1145 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
1146 BITFIELD(20, 2) /* index 528 */,
1147 TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
1148 BITFIELD(6, 2) /* index 533 */,
1149 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
1150 BITFIELD(8, 2) /* index 538 */,
1151 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
1152 BITFIELD(10, 2) /* index 543 */,
1153 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1154 BITFIELD(0, 2) /* index 548 */,
1155 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
1156 BITFIELD(2, 2) /* index 553 */,
1157 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
1158 BITFIELD(4, 2) /* index 558 */,
1159 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
1160 BITFIELD(6, 2) /* index 563 */,
1161 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
1162 BITFIELD(8, 2) /* index 568 */,
1163 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
1164 BITFIELD(10, 2) /* index 573 */,
1165 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1166 BITFIELD(20, 2) /* index 578 */,
1167 TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, TILEGX_OPC_ORI,
1168 BITFIELD(20, 2) /* index 583 */,
1169 TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI, TILEGX_OPC_V1CMPLTSI,
1170 TILEGX_OPC_V1CMPLTUI,
1171 BITFIELD(20, 2) /* index 588 */,
1172 TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI, TILEGX_OPC_V2ADDI,
1173 TILEGX_OPC_V2CMPEQI,
1174 BITFIELD(20, 2) /* index 593 */,
1175 TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI, TILEGX_OPC_V2MAXSI,
1176 TILEGX_OPC_V2MINSI,
1177 BITFIELD(20, 2) /* index 598 */,
1178 TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1179 BITFIELD(18, 4) /* index 603 */,
1180 TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
1181 TILEGX_OPC_AND, TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_CMPEQ,
1182 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1183 TILEGX_OPC_CMPNE, TILEGX_OPC_CMULAF, TILEGX_OPC_CMULA, TILEGX_OPC_CMULFR,
1184 BITFIELD(18, 4) /* index 620 */,
1185 TILEGX_OPC_CMULF, TILEGX_OPC_CMULHR, TILEGX_OPC_CMULH, TILEGX_OPC_CMUL,
1186 TILEGX_OPC_CRC32_32, TILEGX_OPC_CRC32_8, TILEGX_OPC_DBLALIGN2,
1187 TILEGX_OPC_DBLALIGN4, TILEGX_OPC_DBLALIGN6, TILEGX_OPC_DBLALIGN,
1188 TILEGX_OPC_FDOUBLE_ADDSUB, TILEGX_OPC_FDOUBLE_ADD_FLAGS,
1189 TILEGX_OPC_FDOUBLE_MUL_FLAGS, TILEGX_OPC_FDOUBLE_PACK1,
1190 TILEGX_OPC_FDOUBLE_PACK2, TILEGX_OPC_FDOUBLE_SUB_FLAGS,
1191 BITFIELD(18, 4) /* index 637 */,
1192 TILEGX_OPC_FDOUBLE_UNPACK_MAX, TILEGX_OPC_FDOUBLE_UNPACK_MIN,
1193 TILEGX_OPC_FSINGLE_ADD1, TILEGX_OPC_FSINGLE_ADDSUB2,
1194 TILEGX_OPC_FSINGLE_MUL1, TILEGX_OPC_FSINGLE_MUL2, TILEGX_OPC_FSINGLE_PACK2,
1195 TILEGX_OPC_FSINGLE_SUB1, TILEGX_OPC_MNZ, TILEGX_OPC_MULAX,
1196 TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HS_HU, TILEGX_OPC_MULA_HS_LS,
1197 TILEGX_OPC_MULA_HS_LU, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_HU_LS,
1198 BITFIELD(18, 4) /* index 654 */,
1199 TILEGX_OPC_MULA_HU_LU, TILEGX_OPC_MULA_LS_LS, TILEGX_OPC_MULA_LS_LU,
1200 TILEGX_OPC_MULA_LU_LU, TILEGX_OPC_MULX, TILEGX_OPC_MUL_HS_HS,
1201 TILEGX_OPC_MUL_HS_HU, TILEGX_OPC_MUL_HS_LS, TILEGX_OPC_MUL_HS_LU,
1202 TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_HU_LS, TILEGX_OPC_MUL_HU_LU,
1203 TILEGX_OPC_MUL_LS_LS, TILEGX_OPC_MUL_LS_LU, TILEGX_OPC_MUL_LU_LU,
1204 TILEGX_OPC_MZ,
1205 BITFIELD(18, 4) /* index 671 */,
1206 TILEGX_OPC_NOR, CHILD(688), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
1207 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
1208 TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
1209 TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_SHUFFLEBYTES,
1210 TILEGX_OPC_SUBXSC,
1211 BITFIELD(12, 2) /* index 688 */,
1212 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(693),
1213 BITFIELD(14, 2) /* index 693 */,
1214 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(698),
1215 BITFIELD(16, 2) /* index 698 */,
1216 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1217 BITFIELD(18, 4) /* index 703 */,
1218 TILEGX_OPC_SUBX, TILEGX_OPC_SUB, CHILD(720), TILEGX_OPC_V1ADDUC,
1219 TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADIFFU, TILEGX_OPC_V1AVGU,
1220 TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
1221 TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
1222 TILEGX_OPC_V1DDOTPUSA, TILEGX_OPC_V1DDOTPUS, TILEGX_OPC_V1DOTPA,
1223 BITFIELD(12, 4) /* index 720 */,
1224 TILEGX_OPC_NONE, CHILD(737), CHILD(742), CHILD(747), CHILD(752), CHILD(757),
1225 CHILD(762), CHILD(767), CHILD(772), CHILD(777), CHILD(782), CHILD(787),
1226 CHILD(792), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1227 BITFIELD(16, 2) /* index 737 */,
1228 TILEGX_OPC_CLZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1229 BITFIELD(16, 2) /* index 742 */,
1230 TILEGX_OPC_CTZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1231 BITFIELD(16, 2) /* index 747 */,
1232 TILEGX_OPC_FNOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1233 BITFIELD(16, 2) /* index 752 */,
1234 TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1235 BITFIELD(16, 2) /* index 757 */,
1236 TILEGX_OPC_NOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1237 BITFIELD(16, 2) /* index 762 */,
1238 TILEGX_OPC_PCNT, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1239 BITFIELD(16, 2) /* index 767 */,
1240 TILEGX_OPC_REVBITS, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1241 BITFIELD(16, 2) /* index 772 */,
1242 TILEGX_OPC_REVBYTES, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1243 BITFIELD(16, 2) /* index 777 */,
1244 TILEGX_OPC_TBLIDXB0, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1245 BITFIELD(16, 2) /* index 782 */,
1246 TILEGX_OPC_TBLIDXB1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1247 BITFIELD(16, 2) /* index 787 */,
1248 TILEGX_OPC_TBLIDXB2, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1249 BITFIELD(16, 2) /* index 792 */,
1250 TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1251 BITFIELD(18, 4) /* index 797 */,
1252 TILEGX_OPC_V1DOTPUSA, TILEGX_OPC_V1DOTPUS, TILEGX_OPC_V1DOTP,
1253 TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1MAXU,
1254 TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MULTU, TILEGX_OPC_V1MULUS,
1255 TILEGX_OPC_V1MULU, TILEGX_OPC_V1MZ, TILEGX_OPC_V1SADAU, TILEGX_OPC_V1SADU,
1256 TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS,
1257 BITFIELD(18, 4) /* index 814 */,
1258 TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC, TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC,
1259 TILEGX_OPC_V2ADD, TILEGX_OPC_V2ADIFFS, TILEGX_OPC_V2AVGS,
1260 TILEGX_OPC_V2CMPEQ, TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU,
1261 TILEGX_OPC_V2CMPLTS, TILEGX_OPC_V2CMPLTU, TILEGX_OPC_V2CMPNE,
1262 TILEGX_OPC_V2DOTPA, TILEGX_OPC_V2DOTP, TILEGX_OPC_V2INT_H,
1263 BITFIELD(18, 4) /* index 831 */,
1264 TILEGX_OPC_V2INT_L, TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ,
1265 TILEGX_OPC_V2MULFSC, TILEGX_OPC_V2MULS, TILEGX_OPC_V2MULTS, TILEGX_OPC_V2MZ,
1266 TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
1267 TILEGX_OPC_V2SADAS, TILEGX_OPC_V2SADAU, TILEGX_OPC_V2SADS,
1268 TILEGX_OPC_V2SADU, TILEGX_OPC_V2SHLSC,
1269 BITFIELD(18, 4) /* index 848 */,
1270 TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU, TILEGX_OPC_V2SUBSC,
1271 TILEGX_OPC_V2SUB, TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
1272 TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
1273 TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
1274 TILEGX_OPC_V4SUB,
1275 BITFIELD(18, 3) /* index 865 */,
1276 CHILD(874), CHILD(877), CHILD(880), CHILD(883), CHILD(886), TILEGX_OPC_NONE,
1277 TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1278 BITFIELD(21, 1) /* index 874 */,
1279 TILEGX_OPC_XOR, TILEGX_OPC_NONE,
1280 BITFIELD(21, 1) /* index 877 */,
1281 TILEGX_OPC_V1DDOTPUA, TILEGX_OPC_NONE,
1282 BITFIELD(21, 1) /* index 880 */,
1283 TILEGX_OPC_V1DDOTPU, TILEGX_OPC_NONE,
1284 BITFIELD(21, 1) /* index 883 */,
1285 TILEGX_OPC_V1DOTPUA, TILEGX_OPC_NONE,
1286 BITFIELD(21, 1) /* index 886 */,
1287 TILEGX_OPC_V1DOTPU, TILEGX_OPC_NONE,
1288 BITFIELD(18, 4) /* index 889 */,
1289 TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
1290 TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
1291 TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
1292 TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1293 TILEGX_OPC_NONE,
1294 BITFIELD(0, 2) /* index 906 */,
1295 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1296 CHILD(911),
1297 BITFIELD(2, 2) /* index 911 */,
1298 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1299 CHILD(916),
1300 BITFIELD(4, 2) /* index 916 */,
1301 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1302 CHILD(921),
1303 BITFIELD(6, 2) /* index 921 */,
1304 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1305 CHILD(926),
1306 BITFIELD(8, 2) /* index 926 */,
1307 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1308 CHILD(931),
1309 BITFIELD(10, 2) /* index 931 */,
1310 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1311 TILEGX_OPC_INFOL,
1312};
1313
1314static const unsigned short decode_X1_fsm[1206] =
1315{
1316 BITFIELD(53, 9) /* index 0 */,
1317 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1318 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1319 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1320 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1321 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1322 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1323 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1324 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1325 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1326 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1327 CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
1328 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1329 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1330 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1331 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1332 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1333 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1334 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1335 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1336 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1337 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1338 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1339 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1340 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1341 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1342 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1343 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
1344 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1345 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1346 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1347 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1348 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1349 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1350 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1351 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BEQZT,
1352 TILEGX_OPC_BEQZT, TILEGX_OPC_BEQZ, TILEGX_OPC_BEQZ, TILEGX_OPC_BGEZT,
1353 TILEGX_OPC_BGEZT, TILEGX_OPC_BGEZ, TILEGX_OPC_BGEZ, TILEGX_OPC_BGTZT,
1354 TILEGX_OPC_BGTZT, TILEGX_OPC_BGTZ, TILEGX_OPC_BGTZ, TILEGX_OPC_BLBCT,
1355 TILEGX_OPC_BLBCT, TILEGX_OPC_BLBC, TILEGX_OPC_BLBC, TILEGX_OPC_BLBST,
1356 TILEGX_OPC_BLBST, TILEGX_OPC_BLBS, TILEGX_OPC_BLBS, TILEGX_OPC_BLEZT,
1357 TILEGX_OPC_BLEZT, TILEGX_OPC_BLEZ, TILEGX_OPC_BLEZ, TILEGX_OPC_BLTZT,
1358 TILEGX_OPC_BLTZT, TILEGX_OPC_BLTZ, TILEGX_OPC_BLTZ, TILEGX_OPC_BNEZT,
1359 TILEGX_OPC_BNEZT, TILEGX_OPC_BNEZ, TILEGX_OPC_BNEZ, CHILD(528), CHILD(578),
1360 CHILD(598), CHILD(663), CHILD(683), CHILD(688), CHILD(693), CHILD(698),
1361 CHILD(703), CHILD(708), CHILD(713), CHILD(718), TILEGX_OPC_NONE,
1362 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1363 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1364 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1365 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1366 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1367 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1368 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1369 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1370 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1371 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1372 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1373 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1374 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_JAL,
1375 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1376 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1377 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1378 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1379 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1380 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1381 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1382 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_J, TILEGX_OPC_J,
1383 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1384 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1385 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1386 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1387 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1388 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1389 CHILD(723), CHILD(740), CHILD(772), CHILD(789), CHILD(1108), CHILD(1125),
1390 CHILD(1142), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1391 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1392 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1393 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1394 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1395 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1396 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1397 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1398 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1399 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1400 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1401 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1402 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1403 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1404 TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1159), TILEGX_OPC_NONE,
1405 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1406 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1407 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1408 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1409 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1410 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1411 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1412 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1413 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1414 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1415 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1416 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1417 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1418 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1419 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1420 TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1176), CHILD(1176), CHILD(1176),
1421 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1422 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1423 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1424 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1425 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1426 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1427 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1428 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1429 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1430 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1431 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1432 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1433 CHILD(1176),
1434 BITFIELD(37, 2) /* index 513 */,
1435 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
1436 BITFIELD(39, 2) /* index 518 */,
1437 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
1438 BITFIELD(41, 2) /* index 523 */,
1439 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
1440 BITFIELD(51, 2) /* index 528 */,
1441 TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
1442 BITFIELD(37, 2) /* index 533 */,
1443 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
1444 BITFIELD(39, 2) /* index 538 */,
1445 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
1446 BITFIELD(41, 2) /* index 543 */,
1447 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1448 BITFIELD(31, 2) /* index 548 */,
1449 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
1450 BITFIELD(33, 2) /* index 553 */,
1451 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
1452 BITFIELD(35, 2) /* index 558 */,
1453 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
1454 BITFIELD(37, 2) /* index 563 */,
1455 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
1456 BITFIELD(39, 2) /* index 568 */,
1457 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
1458 BITFIELD(41, 2) /* index 573 */,
1459 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1460 BITFIELD(51, 2) /* index 578 */,
1461 TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, CHILD(583),
1462 BITFIELD(31, 2) /* index 583 */,
1463 TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(588),
1464 BITFIELD(33, 2) /* index 588 */,
1465 TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(593),
1466 BITFIELD(35, 2) /* index 593 */,
1467 TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD,
1468 TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
1469 BITFIELD(51, 2) /* index 598 */,
1470 CHILD(603), CHILD(618), CHILD(633), CHILD(648),
1471 BITFIELD(31, 2) /* index 603 */,
1472 TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(608),
1473 BITFIELD(33, 2) /* index 608 */,
1474 TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(613),
1475 BITFIELD(35, 2) /* index 613 */,
1476 TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD,
1477 TILEGX_OPC_PREFETCH_ADD_L1,
1478 BITFIELD(31, 2) /* index 618 */,
1479 TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(623),
1480 BITFIELD(33, 2) /* index 623 */,
1481 TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(628),
1482 BITFIELD(35, 2) /* index 628 */,
1483 TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD,
1484 TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
1485 BITFIELD(31, 2) /* index 633 */,
1486 TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(638),
1487 BITFIELD(33, 2) /* index 638 */,
1488 TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(643),
1489 BITFIELD(35, 2) /* index 643 */,
1490 TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD,
1491 TILEGX_OPC_PREFETCH_ADD_L2,
1492 BITFIELD(31, 2) /* index 648 */,
1493 TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(653),
1494 BITFIELD(33, 2) /* index 653 */,
1495 TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(658),
1496 BITFIELD(35, 2) /* index 658 */,
1497 TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
1498 TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
1499 BITFIELD(51, 2) /* index 663 */,
1500 CHILD(668), TILEGX_OPC_LDNT1S_ADD, TILEGX_OPC_LDNT1U_ADD,
1501 TILEGX_OPC_LDNT2S_ADD,
1502 BITFIELD(31, 2) /* index 668 */,
1503 TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(673),
1504 BITFIELD(33, 2) /* index 673 */,
1505 TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(678),
1506 BITFIELD(35, 2) /* index 678 */,
1507 TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD,
1508 TILEGX_OPC_PREFETCH_ADD_L3,
1509 BITFIELD(51, 2) /* index 683 */,
1510 TILEGX_OPC_LDNT2U_ADD, TILEGX_OPC_LDNT4S_ADD, TILEGX_OPC_LDNT4U_ADD,
1511 TILEGX_OPC_LDNT_ADD,
1512 BITFIELD(51, 2) /* index 688 */,
1513 TILEGX_OPC_LD_ADD, TILEGX_OPC_LDNA_ADD, TILEGX_OPC_MFSPR, TILEGX_OPC_MTSPR,
1514 BITFIELD(51, 2) /* index 693 */,
1515 TILEGX_OPC_ORI, TILEGX_OPC_ST1_ADD, TILEGX_OPC_ST2_ADD, TILEGX_OPC_ST4_ADD,
1516 BITFIELD(51, 2) /* index 698 */,
1517 TILEGX_OPC_STNT1_ADD, TILEGX_OPC_STNT2_ADD, TILEGX_OPC_STNT4_ADD,
1518 TILEGX_OPC_STNT_ADD,
1519 BITFIELD(51, 2) /* index 703 */,
1520 TILEGX_OPC_ST_ADD, TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI,
1521 TILEGX_OPC_V1CMPLTSI,
1522 BITFIELD(51, 2) /* index 708 */,
1523 TILEGX_OPC_V1CMPLTUI, TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI,
1524 TILEGX_OPC_V2ADDI,
1525 BITFIELD(51, 2) /* index 713 */,
1526 TILEGX_OPC_V2CMPEQI, TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI,
1527 TILEGX_OPC_V2MAXSI,
1528 BITFIELD(51, 2) /* index 718 */,
1529 TILEGX_OPC_V2MINSI, TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1530 BITFIELD(49, 4) /* index 723 */,
1531 TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
1532 TILEGX_OPC_AND, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPEXCH4, TILEGX_OPC_CMPEXCH,
1533 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1534 TILEGX_OPC_CMPNE, TILEGX_OPC_DBLALIGN2, TILEGX_OPC_DBLALIGN4,
1535 TILEGX_OPC_DBLALIGN6,
1536 BITFIELD(49, 4) /* index 740 */,
1537 TILEGX_OPC_EXCH4, TILEGX_OPC_EXCH, TILEGX_OPC_FETCHADD4,
1538 TILEGX_OPC_FETCHADDGEZ4, TILEGX_OPC_FETCHADDGEZ, TILEGX_OPC_FETCHADD,
1539 TILEGX_OPC_FETCHAND4, TILEGX_OPC_FETCHAND, TILEGX_OPC_FETCHOR4,
1540 TILEGX_OPC_FETCHOR, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, TILEGX_OPC_NOR,
1541 CHILD(757), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
1542 BITFIELD(43, 2) /* index 757 */,
1543 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(762),
1544 BITFIELD(45, 2) /* index 762 */,
1545 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(767),
1546 BITFIELD(47, 2) /* index 767 */,
1547 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1548 BITFIELD(49, 4) /* index 772 */,
1549 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
1550 TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
1551 TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_ST1,
1552 TILEGX_OPC_ST2, TILEGX_OPC_ST4, TILEGX_OPC_STNT1, TILEGX_OPC_STNT2,
1553 TILEGX_OPC_STNT4,
1554 BITFIELD(46, 7) /* index 789 */,
1555 TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
1556 TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
1557 TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST,
1558 TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_SUBXSC,
1559 TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC,
1560 TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBX,
1561 TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX,
1562 TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
1563 TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB,
1564 TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, CHILD(918), CHILD(927),
1565 CHILD(1006), CHILD(1090), CHILD(1099), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1566 TILEGX_OPC_NONE, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
1567 TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
1568 TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
1569 TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
1570 TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
1571 TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
1572 TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
1573 TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
1574 TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
1575 TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
1576 TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
1577 TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
1578 TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
1579 TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
1580 TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
1581 TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
1582 TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
1583 TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
1584 TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
1585 TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
1586 TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
1587 TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
1588 TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
1589 TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
1590 TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
1591 TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
1592 BITFIELD(43, 3) /* index 918 */,
1593 TILEGX_OPC_NONE, TILEGX_OPC_DRAIN, TILEGX_OPC_DTLBPR, TILEGX_OPC_FINV,
1594 TILEGX_OPC_FLUSHWB, TILEGX_OPC_FLUSH, TILEGX_OPC_FNOP, TILEGX_OPC_ICOH,
1595 BITFIELD(43, 3) /* index 927 */,
1596 CHILD(936), TILEGX_OPC_INV, TILEGX_OPC_IRET, TILEGX_OPC_JALRP,
1597 TILEGX_OPC_JALR, TILEGX_OPC_JRP, TILEGX_OPC_JR, CHILD(991),
1598 BITFIELD(31, 2) /* index 936 */,
1599 CHILD(941), CHILD(966), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1600 BITFIELD(33, 2) /* index 941 */,
1601 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(946),
1602 BITFIELD(35, 2) /* index 946 */,
1603 TILEGX_OPC_ILL, CHILD(951), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1604 BITFIELD(37, 2) /* index 951 */,
1605 TILEGX_OPC_ILL, CHILD(956), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1606 BITFIELD(39, 2) /* index 956 */,
1607 TILEGX_OPC_ILL, CHILD(961), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1608 BITFIELD(41, 2) /* index 961 */,
1609 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_BPT, TILEGX_OPC_ILL,
1610 BITFIELD(33, 2) /* index 966 */,
1611 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(971),
1612 BITFIELD(35, 2) /* index 971 */,
1613 TILEGX_OPC_ILL, CHILD(976), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1614 BITFIELD(37, 2) /* index 976 */,
1615 TILEGX_OPC_ILL, CHILD(981), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1616 BITFIELD(39, 2) /* index 981 */,
1617 TILEGX_OPC_ILL, CHILD(986), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1618 BITFIELD(41, 2) /* index 986 */,
1619 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_RAISE, TILEGX_OPC_ILL,
1620 BITFIELD(31, 2) /* index 991 */,
1621 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(996),
1622 BITFIELD(33, 2) /* index 996 */,
1623 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(1001),
1624 BITFIELD(35, 2) /* index 1001 */,
1625 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
1626 TILEGX_OPC_PREFETCH_L1_FAULT,
1627 BITFIELD(43, 3) /* index 1006 */,
1628 CHILD(1015), CHILD(1030), CHILD(1045), CHILD(1060), CHILD(1075),
1629 TILEGX_OPC_LDNA, TILEGX_OPC_LDNT1S, TILEGX_OPC_LDNT1U,
1630 BITFIELD(31, 2) /* index 1015 */,
1631 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1020),
1632 BITFIELD(33, 2) /* index 1020 */,
1633 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1025),
1634 BITFIELD(35, 2) /* index 1025 */,
1635 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
1636 BITFIELD(31, 2) /* index 1030 */,
1637 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1035),
1638 BITFIELD(33, 2) /* index 1035 */,
1639 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1040),
1640 BITFIELD(35, 2) /* index 1040 */,
1641 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
1642 TILEGX_OPC_PREFETCH_L2_FAULT,
1643 BITFIELD(31, 2) /* index 1045 */,
1644 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1050),
1645 BITFIELD(33, 2) /* index 1050 */,
1646 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1055),
1647 BITFIELD(35, 2) /* index 1055 */,
1648 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
1649 BITFIELD(31, 2) /* index 1060 */,
1650 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1065),
1651 BITFIELD(33, 2) /* index 1065 */,
1652 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1070),
1653 BITFIELD(35, 2) /* index 1070 */,
1654 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S,
1655 TILEGX_OPC_PREFETCH_L3_FAULT,
1656 BITFIELD(31, 2) /* index 1075 */,
1657 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1080),
1658 BITFIELD(33, 2) /* index 1080 */,
1659 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1085),
1660 BITFIELD(35, 2) /* index 1085 */,
1661 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
1662 BITFIELD(43, 3) /* index 1090 */,
1663 TILEGX_OPC_LDNT2S, TILEGX_OPC_LDNT2U, TILEGX_OPC_LDNT4S, TILEGX_OPC_LDNT4U,
1664 TILEGX_OPC_LDNT, TILEGX_OPC_LD, TILEGX_OPC_LNK, TILEGX_OPC_MF,
1665 BITFIELD(43, 3) /* index 1099 */,
1666 TILEGX_OPC_NAP, TILEGX_OPC_NOP, TILEGX_OPC_SWINT0, TILEGX_OPC_SWINT1,
1667 TILEGX_OPC_SWINT2, TILEGX_OPC_SWINT3, TILEGX_OPC_WH64, TILEGX_OPC_NONE,
1668 BITFIELD(49, 4) /* index 1108 */,
1669 TILEGX_OPC_V1MAXU, TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MZ,
1670 TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS, TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC,
1671 TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC, TILEGX_OPC_V2ADD, TILEGX_OPC_V2CMPEQ,
1672 TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU, TILEGX_OPC_V2CMPLTS,
1673 TILEGX_OPC_V2CMPLTU,
1674 BITFIELD(49, 4) /* index 1125 */,
1675 TILEGX_OPC_V2CMPNE, TILEGX_OPC_V2INT_H, TILEGX_OPC_V2INT_L,
1676 TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ, TILEGX_OPC_V2MZ,
1677 TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
1678 TILEGX_OPC_V2SHLSC, TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU,
1679 TILEGX_OPC_V2SUBSC, TILEGX_OPC_V2SUB,
1680 BITFIELD(49, 4) /* index 1142 */,
1681 TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
1682 TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
1683 TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
1684 TILEGX_OPC_V4SUB, TILEGX_OPC_XOR, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1685 TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1686 BITFIELD(49, 4) /* index 1159 */,
1687 TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
1688 TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
1689 TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
1690 TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1691 TILEGX_OPC_NONE,
1692 BITFIELD(31, 2) /* index 1176 */,
1693 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1694 CHILD(1181),
1695 BITFIELD(33, 2) /* index 1181 */,
1696 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1697 CHILD(1186),
1698 BITFIELD(35, 2) /* index 1186 */,
1699 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1700 CHILD(1191),
1701 BITFIELD(37, 2) /* index 1191 */,
1702 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1703 CHILD(1196),
1704 BITFIELD(39, 2) /* index 1196 */,
1705 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1706 CHILD(1201),
1707 BITFIELD(41, 2) /* index 1201 */,
1708 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1709 TILEGX_OPC_INFOL,
1710};
1711
1712static const unsigned short decode_Y0_fsm[178] =
1713{
1714 BITFIELD(27, 4) /* index 0 */,
1715 CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
1716 TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(118), CHILD(123),
1717 CHILD(128), CHILD(133), CHILD(153), CHILD(158), CHILD(163), CHILD(168),
1718 CHILD(173),
1719 BITFIELD(6, 2) /* index 17 */,
1720 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
1721 BITFIELD(8, 2) /* index 22 */,
1722 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
1723 BITFIELD(10, 2) /* index 27 */,
1724 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1725 BITFIELD(0, 2) /* index 32 */,
1726 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
1727 BITFIELD(2, 2) /* index 37 */,
1728 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
1729 BITFIELD(4, 2) /* index 42 */,
1730 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
1731 BITFIELD(6, 2) /* index 47 */,
1732 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
1733 BITFIELD(8, 2) /* index 52 */,
1734 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
1735 BITFIELD(10, 2) /* index 57 */,
1736 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1737 BITFIELD(18, 2) /* index 62 */,
1738 TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
1739 BITFIELD(15, 5) /* index 67 */,
1740 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
1741 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
1742 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD,
1743 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
1744 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
1745 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
1746 TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
1747 TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(100),
1748 CHILD(109), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1749 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1750 BITFIELD(12, 3) /* index 100 */,
1751 TILEGX_OPC_NONE, TILEGX_OPC_CLZ, TILEGX_OPC_CTZ, TILEGX_OPC_FNOP,
1752 TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NOP, TILEGX_OPC_PCNT,
1753 TILEGX_OPC_REVBITS,
1754 BITFIELD(12, 3) /* index 109 */,
1755 TILEGX_OPC_REVBYTES, TILEGX_OPC_TBLIDXB0, TILEGX_OPC_TBLIDXB1,
1756 TILEGX_OPC_TBLIDXB2, TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1757 TILEGX_OPC_NONE,
1758 BITFIELD(18, 2) /* index 118 */,
1759 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1760 BITFIELD(18, 2) /* index 123 */,
1761 TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE, TILEGX_OPC_MULAX, TILEGX_OPC_MULX,
1762 BITFIELD(18, 2) /* index 128 */,
1763 TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
1764 BITFIELD(18, 2) /* index 133 */,
1765 TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(138), TILEGX_OPC_XOR,
1766 BITFIELD(12, 2) /* index 138 */,
1767 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(143),
1768 BITFIELD(14, 2) /* index 143 */,
1769 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(148),
1770 BITFIELD(16, 2) /* index 148 */,
1771 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1772 BITFIELD(18, 2) /* index 153 */,
1773 TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
1774 BITFIELD(18, 2) /* index 158 */,
1775 TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
1776 TILEGX_OPC_SHL3ADDX,
1777 BITFIELD(18, 2) /* index 163 */,
1778 TILEGX_OPC_MUL_HS_HS, TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_LS_LS,
1779 TILEGX_OPC_MUL_LU_LU,
1780 BITFIELD(18, 2) /* index 168 */,
1781 TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_LS_LS,
1782 TILEGX_OPC_MULA_LU_LU,
1783 BITFIELD(18, 2) /* index 173 */,
1784 TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
1785};
1786
1787static const unsigned short decode_Y1_fsm[167] =
1788{
1789 BITFIELD(58, 4) /* index 0 */,
1790 TILEGX_OPC_NONE, CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
1791 TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(117), CHILD(122),
1792 CHILD(127), CHILD(132), CHILD(152), CHILD(157), CHILD(162), TILEGX_OPC_NONE,
1793 BITFIELD(37, 2) /* index 17 */,
1794 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
1795 BITFIELD(39, 2) /* index 22 */,
1796 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
1797 BITFIELD(41, 2) /* index 27 */,
1798 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1799 BITFIELD(31, 2) /* index 32 */,
1800 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
1801 BITFIELD(33, 2) /* index 37 */,
1802 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
1803 BITFIELD(35, 2) /* index 42 */,
1804 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
1805 BITFIELD(37, 2) /* index 47 */,
1806 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
1807 BITFIELD(39, 2) /* index 52 */,
1808 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
1809 BITFIELD(41, 2) /* index 57 */,
1810 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1811 BITFIELD(49, 2) /* index 62 */,
1812 TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
1813 BITFIELD(47, 4) /* index 67 */,
1814 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
1815 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
1816 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD,
1817 TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(84),
1818 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1819 BITFIELD(43, 3) /* index 84 */,
1820 CHILD(93), CHILD(96), CHILD(99), CHILD(102), CHILD(105), CHILD(108),
1821 CHILD(111), CHILD(114),
1822 BITFIELD(46, 1) /* index 93 */,
1823 TILEGX_OPC_NONE, TILEGX_OPC_FNOP,
1824 BITFIELD(46, 1) /* index 96 */,
1825 TILEGX_OPC_NONE, TILEGX_OPC_ILL,
1826 BITFIELD(46, 1) /* index 99 */,
1827 TILEGX_OPC_NONE, TILEGX_OPC_JALRP,
1828 BITFIELD(46, 1) /* index 102 */,
1829 TILEGX_OPC_NONE, TILEGX_OPC_JALR,
1830 BITFIELD(46, 1) /* index 105 */,
1831 TILEGX_OPC_NONE, TILEGX_OPC_JRP,
1832 BITFIELD(46, 1) /* index 108 */,
1833 TILEGX_OPC_NONE, TILEGX_OPC_JR,
1834 BITFIELD(46, 1) /* index 111 */,
1835 TILEGX_OPC_NONE, TILEGX_OPC_LNK,
1836 BITFIELD(46, 1) /* index 114 */,
1837 TILEGX_OPC_NONE, TILEGX_OPC_NOP,
1838 BITFIELD(49, 2) /* index 117 */,
1839 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1840 BITFIELD(49, 2) /* index 122 */,
1841 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE,
1842 BITFIELD(49, 2) /* index 127 */,
1843 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
1844 BITFIELD(49, 2) /* index 132 */,
1845 TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(137), TILEGX_OPC_XOR,
1846 BITFIELD(43, 2) /* index 137 */,
1847 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(142),
1848 BITFIELD(45, 2) /* index 142 */,
1849 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(147),
1850 BITFIELD(47, 2) /* index 147 */,
1851 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1852 BITFIELD(49, 2) /* index 152 */,
1853 TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
1854 BITFIELD(49, 2) /* index 157 */,
1855 TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
1856 TILEGX_OPC_SHL3ADDX,
1857 BITFIELD(49, 2) /* index 162 */,
1858 TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
1859};
1860
1861static const unsigned short decode_Y2_fsm[118] =
1862{
1863 BITFIELD(62, 2) /* index 0 */,
1864 TILEGX_OPC_NONE, CHILD(5), CHILD(66), CHILD(109),
1865 BITFIELD(55, 3) /* index 5 */,
1866 CHILD(14), CHILD(14), CHILD(14), CHILD(17), CHILD(40), CHILD(40), CHILD(40),
1867 CHILD(43),
1868 BITFIELD(26, 1) /* index 14 */,
1869 TILEGX_OPC_LD1S, TILEGX_OPC_LD1U,
1870 BITFIELD(26, 1) /* index 17 */,
1871 CHILD(20), CHILD(30),
1872 BITFIELD(51, 2) /* index 20 */,
1873 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(25),
1874 BITFIELD(53, 2) /* index 25 */,
1875 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
1876 TILEGX_OPC_PREFETCH_L1_FAULT,
1877 BITFIELD(51, 2) /* index 30 */,
1878 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(35),
1879 BITFIELD(53, 2) /* index 35 */,
1880 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
1881 BITFIELD(26, 1) /* index 40 */,
1882 TILEGX_OPC_LD2S, TILEGX_OPC_LD2U,
1883 BITFIELD(26, 1) /* index 43 */,
1884 CHILD(46), CHILD(56),
1885 BITFIELD(51, 2) /* index 46 */,
1886 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(51),
1887 BITFIELD(53, 2) /* index 51 */,
1888 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
1889 TILEGX_OPC_PREFETCH_L2_FAULT,
1890 BITFIELD(51, 2) /* index 56 */,
1891 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(61),
1892 BITFIELD(53, 2) /* index 61 */,
1893 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
1894 BITFIELD(56, 2) /* index 66 */,
1895 CHILD(71), CHILD(74), CHILD(90), CHILD(93),
1896 BITFIELD(26, 1) /* index 71 */,
1897 TILEGX_OPC_NONE, TILEGX_OPC_LD4S,
1898 BITFIELD(26, 1) /* index 74 */,
1899 TILEGX_OPC_NONE, CHILD(77),
1900 BITFIELD(51, 2) /* index 77 */,
1901 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(82),
1902 BITFIELD(53, 2) /* index 82 */,
1903 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(87),
1904 BITFIELD(55, 1) /* index 87 */,
1905 TILEGX_OPC_LD4S, TILEGX_OPC_PREFETCH_L3_FAULT,
1906 BITFIELD(26, 1) /* index 90 */,
1907 TILEGX_OPC_LD4U, TILEGX_OPC_LD,
1908 BITFIELD(26, 1) /* index 93 */,
1909 CHILD(96), TILEGX_OPC_LD,
1910 BITFIELD(51, 2) /* index 96 */,
1911 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(101),
1912 BITFIELD(53, 2) /* index 101 */,
1913 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(106),
1914 BITFIELD(55, 1) /* index 106 */,
1915 TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
1916 BITFIELD(26, 1) /* index 109 */,
1917 CHILD(112), CHILD(115),
1918 BITFIELD(57, 1) /* index 112 */,
1919 TILEGX_OPC_ST1, TILEGX_OPC_ST4,
1920 BITFIELD(57, 1) /* index 115 */,
1921 TILEGX_OPC_ST2, TILEGX_OPC_ST,
1922};
1923
1924#undef BITFIELD
1925#undef CHILD
1926const unsigned short * const
1927tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS] =
1928{
1929 decode_X0_fsm,
1930 decode_X1_fsm,
1931 decode_Y0_fsm,
1932 decode_Y1_fsm,
1933 decode_Y2_fsm
1934};
1935const struct tilegx_operand tilegx_operands[35] =
1936{
1937 {
1938 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X0),
1939 8, 1, 0, 0, 0, 0,
1940 create_Imm8_X0, get_Imm8_X0
1941 },
1942 {
1943 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X1),
1944 8, 1, 0, 0, 0, 0,
1945 create_Imm8_X1, get_Imm8_X1
1946 },
1947 {
1948 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y0),
1949 8, 1, 0, 0, 0, 0,
1950 create_Imm8_Y0, get_Imm8_Y0
1951 },
1952 {
1953 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y1),
1954 8, 1, 0, 0, 0, 0,
1955 create_Imm8_Y1, get_Imm8_Y1
1956 },
1957 {
1958 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X0_HW0_LAST),
1959 16, 1, 0, 0, 0, 0,
1960 create_Imm16_X0, get_Imm16_X0
1961 },
1962 {
1963 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X1_HW0_LAST),
1964 16, 1, 0, 0, 0, 0,
1965 create_Imm16_X1, get_Imm16_X1
1966 },
1967 {
1968 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1969 6, 0, 0, 1, 0, 0,
1970 create_Dest_X0, get_Dest_X0
1971 },
1972 {
1973 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1974 6, 0, 1, 0, 0, 0,
1975 create_SrcA_X0, get_SrcA_X0
1976 },
1977 {
1978 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1979 6, 0, 0, 1, 0, 0,
1980 create_Dest_X1, get_Dest_X1
1981 },
1982 {
1983 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1984 6, 0, 1, 0, 0, 0,
1985 create_SrcA_X1, get_SrcA_X1
1986 },
1987 {
1988 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1989 6, 0, 0, 1, 0, 0,
1990 create_Dest_Y0, get_Dest_Y0
1991 },
1992 {
1993 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1994 6, 0, 1, 0, 0, 0,
1995 create_SrcA_Y0, get_SrcA_Y0
1996 },
1997 {
1998 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1999 6, 0, 0, 1, 0, 0,
2000 create_Dest_Y1, get_Dest_Y1
2001 },
2002 {
2003 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2004 6, 0, 1, 0, 0, 0,
2005 create_SrcA_Y1, get_SrcA_Y1
2006 },
2007 {
2008 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2009 6, 0, 1, 0, 0, 0,
2010 create_SrcA_Y2, get_SrcA_Y2
2011 },
2012 {
2013 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2014 6, 0, 1, 1, 0, 0,
2015 create_SrcA_X1, get_SrcA_X1
2016 },
2017 {
2018 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2019 6, 0, 1, 0, 0, 0,
2020 create_SrcB_X0, get_SrcB_X0
2021 },
2022 {
2023 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2024 6, 0, 1, 0, 0, 0,
2025 create_SrcB_X1, get_SrcB_X1
2026 },
2027 {
2028 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2029 6, 0, 1, 0, 0, 0,
2030 create_SrcB_Y0, get_SrcB_Y0
2031 },
2032 {
2033 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2034 6, 0, 1, 0, 0, 0,
2035 create_SrcB_Y1, get_SrcB_Y1
2036 },
2037 {
2038 TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_BROFF_X1),
2039 17, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
2040 create_BrOff_X1, get_BrOff_X1
2041 },
2042 {
2043 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
2044 6, 0, 0, 0, 0, 0,
2045 create_BFStart_X0, get_BFStart_X0
2046 },
2047 {
2048 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
2049 6, 0, 0, 0, 0, 0,
2050 create_BFEnd_X0, get_BFEnd_X0
2051 },
2052 {
2053 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2054 6, 0, 1, 1, 0, 0,
2055 create_Dest_X0, get_Dest_X0
2056 },
2057 {
2058 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2059 6, 0, 1, 1, 0, 0,
2060 create_Dest_Y0, get_Dest_Y0
2061 },
2062 {
2063 TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_JUMPOFF_X1),
2064 27, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
2065 create_JumpOff_X1, get_JumpOff_X1
2066 },
2067 {
2068 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2069 6, 0, 0, 1, 0, 0,
2070 create_SrcBDest_Y2, get_SrcBDest_Y2
2071 },
2072 {
2073 TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MF_IMM14_X1),
2074 14, 0, 0, 0, 0, 0,
2075 create_MF_Imm14_X1, get_MF_Imm14_X1
2076 },
2077 {
2078 TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MT_IMM14_X1),
2079 14, 0, 0, 0, 0, 0,
2080 create_MT_Imm14_X1, get_MT_Imm14_X1
2081 },
2082 {
2083 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X0),
2084 6, 0, 0, 0, 0, 0,
2085 create_ShAmt_X0, get_ShAmt_X0
2086 },
2087 {
2088 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X1),
2089 6, 0, 0, 0, 0, 0,
2090 create_ShAmt_X1, get_ShAmt_X1
2091 },
2092 {
2093 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y0),
2094 6, 0, 0, 0, 0, 0,
2095 create_ShAmt_Y0, get_ShAmt_Y0
2096 },
2097 {
2098 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y1),
2099 6, 0, 0, 0, 0, 0,
2100 create_ShAmt_Y1, get_ShAmt_Y1
2101 },
2102 {
2103 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2104 6, 0, 1, 0, 0, 0,
2105 create_SrcBDest_Y2, get_SrcBDest_Y2
2106 },
2107 {
2108 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_DEST_IMM8_X1),
2109 8, 1, 0, 0, 0, 0,
2110 create_Dest_Imm8_X1, get_Dest_Imm8_X1
2111 }
2112};
2113
2114
2115
2116
2117/* Given a set of bundle bits and the lookup FSM for a specific pipe,
2118 * returns which instruction the bundle contains in that pipe.
2119 */
2120static const struct tilegx_opcode *
2121find_opcode(tilegx_bundle_bits bits, const unsigned short *table)
2122{
2123 int index = 0;
2124
2125 while (1)
2126 {
2127 unsigned short bitspec = table[index];
2128 unsigned int bitfield =
2129 ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
2130
2131 unsigned short next = table[index + 1 + bitfield];
2132 if (next <= TILEGX_OPC_NONE)
2133 return &tilegx_opcodes[next];
2134
2135 index = next - TILEGX_OPC_NONE;
2136 }
2137}
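The walk in find_opcode() above is driven by the packed bitspec entries: the low 6 bits give the start bit, the upper bits give the extraction mask, and interior links are stored biased by TILEGX_OPC_NONE (anything larger than TILEGX_OPC_NONE is a child index, anything at or below it is a terminal opcode). BITFIELD() and CHILD() are defined earlier in this file, outside this hunk, so the packing below is inferred from the decode expression rather than copied from their definitions; the toy table is hypothetical and only illustrates the lookup.

/* Toy reconstruction of the table walk, with assumed BITFIELD/CHILD packing. */
#include <stdio.h>
#include <stdint.h>

#define OPC_NONE 4                               /* pretend last opcode value */
#define BITFIELD(start, size) ((start) | (((1u << (size)) - 1) << 6))
#define CHILD(index)          (OPC_NONE + (index))

/* Root node tests bits [1:0]; field value 3 descends to a node testing bit 2. */
static const unsigned short toy_fsm[] = {
	BITFIELD(0, 2) /* index 0 */, 1, 2, 3, CHILD(5),
	BITFIELD(2, 1) /* index 5 */, 0, OPC_NONE,
};

static unsigned short toy_decode(uint64_t bits)
{
	int index = 0;
	for (;;) {
		unsigned short bitspec = toy_fsm[index];
		unsigned int field =
			((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
		unsigned short next = toy_fsm[index + 1 + field];
		if (next <= OPC_NONE)
			return next;              /* terminal: an opcode number */
		index = next - OPC_NONE;          /* interior: follow child link */
	}
}

int main(void)
{
	printf("%u %u\n", toy_decode(0x2), toy_decode(0x7));   /* prints "3 4" */
	return 0;
}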
2138
2139
2140int
2141parse_insn_tilegx(tilegx_bundle_bits bits,
2142 unsigned long long pc,
2143 struct tilegx_decoded_instruction
2144 decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE])
2145{
2146 int num_instructions = 0;
2147 int pipe;
2148
2149 int min_pipe, max_pipe;
2150 if ((bits & TILEGX_BUNDLE_MODE_MASK) == 0)
2151 {
2152 min_pipe = TILEGX_PIPELINE_X0;
2153 max_pipe = TILEGX_PIPELINE_X1;
2154 }
2155 else
2156 {
2157 min_pipe = TILEGX_PIPELINE_Y0;
2158 max_pipe = TILEGX_PIPELINE_Y2;
2159 }
2160
2161 /* For each pipe, find an instruction that fits. */
2162 for (pipe = min_pipe; pipe <= max_pipe; pipe++)
2163 {
2164 const struct tilegx_opcode *opc;
2165 struct tilegx_decoded_instruction *d;
2166 int i;
2167
2168 d = &decoded[num_instructions++];
2169 opc = find_opcode (bits, tilegx_bundle_decoder_fsms[pipe]);
2170 d->opcode = opc;
2171
2172 /* Decode each operand, sign extending, etc. as appropriate. */
2173 for (i = 0; i < opc->num_operands; i++)
2174 {
2175 const struct tilegx_operand *op =
2176 &tilegx_operands[opc->operands[pipe][i]];
2177 int raw_opval = op->extract (bits);
2178 long long opval;
2179
2180 if (op->is_signed)
2181 {
2182 /* Sign-extend the operand. */
2183 int shift = (int)((sizeof(int) * 8) - op->num_bits);
2184 raw_opval = (raw_opval << shift) >> shift;
2185 }
2186
2187 /* Adjust PC-relative scaled branch offsets. */
2188 if (op->type == TILEGX_OP_TYPE_ADDRESS)
2189 opval = (raw_opval * TILEGX_BUNDLE_SIZE_IN_BYTES) + pc;
2190 else
2191 opval = raw_opval;
2192
2193 /* Record the final value. */
2194 d->operands[i] = op;
2195 d->operand_values[i] = opval;
2196 }
2197 }
2198
2199 return num_instructions;
2200}
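For TILEGX_OP_TYPE_ADDRESS operands, parse_insn_tilegx() sign-extends the raw field and then scales it by the bundle size before adding the pc. A standalone sketch of that arithmetic; sign_extend() here uses the portable xor/subtract form rather than the shift idiom used above, and BUNDLE_BYTES is an assumed stand-in for TILEGX_BUNDLE_SIZE_IN_BYTES:

#include <stdio.h>
#include <stdint.h>

#define BUNDLE_BYTES 8	/* assumed value of TILEGX_BUNDLE_SIZE_IN_BYTES */

/* Sign-extend the low num_bits bits of raw. */
static long long sign_extend(unsigned long long raw, int num_bits)
{
	unsigned long long sign = 1ULL << (num_bits - 1);

	raw &= (sign << 1) - 1;
	return (long long)(raw ^ sign) - (long long)sign;
}

int main(void)
{
	/* A 17-bit branch-offset field holding 0x1ffff encodes -1 bundle. */
	long long off = sign_extend(0x1ffff, 17);
	unsigned long long pc = 0x10000;

	printf("offset %lld bundles, target %#llx\n",
	       off, (unsigned long long)(off * BUNDLE_BYTES + pc));
	return 0;	/* prints "offset -1 bundles, target 0xfff8" */
}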
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 49a605be94c5..c4be58cc5d50 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -22,6 +22,7 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/module.h>
25#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
26#include <asm/traps.h> 27#include <asm/traps.h>
27#include <hv/hypervisor.h> 28#include <hv/hypervisor.h>
@@ -56,6 +57,7 @@ cycles_t get_cycles(void)
56 57
57 return (((cycles_t)high) << 32) | low; 58 return (((cycles_t)high) << 32) | low;
58} 59}
60EXPORT_SYMBOL(get_cycles);
59#endif 61#endif
60 62
61/* 63/*
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index 2dffc1044d83..a5f241c24cac 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -34,13 +34,13 @@ void flush_tlb_mm(struct mm_struct *mm)
34{ 34{
35 HV_Remote_ASID asids[NR_CPUS]; 35 HV_Remote_ASID asids[NR_CPUS];
36 int i = 0, cpu; 36 int i = 0, cpu;
37 for_each_cpu(cpu, &mm->cpu_vm_mask) { 37 for_each_cpu(cpu, mm_cpumask(mm)) {
38 HV_Remote_ASID *asid = &asids[i++]; 38 HV_Remote_ASID *asid = &asids[i++];
39 asid->y = cpu / smp_topology.width; 39 asid->y = cpu / smp_topology.width;
40 asid->x = cpu % smp_topology.width; 40 asid->x = cpu % smp_topology.width;
41 asid->asid = per_cpu(current_asid, cpu); 41 asid->asid = per_cpu(current_asid, cpu);
42 } 42 }
43 flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask, 43 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
44 0, 0, 0, NULL, asids, i); 44 0, 0, 0, NULL, asids, i);
45} 45}
46 46
@@ -54,8 +54,8 @@ void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
54{ 54{
55 unsigned long size = hv_page_size(vma); 55 unsigned long size = hv_page_size(vma);
56 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 56 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
57 flush_remote(0, cache, &mm->cpu_vm_mask, 57 flush_remote(0, cache, mm_cpumask(mm),
58 va, size, size, &mm->cpu_vm_mask, NULL, 0); 58 va, size, size, mm_cpumask(mm), NULL, 0);
59} 59}
60 60
61void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va) 61void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
@@ -70,8 +70,8 @@ void flush_tlb_range(const struct vm_area_struct *vma,
70 unsigned long size = hv_page_size(vma); 70 unsigned long size = hv_page_size(vma);
71 struct mm_struct *mm = vma->vm_mm; 71 struct mm_struct *mm = vma->vm_mm;
72 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 72 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
73 flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size, 73 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
74 &mm->cpu_vm_mask, NULL, 0); 74 mm_cpumask(mm), NULL, 0);
75} 75}
76 76
77void flush_tlb_all(void) 77void flush_tlb_all(void)
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 5474fc2e77e8..f9803dfa7357 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -308,6 +308,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
308 info.si_addr = (void __user *)address; 308 info.si_addr = (void __user *)address;
309 if (signo == SIGILL) 309 if (signo == SIGILL)
310 info.si_trapno = fault_num; 310 info.si_trapno = fault_num;
311 trace_unhandled_signal("trap", regs, address, signo);
311 force_sig_info(signo, &info, current); 312 force_sig_info(signo, &info, current);
312} 313}
313 314
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 82f64cc63658..24448734f6f1 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -59,7 +59,7 @@
59 * bad kernel addresses). 59 * bad kernel addresses).
60 * 60 *
61 * Note that if the value we would store is the same as what we 61 * Note that if the value we would store is the same as what we
62 * loaded, we bypass the load. Other platforms with true atomics can 62 * loaded, we bypass the store. Other platforms with true atomics can
63 * make the guarantee that a non-atomic __clear_bit(), for example, 63 * make the guarantee that a non-atomic __clear_bit(), for example,
64 * can safely race with an atomic test_and_set_bit(); this example is 64 * can safely race with an atomic test_and_set_bit(); this example is
65 * from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do 65 * from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 35c1d8ca5f38..8928aace7a64 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -15,6 +15,7 @@
15#include <asm/page.h> 15#include <asm/page.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <arch/icache.h> 17#include <arch/icache.h>
18#include <arch/spr_def.h>
18 19
19 20
20void __flush_icache_range(unsigned long start, unsigned long end) 21void __flush_icache_range(unsigned long start, unsigned long end)
@@ -39,6 +40,18 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
39 char *p, *base; 40 char *p, *base;
40 size_t step_size, load_count; 41 size_t step_size, load_count;
41 const unsigned long STRIPE_WIDTH = 8192; 42 const unsigned long STRIPE_WIDTH = 8192;
43#ifdef __tilegx__
44 /*
45 * On TILE-Gx, we must disable the dstream prefetcher before doing
46 * a cache flush; otherwise, we could end up with data in the cache
47 * that we don't want there. Note that normally we'd do an mf
48 * after the SPR write disabling the prefetcher, but we do one
49 * below, before any further loads, so there's no need to do it
50 * here.
51 */
52 uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
53 __insn_mtspr(SPR_DSTREAM_PF, 0);
54#endif
42 55
43 /* 56 /*
44 * Flush and invalidate the buffer out of the local L1/L2 57 * Flush and invalidate the buffer out of the local L1/L2
@@ -122,4 +135,9 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
122 135
123 /* Wait for the load+inv's (and thus finvs) to have completed. */ 136 /* Wait for the load+inv's (and thus finvs) to have completed. */
124 __insn_mf(); 137 __insn_mf();
138
139#ifdef __tilegx__
140 /* Reenable the prefetcher. */
141 __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
142#endif
125} 143}
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
new file mode 100644
index 000000000000..84fdc8d8e735
--- /dev/null
+++ b/arch/tile/lib/memchr_64.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18
19void *memchr(const void *s, int c, size_t n)
20{
21 const uint64_t *last_word_ptr;
22 const uint64_t *p;
23 const char *last_byte_ptr;
24 uintptr_t s_int;
25 uint64_t goal, before_mask, v, bits;
26 char *ret;
27
28 if (__builtin_expect(n == 0, 0)) {
29 /* Don't dereference any memory if the array is empty. */
30 return NULL;
31 }
32
33 /* Get an aligned pointer. */
34 s_int = (uintptr_t) s;
35 p = (const uint64_t *)(s_int & -8);
36
37 /* Create eight copies of the byte for which we are looking. */
38 goal = 0x0101010101010101ULL * (uint8_t) c;
39
40 /* Read the first word, but munge it so that bytes before the array
41 * will not match goal.
42 *
43 * Note that this shift count expression works because we know
44 * shift counts are taken mod 64.
45 */
46 before_mask = (1ULL << (s_int << 3)) - 1;
47 v = (*p | before_mask) ^ (goal & before_mask);
48
49 /* Compute the address of the last byte. */
50 last_byte_ptr = (const char *)s + n - 1;
51
52 /* Compute the address of the word containing the last byte. */
53 last_word_ptr = (const uint64_t *)((uintptr_t) last_byte_ptr & -8);
54
55 while ((bits = __insn_v1cmpeq(v, goal)) == 0) {
56 if (__builtin_expect(p == last_word_ptr, 0)) {
57 /* We already read the last word in the array,
58 * so give up.
59 */
60 return NULL;
61 }
62 v = *++p;
63 }
64
65 /* We found a match, but it might be in a byte past the end
66 * of the array.
67 */
68 ret = ((char *)p) + (__insn_ctz(bits) >> 3);
69 return (ret <= last_byte_ptr) ? ret : NULL;
70}
71EXPORT_SYMBOL(memchr);
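The search loop relies on two TILE-Gx specifics: shift counts taken mod 64 (so (1ULL << (s_int << 3)) - 1 masks exactly the bytes that precede s in the first aligned word) and the __insn_v1cmpeq byte-wise compare. A portable sketch of the same byte-match test, using the classic has-zero bit trick in place of v1cmpeq (a generic illustration, not the Tile code path):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Nonzero iff some byte of x is zero (standard SWAR "haszero" test). */
static uint64_t haszero64(uint64_t x)
{
	return (x - 0x0101010101010101ULL) & ~x & 0x8080808080808080ULL;
}

/* Nonzero iff some byte of x equals c: XOR with eight copies of c and
 * look for the resulting zero byte, as the loop above does with v1cmpeq. */
static uint64_t hasbyte64(uint64_t x, unsigned char c)
{
	return haszero64(x ^ (0x0101010101010101ULL * c));
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abcdefgh", 8);
	printf("%d %d\n", hasbyte64(w, 'e') != 0, hasbyte64(w, 'z') != 0);
	return 0;	/* prints "1 0" */
}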
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
new file mode 100644
index 000000000000..3fab9a6a2bbe
--- /dev/null
+++ b/arch/tile/lib/memcpy_64.c
@@ -0,0 +1,220 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18#define __memcpy memcpy
19/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
20
21/* Must be 8 bytes in size. */
22#define word_t uint64_t
23
24#if CHIP_L2_LINE_SIZE() != 64 && CHIP_L2_LINE_SIZE() != 128
25#error "Assumes 64 or 128 byte line size"
26#endif
27
28/* How many cache lines ahead should we prefetch? */
29#define PREFETCH_LINES_AHEAD 3
30
31/*
32 * Provide "base versions" of load and store for the normal code path.
33 * The kernel provides other versions for userspace copies.
34 */
35#define ST(p, v) (*(p) = (v))
36#define LD(p) (*(p))
37
38#ifndef USERCOPY_FUNC
39#define ST1 ST
40#define ST2 ST
41#define ST4 ST
42#define ST8 ST
43#define LD1 LD
44#define LD2 LD
45#define LD4 LD
46#define LD8 LD
47#define RETVAL dstv
48void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
49#else
50/*
51 * Special kernel version will provide implementation of the LDn/STn
52 * macros to return a count of uncopied bytes due to mm fault.
53 */
54#define RETVAL 0
55int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
56#endif
57{
58 char *__restrict dst1 = (char *)dstv;
59 const char *__restrict src1 = (const char *)srcv;
60 const char *__restrict src1_end;
61 const char *__restrict prefetch;
62 word_t *__restrict dst8; /* 8-byte pointer to destination memory. */
63 word_t final; /* Final bytes to write to trailing word, if any */
64 long i;
65
66 if (n < 16) {
67 for (; n; n--)
68 ST1(dst1++, LD1(src1++));
69 return RETVAL;
70 }
71
72 /*
73 * Locate the end of source memory we will copy. Don't
74 * prefetch past this.
75 */
76 src1_end = src1 + n - 1;
77
78 /* Prefetch ahead a few cache lines, but not past the end. */
79 prefetch = src1;
80 for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
81 __insn_prefetch(prefetch);
82 prefetch += CHIP_L2_LINE_SIZE();
83 prefetch = (prefetch > src1_end) ? prefetch : src1;
84 }
85
86 /* Copy bytes until dst is word-aligned. */
87 for (; (uintptr_t)dst1 & (sizeof(word_t) - 1); n--)
88 ST1(dst1++, LD1(src1++));
89
90 /* 8-byte pointer to destination memory. */
91 dst8 = (word_t *)dst1;
92
93 if (__builtin_expect((uintptr_t)src1 & (sizeof(word_t) - 1), 0)) {
94 /*
95 * Misaligned copy. Copy 8 bytes at a time, but don't
96 * bother with other fanciness.
97 *
98 * TODO: Consider prefetching and using wh64 as well.
99 */
100
101 /* Create an aligned src8. */
102 const word_t *__restrict src8 =
103 (const word_t *)((uintptr_t)src1 & -sizeof(word_t));
104 word_t b;
105
106 word_t a = LD8(src8++);
107 for (; n >= sizeof(word_t); n -= sizeof(word_t)) {
108 b = LD8(src8++);
109 a = __insn_dblalign(a, b, src1);
110 ST8(dst8++, a);
111 a = b;
112 }
113
114 if (n == 0)
115 return RETVAL;
116
117 b = ((const char *)src8 <= src1_end) ? *src8 : 0;
118
119 /*
120 * Final source bytes to write to trailing partial
121 * word, if any.
122 */
123 final = __insn_dblalign(a, b, src1);
124 } else {
125 /* Aligned copy. */
126
127 const word_t* __restrict src8 = (const word_t *)src1;
128
129 /* src8 and dst8 are both word-aligned. */
130 if (n >= CHIP_L2_LINE_SIZE()) {
131 /* Copy until 'dst' is cache-line-aligned. */
132 for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
133 n -= sizeof(word_t))
134 ST8(dst8++, LD8(src8++));
135
136 for (; n >= CHIP_L2_LINE_SIZE(); ) {
137 __insn_wh64(dst8);
138
139 /*
140 * Prefetch and advance to next line
141 * to prefetch, but don't go past the end
142 */
143 __insn_prefetch(prefetch);
144 prefetch += CHIP_L2_LINE_SIZE();
145 prefetch = (prefetch > src1_end) ? prefetch :
146 (const char *)src8;
147
148 /*
149 * Copy an entire cache line. Manually
150 * unrolled to avoid idiosyncrasies of

151 * compiler unrolling.
152 */
153#define COPY_WORD(offset) ({ ST8(dst8+offset, LD8(src8+offset)); n -= 8; })
154 COPY_WORD(0);
155 COPY_WORD(1);
156 COPY_WORD(2);
157 COPY_WORD(3);
158 COPY_WORD(4);
159 COPY_WORD(5);
160 COPY_WORD(6);
161 COPY_WORD(7);
162#if CHIP_L2_LINE_SIZE() == 128
163 COPY_WORD(8);
164 COPY_WORD(9);
165 COPY_WORD(10);
166 COPY_WORD(11);
167 COPY_WORD(12);
168 COPY_WORD(13);
169 COPY_WORD(14);
170 COPY_WORD(15);
171#elif CHIP_L2_LINE_SIZE() != 64
172# error Fix code that assumes particular L2 cache line sizes
173#endif
174
175 dst8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
176 src8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
177 }
178 }
179
180 for (; n >= sizeof(word_t); n -= sizeof(word_t))
181 ST8(dst8++, LD8(src8++));
182
183 if (__builtin_expect(n == 0, 1))
184 return RETVAL;
185
186 final = LD8(src8);
187 }
188
189 /* n != 0 if we get here. Write out any trailing bytes. */
190 dst1 = (char *)dst8;
191 if (n & 4) {
192 ST4((uint32_t *)dst1, final);
193 dst1 += 4;
194 final >>= 32;
195 n &= 3;
196 }
197 if (n & 2) {
198 ST2((uint16_t *)dst1, final);
199 dst1 += 2;
200 final >>= 16;
201 n &= 1;
202 }
203 if (n)
204 ST1((uint8_t *)dst1, final);
205
206 return RETVAL;
207}
208
209
210#ifdef USERCOPY_FUNC
211#undef ST1
212#undef ST2
213#undef ST4
214#undef ST8
215#undef LD1
216#undef LD2
217#undef LD4
218#undef LD8
219#undef USERCOPY_FUNC
220#endif
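In the misaligned branch above, each output word is assembled from two consecutive aligned source words with __insn_dblalign, steered by the low bits of the original source pointer. Assuming little-endian byte order and the usual funnel-merge semantics for dblalign (an inference, not taken from the Tile ISA manual), the core of that merge looks like this:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Merge two adjacent aligned words into the 8 bytes that start "off" bytes
 * into the pair; off is 1..7 on the misaligned path, so no 64-bit shifts. */
static uint64_t funnel_merge(uint64_t lo, uint64_t hi, unsigned int off)
{
	return (lo >> (8 * off)) | (hi << (8 * (8 - off)));
}

int main(void)
{
	unsigned char buf[16];
	uint64_t lo, hi, want, got;
	int i;

	for (i = 0; i < 16; i++)
		buf[i] = (unsigned char)i;

	memcpy(&lo, buf, 8);
	memcpy(&hi, buf + 8, 8);
	memcpy(&want, buf + 3, 8);		/* the 8 bytes at offset 3 */
	got = funnel_merge(lo, hi, 3);

	printf("%s\n", got == want ? "match" : "mismatch");	/* little-endian: match */
	return 0;
}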
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
new file mode 100644
index 000000000000..4763b3aff1cc
--- /dev/null
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Do memcpy(), but trap and return "n" when a load or store faults.
15 *
16 * Note: this idiom only works when memcpy() compiles to a leaf function.
17 * If "sp" is updated during memcpy, the "jrp lr" will be incorrect.
18 *
19 * Also note that we are capturing "n" from the containing scope here.
20 */
21
22#define _ST(p, inst, v) \
23 ({ \
24 asm("1: " #inst " %0, %1;" \
25 ".pushsection .coldtext.memcpy,\"ax\";" \
26 "2: { move r0, %2; jrp lr };" \
27 ".section __ex_table,\"a\";" \
28 ".quad 1b, 2b;" \
29 ".popsection" \
30 : "=m" (*(p)) : "r" (v), "r" (n)); \
31 })
32
33#define _LD(p, inst) \
34 ({ \
35 unsigned long __v; \
36 asm("1: " #inst " %0, %1;" \
37 ".pushsection .coldtext.memcpy,\"ax\";" \
38 "2: { move r0, %2; jrp lr };" \
39 ".section __ex_table,\"a\";" \
40 ".quad 1b, 2b;" \
41 ".popsection" \
42 : "=r" (__v) : "m" (*(p)), "r" (n)); \
43 __v; \
44 })
45
46#define USERCOPY_FUNC __copy_to_user_inatomic
47#define ST1(p, v) _ST((p), st1, (v))
48#define ST2(p, v) _ST((p), st2, (v))
49#define ST4(p, v) _ST((p), st4, (v))
50#define ST8(p, v) _ST((p), st, (v))
51#define LD1 LD
52#define LD2 LD
53#define LD4 LD
54#define LD8 LD
55#include "memcpy_64.c"
56
57#define USERCOPY_FUNC __copy_from_user_inatomic
58#define ST1 ST
59#define ST2 ST
60#define ST4 ST
61#define ST8 ST
62#define LD1(p) _LD((p), ld1u)
63#define LD2(p) _LD((p), ld2u)
64#define LD4(p) _LD((p), ld4u)
65#define LD8(p) _LD((p), ld)
66#include "memcpy_64.c"
67
68#define USERCOPY_FUNC __copy_in_user_inatomic
69#define ST1(p, v) _ST((p), st1, (v))
70#define ST2(p, v) _ST((p), st2, (v))
71#define ST4(p, v) _ST((p), st4, (v))
72#define ST8(p, v) _ST((p), st, (v))
73#define LD1(p) _LD((p), ld1u)
74#define LD2(p) _LD((p), ld2u)
75#define LD4(p) _LD((p), ld4u)
76#define LD8(p) _LD((p), ld)
77#include "memcpy_64.c"
78
79unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
80 unsigned long n)
81{
82 unsigned long rc = __copy_from_user_inatomic(to, from, n);
83 if (unlikely(rc))
84 memset(to + n - rc, 0, rc);
85 return rc;
86}
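The three user-copy variants above come from re-including memcpy_64.c with the LD*/ST* macros redefined to the faulting asm forms, so one copy body yields several functions. A single-file analogue of that pattern, using a function-defining macro instead of a repeated #include (the names here are illustrative, not the kernel macros):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One copy body, parameterized by its load and store primitives. */
#define DEFINE_COPY(name, LOAD, STORE)					\
static size_t name(uint8_t *dst, const uint8_t *src, size_t n)		\
{									\
	size_t i;							\
	for (i = 0; i < n; i++)						\
		STORE(dst + i, LOAD(src + i));				\
	return 0;	/* kernel variants return bytes NOT copied */	\
}

/* Plain bindings: ordinary dereferences, like ST()/LD() above. */
#define LD_PLAIN(p)	(*(p))
#define ST_PLAIN(p, v)	(*(p) = (v))
DEFINE_COPY(copy_plain, LD_PLAIN, ST_PLAIN)

/* Instrumented bindings standing in for the faulting ld/st asm wrappers. */
#define LD_TRACED(p)	(printf("ld %p\n", (const void *)(p)), *(p))
#define ST_TRACED(p, v)	(printf("st %p\n", (void *)(p)), *(p) = (v))
DEFINE_COPY(copy_traced, LD_TRACED, ST_TRACED)

int main(void)
{
	uint8_t a[4] = { 1, 2, 3, 4 }, b[4];

	copy_plain(b, a, sizeof(a));
	copy_traced(b, a, sizeof(a));
	return 0;
}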
diff --git a/arch/tile/lib/memset_64.c b/arch/tile/lib/memset_64.c
new file mode 100644
index 000000000000..3873085711d5
--- /dev/null
+++ b/arch/tile/lib/memset_64.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <arch/chip.h>
16
17#include <linux/types.h>
18#include <linux/string.h>
19#include <linux/module.h>
20
21#undef memset
22
23void *memset(void *s, int c, size_t n)
24{
25 uint64_t *out64;
26 int n64, to_align64;
27 uint64_t v64;
28 uint8_t *out8 = s;
29
30 /* Experimentation shows that a trivial tight loop is a win up until
31 * around a size of 20, where writing a word at a time starts to win.
32 */
33#define BYTE_CUTOFF 20
34
35#if BYTE_CUTOFF < 7
36 /* This must be at least this big, or some code later
37 * on doesn't work.
38 */
39#error "BYTE_CUTOFF is too small"
40#endif
41
42 if (n < BYTE_CUTOFF) {
43 /* Strangely, this turns out to be the tightest way to
44 * write this loop.
45 */
46 if (n != 0) {
47 do {
48 /* Strangely, combining these into one line
49 * performs worse.
50 */
51 *out8 = c;
52 out8++;
53 } while (--n != 0);
54 }
55
56 return s;
57 }
58
59 /* Align 'out8'. We know n >= 7 so this won't write past the end. */
60 while (((uintptr_t) out8 & 7) != 0) {
61 *out8++ = c;
62 --n;
63 }
64
65 /* Align 'n'. */
66 while (n & 7)
67 out8[--n] = c;
68
69 out64 = (uint64_t *) out8;
70 n64 = n >> 3;
71
72 /* Tile input byte out to 64 bits. */
73 /* KLUDGE */
74 v64 = 0x0101010101010101ULL * (uint8_t)c;
75
76 /* This must be at least 8 or the following loop doesn't work. */
77#define CACHE_LINE_SIZE_IN_DOUBLEWORDS (CHIP_L2_LINE_SIZE() / 8)
78
79 /* Determine how many words we need to emit before the 'out64'
80 * pointer becomes aligned modulo the cache line size.
81 */
82 to_align64 = (-((uintptr_t)out64 >> 3)) &
83 (CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1);
84
85 /* Only bother aligning and using wh64 if there is at least
86 * one full cache line to process. This check also prevents
87 * overrunning the end of the buffer with alignment words.
88 */
89 if (to_align64 <= n64 - CACHE_LINE_SIZE_IN_DOUBLEWORDS) {
90 int lines_left;
91
92 /* Align out64 mod the cache line size so we can use wh64. */
93 n64 -= to_align64;
94 for (; to_align64 != 0; to_align64--) {
95 *out64 = v64;
96 out64++;
97 }
98
99 /* Use unsigned divide to turn this into a right shift. */
100 lines_left = (unsigned)n64 / CACHE_LINE_SIZE_IN_DOUBLEWORDS;
101
102 do {
103 /* Only wh64 a few lines at a time, so we don't
104 * exceed the maximum number of victim lines.
105 */
106 int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS())
107 ? lines_left
108 : CHIP_MAX_OUTSTANDING_VICTIMS());
109 uint64_t *wh = out64;
110 int i = x;
111 int j;
112
113 lines_left -= x;
114
115 do {
116 __insn_wh64(wh);
117 wh += CACHE_LINE_SIZE_IN_DOUBLEWORDS;
118 } while (--i);
119
120 for (j = x * (CACHE_LINE_SIZE_IN_DOUBLEWORDS / 4);
121 j != 0; j--) {
122 *out64++ = v64;
123 *out64++ = v64;
124 *out64++ = v64;
125 *out64++ = v64;
126 }
127 } while (lines_left != 0);
128
129 /* We processed all full lines above, so only this many
130 * words remain to be processed.
131 */
132 n64 &= CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1;
133 }
134
135 /* Now handle any leftover values. */
136 if (n64 != 0) {
137 do {
138 *out64 = v64;
139 out64++;
140 } while (--n64 != 0);
141 }
142
143 return s;
144}
145EXPORT_SYMBOL(memset);
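The to_align64 expression above computes how many doublewords must be emitted before out64 lands on a cache-line boundary: negate the doubleword index and reduce it modulo the line size in doublewords. A quick standalone check of that identity, assuming a 64-byte line (8 doublewords):

#include <stdio.h>
#include <stdint.h>

#define LINE_DWORDS 8	/* assumed: 64-byte L2 line / 8-byte doublewords */

int main(void)
{
	uintptr_t addr;

	for (addr = 0; addr < 64; addr += 8) {
		unsigned int to_align = (unsigned int)(-(addr >> 3)) &
					(LINE_DWORDS - 1);
		unsigned int expect = (unsigned int)(((64 - addr) / 8) %
					LINE_DWORDS);

		printf("byte offset %2u: to_align64 = %u (%s)\n",
		       (unsigned int)addr, to_align,
		       to_align == expect ? "ok" : "mismatch");
	}
	return 0;
}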
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
new file mode 100644
index 000000000000..d6fb9581e980
--- /dev/null
+++ b/arch/tile/lib/spinlock_64.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/spinlock.h>
16#include <linux/module.h>
17#include <asm/processor.h>
18
19#include "spinlock_common.h"
20
21/*
22 * Read the spinlock value without allocating in our cache and without
23 * causing an invalidation to another cpu with a copy of the cacheline.
24 * This is important when we are spinning waiting for the lock.
25 */
26static inline u32 arch_spin_read_noalloc(void *lock)
27{
28 return atomic_cmpxchg((atomic_t *)lock, -1, -1);
29}
30
31/*
32 * Wait until the high bits (current) match my ticket.
33 * If we notice the overflow bit set on entry, we clear it.
34 */
35void arch_spin_lock_slow(arch_spinlock_t *lock, u32 my_ticket)
36{
37 if (unlikely(my_ticket & __ARCH_SPIN_NEXT_OVERFLOW)) {
38 __insn_fetchand4(&lock->lock, ~__ARCH_SPIN_NEXT_OVERFLOW);
39 my_ticket &= ~__ARCH_SPIN_NEXT_OVERFLOW;
40 }
41
42 for (;;) {
43 u32 val = arch_spin_read_noalloc(lock);
44 u32 delta = my_ticket - arch_spin_current(val);
45 if (delta == 0)
46 return;
47 relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
48 }
49}
50EXPORT_SYMBOL(arch_spin_lock_slow);
51
52/*
53 * Check the lock to see if it is plausible, and try to get it with cmpxchg().
54 */
55int arch_spin_trylock(arch_spinlock_t *lock)
56{
57 u32 val = arch_spin_read_noalloc(lock);
58 if (unlikely(arch_spin_current(val) != arch_spin_next(val)))
59 return 0;
60 return cmpxchg(&lock->lock, val, (val + 1) & ~__ARCH_SPIN_NEXT_OVERFLOW)
61 == val;
62}
63EXPORT_SYMBOL(arch_spin_trylock);
64
65void arch_spin_unlock_wait(arch_spinlock_t *lock)
66{
67 u32 iterations = 0;
68 while (arch_spin_is_locked(lock))
69 delay_backoff(iterations++);
70}
71EXPORT_SYMBOL(arch_spin_unlock_wait);
72
73/*
74 * If the read lock fails due to a writer, we retry periodically
75 * until the value is positive and we write our incremented reader count.
76 */
77void __read_lock_failed(arch_rwlock_t *rw)
78{
79 u32 val;
80 int iterations = 0;
81 do {
82 delay_backoff(iterations++);
83 val = __insn_fetchaddgez4(&rw->lock, 1);
84 } while (unlikely(arch_write_val_locked(val)));
85}
86EXPORT_SYMBOL(__read_lock_failed);
87
88/*
89 * If we failed because there were readers, clear the "writer" bit
90 * so we don't block additional readers. Otherwise, there was another
91 * writer anyway, so our "fetchor" made no difference. Then wait,
92 * issuing periodic fetchor instructions, till we get the lock.
93 */
94void __write_lock_failed(arch_rwlock_t *rw, u32 val)
95{
96 int iterations = 0;
97 do {
98 if (!arch_write_val_locked(val))
99 val = __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
100 delay_backoff(iterations++);
101 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
102 } while (val != 0);
103}
104EXPORT_SYMBOL(__write_lock_failed);
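The lock above is a ticket lock: arch_spin_trylock() succeeds only when the "current" and "next" fields agree, and arch_spin_lock_slow() spins until "current" reaches the caller's ticket, backing off in proportion to the distance. Tile packs both counts plus an overflow bit into one 32-bit word; the sketch below shows the same current/next scheme with two separate C11 atomic counters and no overflow handling, purely as an illustration:

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint32_t next;		/* next ticket to hand out */
	_Atomic uint32_t current;	/* ticket now being served */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* Take a ticket; my ticket is the previous value of "next". */
	uint32_t my = atomic_fetch_add_explicit(&l->next, 1,
						memory_order_relaxed);

	/* Spin until "current" reaches my ticket; the Tile code instead
	 * relaxes for a time proportional to (my_ticket - current). */
	while (atomic_load_explicit(&l->current, memory_order_acquire) != my)
		;
}

static void ticket_unlock(ticket_lock_t *l)
{
	/* Hand the lock to the next ticket holder. */
	atomic_fetch_add_explicit(&l->current, 1, memory_order_release);
}

int main(void)
{
	static ticket_lock_t lock;	/* zero-initialized: unlocked */

	ticket_lock(&lock);
	ticket_unlock(&lock);
	return 0;
}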
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
new file mode 100644
index 000000000000..617a9273aaa8
--- /dev/null
+++ b/arch/tile/lib/strchr_64.c
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18
19#undef strchr
20
21char *strchr(const char *s, int c)
22{
23 int z, g;
24
25 /* Get an aligned pointer. */
26 const uintptr_t s_int = (uintptr_t) s;
27 const uint64_t *p = (const uint64_t *)(s_int & -8);
28
29 /* Create eight copies of the byte for which we are looking. */
30 const uint64_t goal = 0x0101010101010101ULL * (uint8_t) c;
31
32 /* Read the first aligned word, but force bytes before the string to
33 * match neither zero nor goal (we make sure the high bit of each
34 * byte is 1, and the low 7 bits are all the opposite of the goal
35 * byte).
36 *
37 * Note that this shift count expression works because we know shift
38 * counts are taken mod 64.
39 */
40 const uint64_t before_mask = (1ULL << (s_int << 3)) - 1;
41 uint64_t v = (*p | before_mask) ^
42 (goal & __insn_v1shrsi(before_mask, 1));
43
44 uint64_t zero_matches, goal_matches;
45 while (1) {
46 /* Look for a terminating '\0'. */
47 zero_matches = __insn_v1cmpeqi(v, 0);
48
49 /* Look for the goal byte. */
50 goal_matches = __insn_v1cmpeq(v, goal);
51
52 if (__builtin_expect((zero_matches | goal_matches) != 0, 0))
53 break;
54
55 v = *++p;
56 }
57
58 z = __insn_ctz(zero_matches);
59 g = __insn_ctz(goal_matches);
60
61 /* If we found c before '\0' we got a match. Note that if c == '\0'
62 * then g == z, and we correctly return the address of the '\0'
63 * rather than NULL.
64 */
65 return (g <= z) ? ((char *)p) + (g >> 3) : NULL;
66}
67EXPORT_SYMBOL(strchr);
diff --git a/arch/tile/lib/strlen_64.c b/arch/tile/lib/strlen_64.c
new file mode 100644
index 000000000000..1c92d46202a8
--- /dev/null
+++ b/arch/tile/lib/strlen_64.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18
19#undef strlen
20
21size_t strlen(const char *s)
22{
23 /* Get an aligned pointer. */
24 const uintptr_t s_int = (uintptr_t) s;
25 const uint64_t *p = (const uint64_t *)(s_int & -8);
26
27 /* Read the first word, but force bytes before the string to be nonzero.
28 * This expression works because we know shift counts are taken mod 64.
29 */
30 uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);
31
32 uint64_t bits;
33 while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
34 v = *++p;
35
36 return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
37}
38EXPORT_SYMBOL(strlen);
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
new file mode 100644
index 000000000000..2ff44f87b78e
--- /dev/null
+++ b/arch/tile/lib/usercopy_64.S
@@ -0,0 +1,196 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/linkage.h>
16#include <asm/errno.h>
17#include <asm/cache.h>
18#include <arch/chip.h>
19
20/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
21
22 .pushsection .fixup,"ax"
23
24get_user_fault:
25 { movei r1, -EFAULT; move r0, zero }
26 jrp lr
27 ENDPROC(get_user_fault)
28
29put_user_fault:
30 { movei r0, -EFAULT; jrp lr }
31 ENDPROC(put_user_fault)
32
33 .popsection
34
35/*
36 * __get_user_N functions take a pointer in r0, and return 0 in r1
37 * on success, with the value in r0; or else -EFAULT in r1.
38 */
39#define __get_user_N(bytes, LOAD) \
40 STD_ENTRY(__get_user_##bytes); \
411: { LOAD r0, r0; move r1, zero }; \
42 jrp lr; \
43 STD_ENDPROC(__get_user_##bytes); \
44 .pushsection __ex_table,"a"; \
45 .quad 1b, get_user_fault; \
46 .popsection
47
48__get_user_N(1, ld1u)
49__get_user_N(2, ld2u)
50__get_user_N(4, ld4u)
51__get_user_N(8, ld)
52
53/*
54 * __put_user_N functions take a value in r0 and a pointer in r1,
55 * and return 0 in r0 on success or -EFAULT on failure.
56 */
57#define __put_user_N(bytes, STORE) \
58 STD_ENTRY(__put_user_##bytes); \
591: { STORE r1, r0; move r0, zero }; \
60 jrp lr; \
61 STD_ENDPROC(__put_user_##bytes); \
62 .pushsection __ex_table,"a"; \
63 .quad 1b, put_user_fault; \
64 .popsection
65
66__put_user_N(1, st1)
67__put_user_N(2, st2)
68__put_user_N(4, st4)
69__put_user_N(8, st)
70
71/*
72 * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
73 * It returns the length, including the terminating NUL, or zero on exception.
74 * If length is greater than the bound, returns one plus the bound.
75 */
76STD_ENTRY(strnlen_user_asm)
77 { beqz r1, 2f; addi r3, r0, -1 } /* bias down to include NUL */
781: { ld1u r4, r0; addi r1, r1, -1 }
79 beqz r4, 2f
80 { bnezt r1, 1b; addi r0, r0, 1 }
812: { sub r0, r0, r3; jrp lr }
82 STD_ENDPROC(strnlen_user_asm)
83 .pushsection .fixup,"ax"
84strnlen_user_fault:
85 { move r0, zero; jrp lr }
86 ENDPROC(strnlen_user_fault)
87 .section __ex_table,"a"
88 .quad 1b, strnlen_user_fault
89 .popsection
90
91/*
92 * strncpy_from_user_asm takes the kernel target pointer in r0,
93 * the userspace source pointer in r1, and the length bound (including
94 * the trailing NUL) in r2. On success, it returns the string length
95 * (not including the trailing NUL), or -EFAULT on failure.
96 */
97STD_ENTRY(strncpy_from_user_asm)
98 { beqz r2, 2f; move r3, r0 }
991: { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
100 { st1 r0, r4; addi r0, r0, 1 }
101 beqz r2, 2f
102 bnezt r4, 1b
103 addi r0, r0, -1 /* don't count the trailing NUL */
1042: { sub r0, r0, r3; jrp lr }
105 STD_ENDPROC(strncpy_from_user_asm)
106 .pushsection .fixup,"ax"
107strncpy_from_user_fault:
108 { movei r0, -EFAULT; jrp lr }
109 ENDPROC(strncpy_from_user_fault)
110 .section __ex_table,"a"
111 .quad 1b, strncpy_from_user_fault
112 .popsection
113
114/*
115 * clear_user_asm takes the user target address in r0 and the
116 * number of bytes to zero in r1.
117 * It returns the number of uncopiable bytes (hopefully zero) in r0.
118 * Note that we don't use a separate .fixup section here since we fall
119 * through into the "fixup" code as the last straight-line bundle anyway.
120 */
121STD_ENTRY(clear_user_asm)
122 { beqz r1, 2f; or r2, r0, r1 }
123 andi r2, r2, 7
124 beqzt r2, .Lclear_aligned_user_asm
1251: { st1 r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
126 bnezt r1, 1b
1272: { move r0, r1; jrp lr }
128 .pushsection __ex_table,"a"
129 .quad 1b, 2b
130 .popsection
131
132.Lclear_aligned_user_asm:
1331: { st r0, zero; addi r0, r0, 8; addi r1, r1, -8 }
134 bnezt r1, 1b
1352: { move r0, r1; jrp lr }
136 STD_ENDPROC(clear_user_asm)
137 .pushsection __ex_table,"a"
138 .quad 1b, 2b
139 .popsection
140
141/*
142 * flush_user_asm takes the user target address in r0 and the
143 * number of bytes to flush in r1.
144 * It returns the number of unflushable bytes (hopefully zero) in r0.
145 */
146STD_ENTRY(flush_user_asm)
147 beqz r1, 2f
148 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
149 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
150 { and r0, r0, r2; and r1, r1, r2 }
151 { sub r1, r1, r0 }
1521: { flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }
153 { addi r0, r0, CHIP_FLUSH_STRIDE(); bnezt r1, 1b }
1542: { move r0, r1; jrp lr }
155 STD_ENDPROC(flush_user_asm)
156 .pushsection __ex_table,"a"
157 .quad 1b, 2b
158 .popsection
159
160/*
161 * inv_user_asm takes the user target address in r0 and the
162 * number of bytes to invalidate in r1.
163 * It returns the number of not inv'able bytes (hopefully zero) in r0.
164 */
165STD_ENTRY(inv_user_asm)
166 beqz r1, 2f
167 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
168 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
169 { and r0, r0, r2; and r1, r1, r2 }
170 { sub r1, r1, r0 }
1711: { inv r0; addi r1, r1, -CHIP_INV_STRIDE() }
172 { addi r0, r0, CHIP_INV_STRIDE(); bnezt r1, 1b }
1732: { move r0, r1; jrp lr }
174 STD_ENDPROC(inv_user_asm)
175 .pushsection __ex_table,"a"
176 .quad 1b, 2b
177 .popsection
178
179/*
180 * finv_user_asm takes the user target address in r0 and the
181 * number of bytes to flush-invalidate in r1.
182 * It returns the number of not finv'able bytes (hopefully zero) in r0.
183 */
184STD_ENTRY(finv_user_asm)
185 beqz r1, 2f
186 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
187 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
188 { and r0, r0, r2; and r1, r1, r2 }
189 { sub r1, r1, r0 }
1901: { finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }
191 { addi r0, r0, CHIP_FINV_STRIDE(); bnezt r1, 1b }
1922: { move r0, r1; jrp lr }
193 STD_ENDPROC(finv_user_asm)
194 .pushsection __ex_table,"a"
195 .quad 1b, 2b
196 .popsection
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 51f8663bf074..25b7b90fd620 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -43,8 +43,11 @@
43 43
44#include <arch/interrupts.h> 44#include <arch/interrupts.h>
45 45
46static noinline void force_sig_info_fault(int si_signo, int si_code, 46static noinline void force_sig_info_fault(const char *type, int si_signo,
47 unsigned long address, int fault_num, struct task_struct *tsk) 47 int si_code, unsigned long address,
48 int fault_num,
49 struct task_struct *tsk,
50 struct pt_regs *regs)
48{ 51{
49 siginfo_t info; 52 siginfo_t info;
50 53
@@ -59,6 +62,7 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
59 info.si_code = si_code; 62 info.si_code = si_code;
60 info.si_addr = (void __user *)address; 63 info.si_addr = (void __user *)address;
61 info.si_trapno = fault_num; 64 info.si_trapno = fault_num;
65 trace_unhandled_signal(type, regs, address, si_signo);
62 force_sig_info(si_signo, &info, tsk); 66 force_sig_info(si_signo, &info, tsk);
63} 67}
64 68
@@ -71,11 +75,12 @@ SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
71 struct pt_regs *, regs) 75 struct pt_regs *, regs)
72{ 76{
73 if (address >= PAGE_OFFSET) 77 if (address >= PAGE_OFFSET)
74 force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address, 78 force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
75 INT_DTLB_MISS, current); 79 address, INT_DTLB_MISS, current, regs);
76 else 80 else
77 force_sig_info_fault(SIGBUS, BUS_ADRALN, address, 81 force_sig_info_fault("atomic alignment fault", SIGBUS,
78 INT_UNALIGN_DATA, current); 82 BUS_ADRALN, address,
83 INT_UNALIGN_DATA, current, regs);
79 84
80 /* 85 /*
81 * Adjust pc to point at the actual instruction, which is unusual 86 * Adjust pc to point at the actual instruction, which is unusual
@@ -471,8 +476,8 @@ bad_area_nosemaphore:
471 */ 476 */
472 local_irq_enable(); 477 local_irq_enable();
473 478
474 force_sig_info_fault(SIGSEGV, si_code, address, 479 force_sig_info_fault("segfault", SIGSEGV, si_code, address,
475 fault_num, tsk); 480 fault_num, tsk, regs);
476 return 0; 481 return 0;
477 } 482 }
478 483
@@ -547,7 +552,8 @@ do_sigbus:
547 if (is_kernel_mode) 552 if (is_kernel_mode)
548 goto no_context; 553 goto no_context;
549 554
550 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk); 555 force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
556 fault_num, tsk, regs);
551 return 0; 557 return 0;
552} 558}
553 559
@@ -732,6 +738,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
732 panic("Bad fault number %d in do_page_fault", fault_num); 738 panic("Bad fault number %d in do_page_fault", fault_num);
733 } 739 }
734 740
741#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
735 if (EX1_PL(regs->ex1) != USER_PL) { 742 if (EX1_PL(regs->ex1) != USER_PL) {
736 struct async_tlb *async; 743 struct async_tlb *async;
737 switch (fault_num) { 744 switch (fault_num) {
@@ -775,6 +782,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
775 return; 782 return;
776 } 783 }
777 } 784 }
785#endif
778 786
779 handle_page_fault(regs, fault_num, is_page_fault, address, write); 787 handle_page_fault(regs, fault_num, is_page_fault, address, write);
780} 788}
@@ -801,8 +809,6 @@ static void handle_async_page_fault(struct pt_regs *regs,
801 async->address, async->is_write); 809 async->address, async->is_write);
802 } 810 }
803} 811}
804#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
805
806 812
807/* 813/*
808 * This routine effectively re-issues asynchronous page faults 814 * This routine effectively re-issues asynchronous page faults
@@ -824,6 +830,8 @@ void do_async_page_fault(struct pt_regs *regs)
824 handle_async_page_fault(regs, &current->thread.sn_async_tlb); 830 handle_async_page_fault(regs, &current->thread.sn_async_tlb);
825#endif 831#endif
826} 832}
833#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
834
827 835
828void vmalloc_sync_all(void) 836void vmalloc_sync_all(void)
829{ 837{
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d6e87fda2fb2..4e10c4023028 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -60,8 +60,6 @@ unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
60EXPORT_SYMBOL(VMALLOC_RESERVE); 60EXPORT_SYMBOL(VMALLOC_RESERVE);
61#endif 61#endif
62 62
63DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
64
65/* Create an L2 page table */ 63/* Create an L2 page table */
66static pte_t * __init alloc_pte(void) 64static pte_t * __init alloc_pte(void)
67{ 65{
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
new file mode 100644
index 000000000000..e76fea688beb
--- /dev/null
+++ b/arch/tile/mm/migrate_64.S
@@ -0,0 +1,187 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * This routine is a helper for migrating the home of a set of pages to
15 * a new cpu. See the documentation in homecache.c for more information.
16 */
17
18#include <linux/linkage.h>
19#include <linux/threads.h>
20#include <asm/page.h>
21#include <asm/thread_info.h>
22#include <asm/types.h>
23#include <asm/asm-offsets.h>
24#include <hv/hypervisor.h>
25
26 .text
27
28/*
29 * First, some definitions that apply to all the code in the file.
30 */
31
32/* Locals (caller-save) */
33#define r_tmp r10
34#define r_save_sp r11
35
36/* What we save where in the stack frame; must include all callee-saves. */
37#define FRAME_SP 8
38#define FRAME_R30 16
39#define FRAME_R31 24
40#define FRAME_R32 32
41#define FRAME_R33 40
42#define FRAME_SIZE 48
43
44
45
46
47/*
48 * On entry:
49 *
50 * r0 the new context PA to install (moved to r_context)
51 * r1 PTE to use for context access (moved to r_access)
52 * r2 ASID to use for new context (moved to r_asid)
53 * r3 pointer to cpumask with just this cpu set in it (r_my_cpumask)
54 */
55
56/* Arguments (caller-save) */
57#define r_context_in r0
58#define r_access_in r1
59#define r_asid_in r2
60#define r_my_cpumask r3
61
62/* Locals (callee-save); must not be more than FRAME_xxx above. */
63#define r_save_ics r30
64#define r_context r31
65#define r_access r32
66#define r_asid r33
67
68/*
69 * Caller-save locals and frame constants are the same as
70 * for homecache_migrate_stack_and_flush.
71 */
72
73STD_ENTRY(flush_and_install_context)
74 /*
75 * Create a stack frame; we can't touch it once we flush the
76 * cache until we install the new page table and flush the TLB.
77 */
78 {
79 move r_save_sp, sp
80 st sp, lr
81 addi sp, sp, -FRAME_SIZE
82 }
83 addi r_tmp, sp, FRAME_SP
84 {
85 st r_tmp, r_save_sp
86 addi r_tmp, sp, FRAME_R30
87 }
88 {
89 st r_tmp, r30
90 addi r_tmp, sp, FRAME_R31
91 }
92 {
93 st r_tmp, r31
94 addi r_tmp, sp, FRAME_R32
95 }
96 {
97 st r_tmp, r32
98 addi r_tmp, sp, FRAME_R33
99 }
100 st r_tmp, r33
101
102 /* Move some arguments to callee-save registers. */
103 {
104 move r_context, r_context_in
105 move r_access, r_access_in
106 }
107 move r_asid, r_asid_in
108
109 /* Disable interrupts, since we can't use our stack. */
110 {
111 mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
112 movei r_tmp, 1
113 }
114 mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
115
116 /* First, flush our L2 cache. */
117 {
118 move r0, zero /* cache_pa */
119 moveli r1, hw2_last(HV_FLUSH_EVICT_L2) /* cache_control */
120 }
121 {
122 shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
123 move r2, r_my_cpumask /* cache_cpumask */
124 }
125 {
126 shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2)
127 move r3, zero /* tlb_va */
128 }
129 {
130 move r4, zero /* tlb_length */
131 move r5, zero /* tlb_pgsize */
132 }
133 {
134 move r6, zero /* tlb_cpumask */
135 move r7, zero /* asids */
136 }
137 {
138 move r8, zero /* asidcount */
139 jal hv_flush_remote
140 }
141 bnez r0, 1f
142
143 /* Now install the new page table. */
144 {
145 move r0, r_context
146 move r1, r_access
147 }
148 {
149 move r2, r_asid
150 movei r3, HV_CTX_DIRECTIO
151 }
152 jal hv_install_context
153 bnez r0, 1f
154
155 /* Finally, flush the TLB. */
156 {
157 movei r0, 0 /* preserve_global */
158 jal hv_flush_all
159 }
160
1611: /* Reset interrupts back how they were before. */
162 mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
163
164 /* Restore the callee-saved registers and return. */
165 addli lr, sp, FRAME_SIZE
166 {
167 ld lr, lr
168 addli r_tmp, sp, FRAME_R30
169 }
170 {
171 ld r30, r_tmp
172 addli r_tmp, sp, FRAME_R31
173 }
174 {
175 ld r31, r_tmp
176 addli r_tmp, sp, FRAME_R32
177 }
178 {
179 ld r32, r_tmp
180 addli r_tmp, sp, FRAME_R33
181 }
182 {
183 ld r33, r_tmp
184 addi sp, sp, FRAME_SIZE
185 }
186 jrp lr
187 STD_ENDPROC(flush_and_install_context)
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 8fce5e536b0f..68205fd3b08c 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -28,13 +28,13 @@ config GCOV
28 If you're involved in UML kernel development and want to use gcov, 28 If you're involved in UML kernel development and want to use gcov,
29 say Y. If you're unsure, say N. 29 say Y. If you're unsure, say N.
30 30
31config DEBUG_STACK_USAGE 31config EARLY_PRINTK
32 bool "Stack utilization instrumentation" 32 bool "Early printk"
33 default N 33 default y
34 help 34 ---help---
35 Track the maximum kernel stack usage - this will look at each 35 Write kernel log output directly to stdout.
36 kernel stack at process exit and log it if it's the deepest 36
37 stack seen so far. 37 This is useful for kernel debugging when your machine crashes very
38 early before the console code is initialized.
38 39
39 This option will slow down process creation and destruction somewhat.
40endmenu 40endmenu
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
index 795ea8e869f4..8aae429a56e2 100644
--- a/arch/um/Kconfig.x86
+++ b/arch/um/Kconfig.x86
@@ -15,7 +15,6 @@ endmenu
15config UML_X86 15config UML_X86
16 def_bool y 16 def_bool y
17 select GENERIC_FIND_FIRST_BIT 17 select GENERIC_FIND_FIRST_BIT
18 select GENERIC_FIND_NEXT_BIT
19 18
20config 64BIT 19config 64BIT
21 bool 20 bool
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 1d9b6ae967b0..e7582e1d248c 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -9,7 +9,7 @@
 slip-objs := slip_kern.o slip_user.o
 slirp-objs := slirp_kern.o slirp_user.o
 daemon-objs := daemon_kern.o daemon_user.o
-mcast-objs := mcast_kern.o mcast_user.o
+umcast-objs := umcast_kern.o umcast_user.o
 net-objs := net_kern.o net_user.o
 mconsole-objs := mconsole_kern.o mconsole_user.o
 hostaudio-objs := hostaudio_kern.o
@@ -44,7 +44,7 @@ obj-$(CONFIG_UML_NET_SLIP) += slip.o slip_common.o
 obj-$(CONFIG_UML_NET_SLIRP) += slirp.o slip_common.o
 obj-$(CONFIG_UML_NET_DAEMON) += daemon.o
 obj-$(CONFIG_UML_NET_VDE) += vde.o
-obj-$(CONFIG_UML_NET_MCAST) += mcast.o
+obj-$(CONFIG_UML_NET_MCAST) += umcast.o
 obj-$(CONFIG_UML_NET_PCAP) += pcap.o
 obj-$(CONFIG_UML_NET) += net.o
 obj-$(CONFIG_MCONSOLE) += mconsole.o
diff --git a/arch/um/drivers/mcast.h b/arch/um/drivers/mcast.h
deleted file mode 100644
index 6fa282e896be..000000000000
--- a/arch/um/drivers/mcast.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __DRIVERS_MCAST_H
7#define __DRIVERS_MCAST_H
8
9#include "net_user.h"
10
11struct mcast_data {
12 char *addr;
13 unsigned short port;
14 void *mcast_addr;
15 int ttl;
16 void *dev;
17};
18
19extern const struct net_user_info mcast_user_info;
20
21extern int mcast_user_write(int fd, void *buf, int len,
22 struct mcast_data *pri);
23
24#endif
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
deleted file mode 100644
index ffc6416d5ed7..000000000000
--- a/arch/um/drivers/mcast_kern.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
4 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 */
13
14#include "linux/init.h"
15#include <linux/netdevice.h>
16#include "mcast.h"
17#include "net_kern.h"
18
19struct mcast_init {
20 char *addr;
21 int port;
22 int ttl;
23};
24
25static void mcast_init(struct net_device *dev, void *data)
26{
27 struct uml_net_private *pri;
28 struct mcast_data *dpri;
29 struct mcast_init *init = data;
30
31 pri = netdev_priv(dev);
32 dpri = (struct mcast_data *) pri->user;
33 dpri->addr = init->addr;
34 dpri->port = init->port;
35 dpri->ttl = init->ttl;
36 dpri->dev = dev;
37
38 printk("mcast backend multicast address: %s:%u, TTL:%u\n",
39 dpri->addr, dpri->port, dpri->ttl);
40}
41
42static int mcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
43{
44 return net_recvfrom(fd, skb_mac_header(skb),
45 skb->dev->mtu + ETH_HEADER_OTHER);
46}
47
48static int mcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
49{
50 return mcast_user_write(fd, skb->data, skb->len,
51 (struct mcast_data *) &lp->user);
52}
53
54static const struct net_kern_info mcast_kern_info = {
55 .init = mcast_init,
56 .protocol = eth_protocol,
57 .read = mcast_read,
58 .write = mcast_write,
59};
60
61static int mcast_setup(char *str, char **mac_out, void *data)
62{
63 struct mcast_init *init = data;
64 char *port_str = NULL, *ttl_str = NULL, *remain;
65 char *last;
66
67 *init = ((struct mcast_init)
68 { .addr = "239.192.168.1",
69 .port = 1102,
70 .ttl = 1 });
71
72 remain = split_if_spec(str, mac_out, &init->addr, &port_str, &ttl_str,
73 NULL);
74 if (remain != NULL) {
75 printk(KERN_ERR "mcast_setup - Extra garbage on "
76 "specification : '%s'\n", remain);
77 return 0;
78 }
79
80 if (port_str != NULL) {
81 init->port = simple_strtoul(port_str, &last, 10);
82 if ((*last != '\0') || (last == port_str)) {
83 printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
84 port_str);
85 return 0;
86 }
87 }
88
89 if (ttl_str != NULL) {
90 init->ttl = simple_strtoul(ttl_str, &last, 10);
91 if ((*last != '\0') || (last == ttl_str)) {
92 printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n",
93 ttl_str);
94 return 0;
95 }
96 }
97
98 printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr,
99 init->port, init->ttl);
100
101 return 1;
102}
103
104static struct transport mcast_transport = {
105 .list = LIST_HEAD_INIT(mcast_transport.list),
106 .name = "mcast",
107 .setup = mcast_setup,
108 .user = &mcast_user_info,
109 .kern = &mcast_kern_info,
110 .private_size = sizeof(struct mcast_data),
111 .setup_size = sizeof(struct mcast_init),
112};
113
114static int register_mcast(void)
115{
116 register_transport(&mcast_transport);
117 return 0;
118}
119
120late_initcall(register_mcast);
diff --git a/arch/um/drivers/mcast_user.c b/arch/um/drivers/mcast_user.c
deleted file mode 100644
index ee19e91568a2..000000000000
--- a/arch/um/drivers/mcast_user.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 *
13 */
14
15#include <unistd.h>
16#include <errno.h>
17#include <netinet/in.h>
18#include "kern_constants.h"
19#include "mcast.h"
20#include "net_user.h"
21#include "um_malloc.h"
22#include "user.h"
23
24static struct sockaddr_in *new_addr(char *addr, unsigned short port)
25{
26 struct sockaddr_in *sin;
27
28 sin = uml_kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL);
29 if (sin == NULL) {
30 printk(UM_KERN_ERR "new_addr: allocation of sockaddr_in "
31 "failed\n");
32 return NULL;
33 }
34 sin->sin_family = AF_INET;
35 sin->sin_addr.s_addr = in_aton(addr);
36 sin->sin_port = htons(port);
37 return sin;
38}
39
40static int mcast_user_init(void *data, void *dev)
41{
42 struct mcast_data *pri = data;
43
44 pri->mcast_addr = new_addr(pri->addr, pri->port);
45 pri->dev = dev;
46 return 0;
47}
48
49static void mcast_remove(void *data)
50{
51 struct mcast_data *pri = data;
52
53 kfree(pri->mcast_addr);
54 pri->mcast_addr = NULL;
55}
56
57static int mcast_open(void *data)
58{
59 struct mcast_data *pri = data;
60 struct sockaddr_in *sin = pri->mcast_addr;
61 struct ip_mreq mreq;
62 int fd, yes = 1, err = -EINVAL;
63
64
65 if ((sin->sin_addr.s_addr == 0) || (sin->sin_port == 0))
66 goto out;
67
68 fd = socket(AF_INET, SOCK_DGRAM, 0);
69
70 if (fd < 0) {
71 err = -errno;
72 printk(UM_KERN_ERR "mcast_open : data socket failed, "
73 "errno = %d\n", errno);
74 goto out;
75 }
76
77 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) {
78 err = -errno;
79 printk(UM_KERN_ERR "mcast_open: SO_REUSEADDR failed, "
80 "errno = %d\n", errno);
81 goto out_close;
82 }
83
84 /* set ttl according to config */
85 if (setsockopt(fd, SOL_IP, IP_MULTICAST_TTL, &pri->ttl,
86 sizeof(pri->ttl)) < 0) {
87 err = -errno;
88 printk(UM_KERN_ERR "mcast_open: IP_MULTICAST_TTL failed, "
89 "error = %d\n", errno);
90 goto out_close;
91 }
92
93 /* set LOOP, so data does get fed back to local sockets */
94 if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP, &yes, sizeof(yes)) < 0) {
95 err = -errno;
96 printk(UM_KERN_ERR "mcast_open: IP_MULTICAST_LOOP failed, "
97 "error = %d\n", errno);
98 goto out_close;
99 }
100
101 /* bind socket to mcast address */
102 if (bind(fd, (struct sockaddr *) sin, sizeof(*sin)) < 0) {
103 err = -errno;
104 printk(UM_KERN_ERR "mcast_open : data bind failed, "
105 "errno = %d\n", errno);
106 goto out_close;
107 }
108
109 /* subscribe to the multicast group */
110 mreq.imr_multiaddr.s_addr = sin->sin_addr.s_addr;
111 mreq.imr_interface.s_addr = 0;
112 if (setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP,
113 &mreq, sizeof(mreq)) < 0) {
114 err = -errno;
115 printk(UM_KERN_ERR "mcast_open: IP_ADD_MEMBERSHIP failed, "
116 "error = %d\n", errno);
117 printk(UM_KERN_ERR "There appears not to be a multicast-"
118 "capable network interface on the host.\n");
119 printk(UM_KERN_ERR "eth0 should be configured in order to use "
120 "the multicast transport.\n");
121 goto out_close;
122 }
123
124 return fd;
125
126 out_close:
127 close(fd);
128 out:
129 return err;
130}
131
132static void mcast_close(int fd, void *data)
133{
134 struct ip_mreq mreq;
135 struct mcast_data *pri = data;
136 struct sockaddr_in *sin = pri->mcast_addr;
137
138 mreq.imr_multiaddr.s_addr = sin->sin_addr.s_addr;
139 mreq.imr_interface.s_addr = 0;
140 if (setsockopt(fd, SOL_IP, IP_DROP_MEMBERSHIP,
141 &mreq, sizeof(mreq)) < 0) {
142 printk(UM_KERN_ERR "mcast_open: IP_DROP_MEMBERSHIP failed, "
143 "error = %d\n", errno);
144 }
145
146 close(fd);
147}
148
149int mcast_user_write(int fd, void *buf, int len, struct mcast_data *pri)
150{
151 struct sockaddr_in *data_addr = pri->mcast_addr;
152
153 return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr));
154}
155
156const struct net_user_info mcast_user_info = {
157 .init = mcast_user_init,
158 .open = mcast_open,
159 .close = mcast_close,
160 .remove = mcast_remove,
161 .add_address = NULL,
162 .delete_address = NULL,
163 .mtu = ETH_MAX_PACKET,
164 .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
165};
diff --git a/arch/um/drivers/umcast.h b/arch/um/drivers/umcast.h
new file mode 100644
index 000000000000..6f8c0fe890fb
--- /dev/null
+++ b/arch/um/drivers/umcast.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __DRIVERS_UMCAST_H
7#define __DRIVERS_UMCAST_H
8
9#include "net_user.h"
10
11struct umcast_data {
12 char *addr;
13 unsigned short lport;
14 unsigned short rport;
15 void *listen_addr;
16 void *remote_addr;
17 int ttl;
18 int unicast;
19 void *dev;
20};
21
22extern const struct net_user_info umcast_user_info;
23
24extern int umcast_user_write(int fd, void *buf, int len,
25 struct umcast_data *pri);
26
27#endif
diff --git a/arch/um/drivers/umcast_kern.c b/arch/um/drivers/umcast_kern.c
new file mode 100644
index 000000000000..42dab11d2ecf
--- /dev/null
+++ b/arch/um/drivers/umcast_kern.c
@@ -0,0 +1,188 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
4 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 */
13
14#include "linux/init.h"
15#include <linux/netdevice.h>
16#include "umcast.h"
17#include "net_kern.h"
18
19struct umcast_init {
20 char *addr;
21 int lport;
22 int rport;
23 int ttl;
24 bool unicast;
25};
26
27static void umcast_init(struct net_device *dev, void *data)
28{
29 struct uml_net_private *pri;
30 struct umcast_data *dpri;
31 struct umcast_init *init = data;
32
33 pri = netdev_priv(dev);
34 dpri = (struct umcast_data *) pri->user;
35 dpri->addr = init->addr;
36 dpri->lport = init->lport;
37 dpri->rport = init->rport;
38 dpri->unicast = init->unicast;
39 dpri->ttl = init->ttl;
40 dpri->dev = dev;
41
42 if (dpri->unicast) {
43 printk(KERN_INFO "ucast backend address: %s:%u listen port: "
44 "%u\n", dpri->addr, dpri->rport, dpri->lport);
45 } else {
46 printk(KERN_INFO "mcast backend multicast address: %s:%u, "
47 "TTL:%u\n", dpri->addr, dpri->lport, dpri->ttl);
48 }
49}
50
51static int umcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
52{
53 return net_recvfrom(fd, skb_mac_header(skb),
54 skb->dev->mtu + ETH_HEADER_OTHER);
55}
56
57static int umcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
58{
59 return umcast_user_write(fd, skb->data, skb->len,
60 (struct umcast_data *) &lp->user);
61}
62
63static const struct net_kern_info umcast_kern_info = {
64 .init = umcast_init,
65 .protocol = eth_protocol,
66 .read = umcast_read,
67 .write = umcast_write,
68};
69
70static int mcast_setup(char *str, char **mac_out, void *data)
71{
72 struct umcast_init *init = data;
73 char *port_str = NULL, *ttl_str = NULL, *remain;
74 char *last;
75
76 *init = ((struct umcast_init)
77 { .addr = "239.192.168.1",
78 .lport = 1102,
79 .ttl = 1 });
80
81 remain = split_if_spec(str, mac_out, &init->addr, &port_str, &ttl_str,
82 NULL);
83 if (remain != NULL) {
84 printk(KERN_ERR "mcast_setup - Extra garbage on "
85 "specification : '%s'\n", remain);
86 return 0;
87 }
88
89 if (port_str != NULL) {
90 init->lport = simple_strtoul(port_str, &last, 10);
91 if ((*last != '\0') || (last == port_str)) {
92 printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
93 port_str);
94 return 0;
95 }
96 }
97
98 if (ttl_str != NULL) {
99 init->ttl = simple_strtoul(ttl_str, &last, 10);
100 if ((*last != '\0') || (last == ttl_str)) {
101 printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n",
102 ttl_str);
103 return 0;
104 }
105 }
106
107 init->unicast = false;
108 init->rport = init->lport;
109
110 printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr,
111 init->lport, init->ttl);
112
113 return 1;
114}
115
116static int ucast_setup(char *str, char **mac_out, void *data)
117{
118 struct umcast_init *init = data;
119 char *lport_str = NULL, *rport_str = NULL, *remain;
120 char *last;
121
122 *init = ((struct umcast_init)
123 { .addr = "",
124 .lport = 1102,
125 .rport = 1102 });
126
127 remain = split_if_spec(str, mac_out, &init->addr,
128 &lport_str, &rport_str, NULL);
129 if (remain != NULL) {
130 printk(KERN_ERR "ucast_setup - Extra garbage on "
131 "specification : '%s'\n", remain);
132 return 0;
133 }
134
135 if (lport_str != NULL) {
136 init->lport = simple_strtoul(lport_str, &last, 10);
137 if ((*last != '\0') || (last == lport_str)) {
138 printk(KERN_ERR "ucast_setup - Bad listen port : "
139 "'%s'\n", lport_str);
140 return 0;
141 }
142 }
143
144 if (rport_str != NULL) {
145 init->rport = simple_strtoul(rport_str, &last, 10);
146 if ((*last != '\0') || (last == rport_str)) {
147 printk(KERN_ERR "ucast_setup - Bad remote port : "
148 "'%s'\n", rport_str);
149 return 0;
150 }
151 }
152
153 init->unicast = true;
154
155 printk(KERN_INFO "Configured ucast device: :%u -> %s:%u\n",
156 init->lport, init->addr, init->rport);
157
158 return 1;
159}
160
161static struct transport mcast_transport = {
162 .list = LIST_HEAD_INIT(mcast_transport.list),
163 .name = "mcast",
164 .setup = mcast_setup,
165 .user = &umcast_user_info,
166 .kern = &umcast_kern_info,
167 .private_size = sizeof(struct umcast_data),
168 .setup_size = sizeof(struct umcast_init),
169};
170
171static struct transport ucast_transport = {
172 .list = LIST_HEAD_INIT(ucast_transport.list),
173 .name = "ucast",
174 .setup = ucast_setup,
175 .user = &umcast_user_info,
176 .kern = &umcast_kern_info,
177 .private_size = sizeof(struct umcast_data),
178 .setup_size = sizeof(struct umcast_init),
179};
180
181static int register_umcast(void)
182{
183 register_transport(&mcast_transport);
184 register_transport(&ucast_transport);
185 return 0;
186}
187
188late_initcall(register_umcast);
diff --git a/arch/um/drivers/umcast_user.c b/arch/um/drivers/umcast_user.c
new file mode 100644
index 000000000000..59c56fd6f52a
--- /dev/null
+++ b/arch/um/drivers/umcast_user.c
@@ -0,0 +1,186 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 *
13 */
14
15#include <unistd.h>
16#include <errno.h>
17#include <netinet/in.h>
18#include "kern_constants.h"
19#include "umcast.h"
20#include "net_user.h"
21#include "um_malloc.h"
22#include "user.h"
23
24static struct sockaddr_in *new_addr(char *addr, unsigned short port)
25{
26 struct sockaddr_in *sin;
27
28 sin = uml_kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL);
29 if (sin == NULL) {
30 printk(UM_KERN_ERR "new_addr: allocation of sockaddr_in "
31 "failed\n");
32 return NULL;
33 }
34 sin->sin_family = AF_INET;
35 if (addr)
36 sin->sin_addr.s_addr = in_aton(addr);
37 else
38 sin->sin_addr.s_addr = INADDR_ANY;
39 sin->sin_port = htons(port);
40 return sin;
41}
42
43static int umcast_user_init(void *data, void *dev)
44{
45 struct umcast_data *pri = data;
46
47 pri->remote_addr = new_addr(pri->addr, pri->rport);
48 if (pri->unicast)
49 pri->listen_addr = new_addr(NULL, pri->lport);
50 else
51 pri->listen_addr = pri->remote_addr;
52 pri->dev = dev;
53 return 0;
54}
55
56static void umcast_remove(void *data)
57{
58 struct umcast_data *pri = data;
59
60 kfree(pri->listen_addr);
61 if (pri->unicast)
62 kfree(pri->remote_addr);
63 pri->listen_addr = pri->remote_addr = NULL;
64}
65
66static int umcast_open(void *data)
67{
68 struct umcast_data *pri = data;
69 struct sockaddr_in *lsin = pri->listen_addr;
70 struct sockaddr_in *rsin = pri->remote_addr;
71 struct ip_mreq mreq;
72 int fd, yes = 1, err = -EINVAL;
73
74
75 if ((!pri->unicast && lsin->sin_addr.s_addr == 0) ||
76 (rsin->sin_addr.s_addr == 0) ||
77 (lsin->sin_port == 0) || (rsin->sin_port == 0))
78 goto out;
79
80 fd = socket(AF_INET, SOCK_DGRAM, 0);
81
82 if (fd < 0) {
83 err = -errno;
84 printk(UM_KERN_ERR "umcast_open : data socket failed, "
85 "errno = %d\n", errno);
86 goto out;
87 }
88
89 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) {
90 err = -errno;
91 printk(UM_KERN_ERR "umcast_open: SO_REUSEADDR failed, "
92 "errno = %d\n", errno);
93 goto out_close;
94 }
95
96 if (!pri->unicast) {
97 /* set ttl according to config */
98 if (setsockopt(fd, SOL_IP, IP_MULTICAST_TTL, &pri->ttl,
99 sizeof(pri->ttl)) < 0) {
100 err = -errno;
101 printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_TTL "
102 "failed, error = %d\n", errno);
103 goto out_close;
104 }
105
106 /* set LOOP, so data does get fed back to local sockets */
107 if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP,
108 &yes, sizeof(yes)) < 0) {
109 err = -errno;
110 printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_LOOP "
111 "failed, error = %d\n", errno);
112 goto out_close;
113 }
114 }
115
116 /* bind socket to the address */
117 if (bind(fd, (struct sockaddr *) lsin, sizeof(*lsin)) < 0) {
118 err = -errno;
119 printk(UM_KERN_ERR "umcast_open : data bind failed, "
120 "errno = %d\n", errno);
121 goto out_close;
122 }
123
124 if (!pri->unicast) {
125 /* subscribe to the multicast group */
126 mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr;
127 mreq.imr_interface.s_addr = 0;
128 if (setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP,
129 &mreq, sizeof(mreq)) < 0) {
130 err = -errno;
131 printk(UM_KERN_ERR "umcast_open: IP_ADD_MEMBERSHIP "
132 "failed, error = %d\n", errno);
133 printk(UM_KERN_ERR "There appears not to be a "
134 "multicast-capable network interface on the "
135 "host.\n");
136 printk(UM_KERN_ERR "eth0 should be configured in order "
137 "to use the multicast transport.\n");
138 goto out_close;
139 }
140 }
141
142 return fd;
143
144 out_close:
145 close(fd);
146 out:
147 return err;
148}
149
150static void umcast_close(int fd, void *data)
151{
152 struct umcast_data *pri = data;
153
154 if (!pri->unicast) {
155 struct ip_mreq mreq;
156 struct sockaddr_in *lsin = pri->listen_addr;
157
158 mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr;
159 mreq.imr_interface.s_addr = 0;
160 if (setsockopt(fd, SOL_IP, IP_DROP_MEMBERSHIP,
161 &mreq, sizeof(mreq)) < 0) {
162 printk(UM_KERN_ERR "umcast_close: IP_DROP_MEMBERSHIP "
163 "failed, error = %d\n", errno);
164 }
165 }
166
167 close(fd);
168}
169
170int umcast_user_write(int fd, void *buf, int len, struct umcast_data *pri)
171{
172 struct sockaddr_in *data_addr = pri->remote_addr;
173
174 return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr));
175}
176
177const struct net_user_info umcast_user_info = {
178 .init = umcast_user_init,
179 .open = umcast_open,
180 .close = umcast_close,
181 .remove = umcast_remove,
182 .add_address = NULL,
183 .delete_address = NULL,
184 .mtu = ETH_MAX_PACKET,
185 .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
186};
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index da2caa5a21ef..8ac7146c237f 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -90,7 +90,7 @@ static int xterm_open(int input, int output, int primary, void *d,
 	int pid, fd, new, err;
 	char title[256], file[] = "/tmp/xterm-pipeXXXXXX";
 	char *argv[] = { terminal_emulator, title_switch, title, exec_switch,
-			 "/usr/lib/uml/port-helper", "-uml-socket",
+			 OS_LIB_PATH "/uml/port-helper", "-uml-socket",
 			 file, NULL };
 
 	if (access(argv[4], X_OK) < 0)
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index d1d1b0d8a0cd..98d01bc4fa92 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -14,6 +14,8 @@ struct task_struct;
 #include "registers.h"
 #include "sysdep/archsetjmp.h"
 
+#include <linux/prefetch.h>
+
 struct mm_struct;
 
 struct thread_struct {
diff --git a/arch/um/include/asm/smp.h b/arch/um/include/asm/smp.h
index f27a96313174..4a4b09d4f366 100644
--- a/arch/um/include/asm/smp.h
+++ b/arch/um/include/asm/smp.h
@@ -11,7 +11,6 @@
 
 #define cpu_logical_map(n) (n)
 #define cpu_number_map(n) (n)
-#define PROC_CHANGE_PENALTY	15 /* Pick a number, any number */
 extern int hard_smp_processor_id(void);
 #define NO_PROC_ID -1
 
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 660caedac9eb..4febacd1a8a1 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -22,9 +22,6 @@ struct mmu_gather {
 	unsigned int		fullmm; /* non-zero means full mm flush */
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 					  unsigned long address)
 {
@@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
 static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
@@ -83,12 +73,10 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb, start, end);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 /* tlb_remove_page
@@ -96,11 +84,16 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  *	while handling the additional races in SMP caused by other CPUs
  *	caching valid mappings in their TLBs.
  */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 	free_page_and_swap_cache(page);
-	return;
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
 }
 
 /**
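The hunks above move arch/um to the caller-allocated mmu_gather convention: tlb_gather_mmu() now initializes a gather structure supplied by the caller instead of returning a pointer to a per-CPU one, tlb_flush_mmu() loses its start/end arguments, and tlb_remove_page() becomes a wrapper around __tlb_remove_page(). A minimal sketch of the new calling pattern follows; the function name and the elided unmap loop are illustrative only, not code from this patch.

/* Sketch: caller-allocated mmu_gather, per the interface shown above. */
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;			/* now lives on the caller's stack */

	tlb_gather_mmu(&tlb, mm, 0);		/* 0: not a full-mm flush */

	/* ... walk the range, calling tlb_remove_page(&tlb, page) per page ... */

	tlb_finish_mmu(&tlb, start, end);	/* flushes via tlb_flush_mmu(&tlb) */
}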
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index c4617baaa4f2..83c7c2ecd614 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -29,6 +29,12 @@
 #define OS_ACC_R_OK	4	/* Test for read permission.  */
 #define OS_ACC_RW_OK	(OS_ACC_W_OK | OS_ACC_R_OK) /* Test for RW permission */
 
+#ifdef CONFIG_64BIT
+#define OS_LIB_PATH	"/usr/lib64/"
+#else
+#define OS_LIB_PATH	"/usr/lib/"
+#endif
+
 /*
  * types taken from stat_file() in hostfs_user.c
  * (if they are wrong here, they are wrong there...).
@@ -238,6 +244,7 @@ extern int raw(int fd);
 extern void setup_machinename(char *machine_out);
 extern void setup_hostinfo(char *buf, int len);
 extern void os_dump_core(void) __attribute__ ((noreturn));
+extern void um_early_printk(const char *s, unsigned int n);
 
 /* time.c */
 extern void idle_sleep(unsigned long long nsecs);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 1119233597a1..c4491c15afb2 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -17,6 +17,7 @@ obj-y = config.o exec.o exitcode.o init_task.o irq.o ksyms.o mem.o \
17obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o 17obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
18obj-$(CONFIG_GPROF) += gprof_syms.o 18obj-$(CONFIG_GPROF) += gprof_syms.o
19obj-$(CONFIG_GCOV) += gmon_syms.o 19obj-$(CONFIG_GCOV) += gmon_syms.o
20obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
20 21
21USER_OBJS := config.o 22USER_OBJS := config.o
22 23
diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c
new file mode 100644
index 000000000000..ec649bf72f68
--- /dev/null
+++ b/arch/um/kernel/early_printk.c
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/console.h>
11#include <linux/init.h>
12#include "os.h"
13
14static void early_console_write(struct console *con, const char *s, unsigned int n)
15{
16 um_early_printk(s, n);
17}
18
19static struct console early_console = {
20 .name = "earlycon",
21 .write = early_console_write,
22 .flags = CON_BOOT,
23 .index = -1,
24};
25
26static int __init setup_early_printk(char *buf)
27{
28 register_console(&early_console);
29
30 return 0;
31}
32
33early_param("earlyprintk", setup_early_printk);
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index eefb107d2d73..155206a66908 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -7,9 +7,6 @@
 #include "asm/pgalloc.h"
 #include "asm/tlb.h"
 
-/* For some reason, mmu_gathers are referenced when CONFIG_SMP is off. */
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 
 #include "linux/sched.h"
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 637c6505dc00..8c7b8823d1f0 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -113,6 +113,27 @@ out_of_memory:
113 return 0; 113 return 0;
114} 114}
115 115
116static void show_segv_info(struct uml_pt_regs *regs)
117{
118 struct task_struct *tsk = current;
119 struct faultinfo *fi = UPT_FAULTINFO(regs);
120
121 if (!unhandled_signal(tsk, SIGSEGV))
122 return;
123
124 if (!printk_ratelimit())
125 return;
126
127 printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
128 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
129 tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
130 (void *)UPT_IP(regs), (void *)UPT_SP(regs),
131 fi->error_code);
132
133 print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
134 printk(KERN_CONT "\n");
135}
136
116static void bad_segv(struct faultinfo fi, unsigned long ip) 137static void bad_segv(struct faultinfo fi, unsigned long ip)
117{ 138{
118 struct siginfo si; 139 struct siginfo si;
@@ -141,6 +162,7 @@ void segv_handler(int sig, struct uml_pt_regs *regs)
141 struct faultinfo * fi = UPT_FAULTINFO(regs); 162 struct faultinfo * fi = UPT_FAULTINFO(regs);
142 163
143 if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) { 164 if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
165 show_segv_info(regs);
144 bad_segv(*fi, UPT_IP(regs)); 166 bad_segv(*fi, UPT_IP(regs));
145 return; 167 return;
146 } 168 }
@@ -202,6 +224,8 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
202 address, ip); 224 address, ip);
203 } 225 }
204 226
227 show_segv_info(regs);
228
205 if (err == -EACCES) { 229 if (err == -EACCES) {
206 si.si_signo = SIGBUS; 230 si.si_signo = SIGBUS;
207 si.si_errno = 0; 231 si.si_errno = 0;
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index eee69b9f52c9..fb2a97a75fb1 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -78,7 +78,7 @@ static void install_fatal_handler(int sig)
 	}
 }
 
-#define UML_LIB_PATH	":/usr/lib/uml"
+#define UML_LIB_PATH	":" OS_LIB_PATH "/uml"
 
 static void setup_env_path(void)
 {
@@ -142,7 +142,6 @@ int __init main(int argc, char **argv, char **envp)
 	 */
 	install_fatal_handler(SIGINT);
 	install_fatal_handler(SIGTERM);
-	install_fatal_handler(SIGHUP);
 
 	scan_elf_aux(envp);
 
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index e0477c3ee894..0c45dc8efb05 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -253,6 +253,7 @@ void init_new_thread_signals(void)
 		    SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGALRM,
 		    SIGVTALRM, -1);
 	signal(SIGWINCH, SIG_IGN);
+	signal(SIGTERM, SIG_DFL);
 }
 
 int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index 42827cafa6af..5803b1887672 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -139,3 +139,8 @@ void os_dump_core(void)
 
 	uml_abort();
 }
+
+void um_early_printk(const char *s, unsigned int n)
+{
+	printf("%.*s", n, s);
+}
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
index 3140151ede45..ae2ec334c3c6 100644
--- a/arch/unicore32/Kconfig.debug
+++ b/arch/unicore32/Kconfig.debug
@@ -27,13 +27,6 @@ config EARLY_PRINTK
27 with klogd/syslogd or the X server. You should normally N here, 27 with klogd/syslogd or the X server. You should normally N here,
28 unless you want to debug such a crash. 28 unless you want to debug such a crash.
29 29
30config DEBUG_STACK_USAGE
31 bool "Enable stack utilization instrumentation"
32 depends on DEBUG_KERNEL
33 help
34 Enables the display of the minimum amount of free stack which each
35 task has ever had available in the sysrq-T output.
36
37# These options are only for real kernel hackers who want to get their hands dirty. 30# These options are only for real kernel hackers who want to get their hands dirty.
38config DEBUG_LL 31config DEBUG_LL
39 bool "Kernel low-level debugging functions" 32 bool "Kernel low-level debugging functions"
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 1fc02633f700..2d3e7112d2a3 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -62,7 +62,7 @@ void show_mem(unsigned int filter)
 	struct meminfo *mi = &meminfo;
 
 	printk(KERN_DEFAULT "Mem-info:\n");
-	show_free_areas();
+	show_free_areas(filter);
 
 	for_each_bank(i, mi) {
 		struct membank *bank = &mi->bank[i];
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index db2d334941b4..3e5c3e5a0b45 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -30,8 +30,6 @@
30 30
31#include "mm.h" 31#include "mm.h"
32 32
33DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
34
35/* 33/*
36 * empty_zero_page is a special page that is used for 34 * empty_zero_page is a special page that is used for
37 * zero-initialized data and COW. 35 * zero-initialized data and COW.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 880fcb6c86f4..da349723d411 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -17,8 +17,6 @@ config X86_64
17config X86 17config X86
18 def_bool y 18 def_bool y
19 select HAVE_AOUT if X86_32 19 select HAVE_AOUT if X86_32
20 select HAVE_READQ
21 select HAVE_WRITEQ
22 select HAVE_UNSTABLE_SCHED_CLOCK 20 select HAVE_UNSTABLE_SCHED_CLOCK
23 select HAVE_IDE 21 select HAVE_IDE
24 select HAVE_OPROFILE 22 select HAVE_OPROFILE
@@ -66,7 +64,6 @@ config X86
66 select HAVE_GENERIC_HARDIRQS 64 select HAVE_GENERIC_HARDIRQS
67 select HAVE_SPARSE_IRQ 65 select HAVE_SPARSE_IRQ
68 select GENERIC_FIND_FIRST_BIT 66 select GENERIC_FIND_FIRST_BIT
69 select GENERIC_FIND_NEXT_BIT
70 select GENERIC_IRQ_PROBE 67 select GENERIC_IRQ_PROBE
71 select GENERIC_PENDING_IRQ if SMP 68 select GENERIC_PENDING_IRQ if SMP
72 select GENERIC_IRQ_SHOW 69 select GENERIC_IRQ_SHOW
@@ -917,6 +914,7 @@ config TOSHIBA
917 914
918config I8K 915config I8K
919 tristate "Dell laptop support" 916 tristate "Dell laptop support"
917 select HWMON
920 ---help--- 918 ---help---
921 This adds a driver to safely access the System Management Mode 919 This adds a driver to safely access the System Management Mode
922 of the CPU on the Dell Inspiron 8000. The System Management Mode 920 of the CPU on the Dell Inspiron 8000. The System Management Mode
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 615e18810f48..c0f8a5c88910 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -66,26 +66,6 @@ config DEBUG_STACKOVERFLOW
66 This option will cause messages to be printed if free stack space 66 This option will cause messages to be printed if free stack space
67 drops below a certain limit. 67 drops below a certain limit.
68 68
69config DEBUG_STACK_USAGE
70 bool "Stack utilization instrumentation"
71 depends on DEBUG_KERNEL
72 ---help---
73 Enables the display of the minimum amount of free stack which each
74 task has ever had available in the sysrq-T and sysrq-P debug output.
75
76 This option will slow down process creation somewhat.
77
78config DEBUG_PER_CPU_MAPS
79 bool "Debug access to per_cpu maps"
80 depends on DEBUG_KERNEL
81 depends on SMP
82 ---help---
83 Say Y to verify that the per_cpu map being accessed has
84 been setup. Adds a fair amount of code to kernel memory
85 and decreases performance.
86
87 Say N if unsure.
88
89config X86_PTDUMP 69config X86_PTDUMP
90 bool "Export kernel pagetable layout to userspace via debugfs" 70 bool "Export kernel pagetable layout to userspace via debugfs"
91 depends on DEBUG_KERNEL 71 depends on DEBUG_KERNEL
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 6f9872658dd2..2bf18059fbea 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -10,7 +10,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
10CONFIG_AUDIT=y 10CONFIG_AUDIT=y
11CONFIG_LOG_BUF_SHIFT=18 11CONFIG_LOG_BUF_SHIFT=18
12CONFIG_CGROUPS=y 12CONFIG_CGROUPS=y
13CONFIG_CGROUP_NS=y
14CONFIG_CGROUP_FREEZER=y 13CONFIG_CGROUP_FREEZER=y
15CONFIG_CPUSETS=y 14CONFIG_CPUSETS=y
16CONFIG_CGROUP_CPUACCT=y 15CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index ee01a9d5d4f0..22a0dc8e51dd 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -11,7 +11,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
11CONFIG_AUDIT=y 11CONFIG_AUDIT=y
12CONFIG_LOG_BUF_SHIFT=18 12CONFIG_LOG_BUF_SHIFT=18
13CONFIG_CGROUPS=y 13CONFIG_CGROUPS=y
14CONFIG_CGROUP_NS=y
15CONFIG_CGROUP_FREEZER=y 14CONFIG_CGROUP_FREEZER=y
16CONFIG_CPUSETS=y 15CONFIG_CPUSETS=y
17CONFIG_CGROUP_CPUACCT=y 16CONFIG_CGROUP_CPUACCT=y
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 072273082528..d02804d650c4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -38,7 +38,6 @@
38 38
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/compiler.h> 40#include <linux/compiler.h>
41#include <asm-generic/int-ll64.h>
42#include <asm/page.h> 41#include <asm/page.h>
43 42
44#include <xen/xen.h> 43#include <xen/xen.h>
@@ -87,27 +86,6 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
87build_mmio_read(readq, "q", unsigned long, "=r", :"memory") 86build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
88build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 87build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
89 88
90#else
91
92static inline __u64 readq(const volatile void __iomem *addr)
93{
94 const volatile u32 __iomem *p = addr;
95 u32 low, high;
96
97 low = readl(p);
98 high = readl(p + 1);
99
100 return low + ((u64)high << 32);
101}
102
103static inline void writeq(__u64 val, volatile void __iomem *addr)
104{
105 writel(val, addr);
106 writel(val >> 32, addr+4);
107}
108
109#endif
110
111#define readq_relaxed(a) readq(a) 89#define readq_relaxed(a) readq(a)
112 90
113#define __raw_readq(a) readq(a) 91#define __raw_readq(a) readq(a)
@@ -117,6 +95,8 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
117#define readq readq 95#define readq readq
118#define writeq writeq 96#define writeq writeq
119 97
98#endif
99
120/** 100/**
121 * virt_to_phys - map virtual addresses to physical 101 * virt_to_phys - map virtual addresses to physical
122 * @address: address to remap 102 * @address: address to remap
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index 396f5b5fc4d7..77e95f54570a 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -77,6 +77,7 @@ static inline void arch_kgdb_breakpoint(void)
77} 77}
78#define BREAK_INSTR_SIZE 1 78#define BREAK_INSTR_SIZE 1
79#define CACHE_FLUSH_IS_SAFE 1 79#define CACHE_FLUSH_IS_SAFE 1
80#define GDB_ADJUSTS_BREAK_OFFSET
80 81
81extern int kgdb_ll_trap(int cmd, const char *str, 82extern int kgdb_ll_trap(int cmd, const char *str,
82 struct pt_regs *regs, long err, int trap, int sig); 83 struct pt_regs *regs, long err, int trap, int sig);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 1babf8adecdf..94e7618fcac8 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -136,6 +136,7 @@ struct cpuinfo_x86;
 struct task_struct;
 
 extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
 
 extern unsigned long
 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
@@ -202,20 +203,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 #endif
 }
 
-static inline unsigned long instruction_pointer(struct pt_regs *regs)
-{
-	return regs->ip;
-}
-
-static inline unsigned long frame_pointer(struct pt_regs *regs)
-{
-	return regs->bp;
-}
+#define GET_IP(regs)  ((regs)->ip)
+#define GET_FP(regs)  ((regs)->bp)
+#define GET_USP(regs) ((regs)->sp)
 
-static inline unsigned long user_stack_pointer(struct pt_regs *regs)
-{
-	return regs->sp;
-}
+#include <asm-generic/ptrace.h>
 
 /* Query offset/name of register from its name/offset */
 extern int regs_query_register_offset(const char *name);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 83e2efd181e2..9db5583b6d38 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -51,6 +51,10 @@ extern int unsynchronized_tsc(void);
51extern int check_tsc_unstable(void); 51extern int check_tsc_unstable(void);
52extern unsigned long native_calibrate_tsc(void); 52extern unsigned long native_calibrate_tsc(void);
53 53
54#ifdef CONFIG_X86_64
55extern cycles_t vread_tsc(void);
56#endif
57
54/* 58/*
55 * Boot-time check whether the TSCs are synchronized across 59 * Boot-time check whether the TSCs are synchronized across
56 * all CPUs/cores: 60 * all CPUs/cores:
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 9064052b73de..bb0522850b74 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -1,20 +1,6 @@
1#ifndef _ASM_X86_VDSO_H 1#ifndef _ASM_X86_VDSO_H
2#define _ASM_X86_VDSO_H 2#define _ASM_X86_VDSO_H
3 3
4#ifdef CONFIG_X86_64
5extern const char VDSO64_PRELINK[];
6
7/*
8 * Given a pointer to the vDSO image, find the pointer to VDSO64_name
9 * as that symbol is defined in the vDSO sources or linker script.
10 */
11#define VDSO64_SYMBOL(base, name) \
12({ \
13 extern const char VDSO64_##name[]; \
14 (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \
15})
16#endif
17
18#if defined CONFIG_X86_32 || defined CONFIG_COMPAT 4#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
19extern const char VDSO32_PRELINK[]; 5extern const char VDSO32_PRELINK[];
20 6
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 3d61e204826f..646b4c1ca695 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -23,8 +23,6 @@ struct vsyscall_gtod_data {
23 struct timespec wall_to_monotonic; 23 struct timespec wall_to_monotonic;
24 struct timespec wall_time_coarse; 24 struct timespec wall_time_coarse;
25}; 25};
26extern struct vsyscall_gtod_data __vsyscall_gtod_data
27__section_vsyscall_gtod_data;
28extern struct vsyscall_gtod_data vsyscall_gtod_data; 26extern struct vsyscall_gtod_data vsyscall_gtod_data;
29 27
30#endif /* _ASM_X86_VGTOD_H */ 28#endif /* _ASM_X86_VGTOD_H */
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d0983d255fbd..d55597351f6a 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -16,27 +16,19 @@ enum vsyscall_num {
 #ifdef __KERNEL__
 #include <linux/seqlock.h>
 
-#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
-
 /* Definitions for CONFIG_GENERIC_TIME definitions */
-#define __section_vsyscall_gtod_data __attribute__ \
-	((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
-#define __section_vsyscall_clock __attribute__ \
-	((unused, __section__ (".vsyscall_clock"),aligned(16)))
 #define __vsyscall_fn \
 	__attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
 
 #define VGETCPU_RDTSCP	1
 #define VGETCPU_LSL	2
 
-extern int __vgetcpu_mode;
-extern volatile unsigned long __jiffies;
-
 /* kernel space (writeable) */
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
 
+#include <asm/vvar.h>
+
 extern void map_vsyscall(void);
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
new file mode 100644
index 000000000000..341b3559452b
--- /dev/null
+++ b/arch/x86/include/asm/vvar.h
@@ -0,0 +1,52 @@
1/*
2 * vvar.h: Shared vDSO/kernel variable declarations
3 * Copyright (c) 2011 Andy Lutomirski
4 * Subject to the GNU General Public License, version 2
5 *
6 * A handful of variables are accessible (read-only) from userspace
7 * code in the vsyscall page and the vdso. They are declared here.
8 * Some other file must define them with DEFINE_VVAR.
9 *
10 * In normal kernel code, they are used like any other variable.
11 * In user code, they are accessed through the VVAR macro.
12 *
13 * Each of these variables lives in the vsyscall page, and each
14 * one needs a unique offset within the little piece of the page
15 * reserved for vvars. Specify that offset in DECLARE_VVAR.
16 * (There are 896 bytes available. If you mess up, the linker will
17 * catch it.)
18 */
19
20/* Offset of vars within vsyscall page */
21#define VSYSCALL_VARS_OFFSET (3072 + 128)
22
23#if defined(__VVAR_KERNEL_LDS)
24
25/* The kernel linker script defines its own magic to put vvars in the
26 * right place.
27 */
28#define DECLARE_VVAR(offset, type, name) \
29 EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset)
30
31#else
32
33#define DECLARE_VVAR(offset, type, name) \
34 static type const * const vvaraddr_ ## name = \
35 (void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset));
36
37#define DEFINE_VVAR(type, name) \
38 type __vvar_ ## name \
39 __attribute__((section(".vsyscall_var_" #name), aligned(16)))
40
41#define VVAR(name) (*vvaraddr_ ## name)
42
43#endif
44
45/* DECLARE_VVAR(offset, type, name) */
46
47DECLARE_VVAR(0, volatile unsigned long, jiffies)
48DECLARE_VVAR(8, int, vgetcpu_mode)
49DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
50
51#undef DECLARE_VVAR
52#undef VSYSCALL_VARS_OFFSET
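As the header comment explains, a vvar is defined once in normal kernel code and read from vsyscall/vDSO code through a fixed slot in the vsyscall page. A minimal sketch of how the two sides pair up, reusing the vgetcpu_mode variable this series already defines (the reader function is illustrative only, not code from this patch):

/* Kernel side: allocate the variable in its .vsyscall_var_* section. */
DEFINE_VVAR(int, vgetcpu_mode);

/* Userspace-visible side (vsyscall/vDSO code): read it through VVAR(). */
static __always_inline int example_read_vgetcpu_mode(void)
{
	return VVAR(vgetcpu_mode);	/* dereferences the fixed vsyscall-page slot */
}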
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 8508bfe52296..d240ea950519 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -447,6 +447,13 @@ HYPERVISOR_hvm_op(int op, void *arg)
447 return _hypercall2(unsigned long, hvm_op, op, arg); 447 return _hypercall2(unsigned long, hvm_op, op, arg);
448} 448}
449 449
450static inline int
451HYPERVISOR_tmem_op(
452 struct tmem_op *op)
453{
454 return _hypercall1(int, tmem_op, op);
455}
456
450static inline void 457static inline void
451MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) 458MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
452{ 459{
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 250806472a7e..f5abe3a245b8 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -8,7 +8,6 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
-CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_pvclock.o = -pg
@@ -24,13 +23,16 @@ endif
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o	:= $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o		:= $(nostackp)
-CFLAGS_tsc.o		:= $(nostackp)
+CFLAGS_vread_tsc_64.o	:= $(nostackp)
 CFLAGS_paravirt.o	:= $(nostackp)
 GCOV_PROFILE_vsyscall_64.o	:= n
 GCOV_PROFILE_hpet.o		:= n
 GCOV_PROFILE_tsc.o		:= n
 GCOV_PROFILE_paravirt.o		:= n
 
+# vread_tsc_64 is hot and should be fully optimized:
+CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls
+
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time.o ioport.o ldt.o dumpstack.o
@@ -39,7 +41,7 @@ obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o
+obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o vread_tsc_64.o
 obj-y			+= bootflag.o e820.o
 obj-y			+= pci-dma.o quirks.o topology.o kdebugfs.o
 obj-y			+= alternative.o i8253.o pci-nommu.o hw_breakpoint.o
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 605e5ae19c7f..a3e5948670c2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -946,6 +946,8 @@ void __init setup_arch(char **cmdline_p)
946 if (init_ohci1394_dma_early) 946 if (init_ohci1394_dma_early)
947 init_ohci1394_dma_on_all_controllers(); 947 init_ohci1394_dma_on_all_controllers();
948#endif 948#endif
949 /* Allocate bigger log buffer */
950 setup_log_buf(1);
949 951
950 reserve_initrd(); 952 reserve_initrd();
951 953
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 998e972f3b1a..30ac65df7d4e 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -110,7 +110,6 @@ static struct mm_struct tboot_mm = {
110 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), 110 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
111 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), 111 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
112 .mmlist = LIST_HEAD_INIT(init_mm.mmlist), 112 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
113 .cpu_vm_mask = CPU_MASK_ALL,
114}; 113};
115 114
116static inline void switch_to_tboot_pt(void) 115static inline void switch_to_tboot_pt(void)
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 25a28a245937..00cbb272627f 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -23,7 +23,7 @@
 #include <asm/time.h>
 
 #ifdef CONFIG_X86_64
-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
 #endif
 
 unsigned long profile_pc(struct pt_regs *regs)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 9335bf7dd2e7..6cc6922262af 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -763,25 +763,6 @@ static cycle_t read_tsc(struct clocksource *cs)
763 ret : clocksource_tsc.cycle_last; 763 ret : clocksource_tsc.cycle_last;
764} 764}
765 765
766#ifdef CONFIG_X86_64
767static cycle_t __vsyscall_fn vread_tsc(void)
768{
769 cycle_t ret;
770
771 /*
772 * Surround the RDTSC by barriers, to make sure it's not
773 * speculated to outside the seqlock critical section and
774 * does not cause time warps:
775 */
776 rdtsc_barrier();
777 ret = (cycle_t)vget_cycles();
778 rdtsc_barrier();
779
780 return ret >= __vsyscall_gtod_data.clock.cycle_last ?
781 ret : __vsyscall_gtod_data.clock.cycle_last;
782}
783#endif
784
785static void resume_tsc(struct clocksource *cs) 766static void resume_tsc(struct clocksource *cs)
786{ 767{
787 clocksource_tsc.cycle_last = 0; 768 clocksource_tsc.cycle_last = 0;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 61682f0ac264..89aed99aafce 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -161,6 +161,12 @@ SECTIONS
161 161
162#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) 162#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
163#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) 163#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
164#define EMIT_VVAR(x, offset) .vsyscall_var_ ## x \
165 ADDR(.vsyscall_0) + offset \
166 : AT(VLOAD(.vsyscall_var_ ## x)) { \
167 *(.vsyscall_var_ ## x) \
168 } \
169 x = VVIRT(.vsyscall_var_ ## x);
164 170
165 . = ALIGN(4096); 171 . = ALIGN(4096);
166 __vsyscall_0 = .; 172 __vsyscall_0 = .;
@@ -175,18 +181,6 @@ SECTIONS
175 *(.vsyscall_fn) 181 *(.vsyscall_fn)
176 } 182 }
177 183
178 . = ALIGN(L1_CACHE_BYTES);
179 .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
180 *(.vsyscall_gtod_data)
181 }
182
183 vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
184 .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
185 *(.vsyscall_clock)
186 }
187 vsyscall_clock = VVIRT(.vsyscall_clock);
188
189
190 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { 184 .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
191 *(.vsyscall_1) 185 *(.vsyscall_1)
192 } 186 }
@@ -194,21 +188,14 @@ SECTIONS
 		*(.vsyscall_2)
 	}
 
-	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
-		*(.vgetcpu_mode)
-	}
-	vgetcpu_mode = VVIRT(.vgetcpu_mode);
-
-	. = ALIGN(L1_CACHE_BYTES);
-	.jiffies : AT(VLOAD(.jiffies)) {
-		*(.jiffies)
-	}
-	jiffies = VVIRT(.jiffies);
-
 	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
 		*(.vsyscall_3)
 	}
 
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+
 	. = __vsyscall_0 + PAGE_SIZE;
 
 #undef VSYSCALL_ADDR
@@ -216,6 +203,7 @@ SECTIONS
 #undef VLOAD
 #undef VVIRT_OFFSET
 #undef VVIRT
+#undef EMIT_VVAR
 
 #endif /* CONFIG_X86_64 */
 
diff --git a/arch/x86/kernel/vread_tsc_64.c b/arch/x86/kernel/vread_tsc_64.c
new file mode 100644
index 000000000000..a81aa9e9894c
--- /dev/null
+++ b/arch/x86/kernel/vread_tsc_64.c
@@ -0,0 +1,36 @@
1/* This code runs in userspace. */
2
3#define DISABLE_BRANCH_PROFILING
4#include <asm/vgtod.h>
5
6notrace cycle_t __vsyscall_fn vread_tsc(void)
7{
8 cycle_t ret;
9 u64 last;
10
11 /*
12 * Empirically, a fence (of type that depends on the CPU)
13 * before rdtsc is enough to ensure that rdtsc is ordered
14 * with respect to loads. The various CPU manuals are unclear
15 * as to whether rdtsc can be reordered with later loads,
16 * but no one has ever seen it happen.
17 */
18 rdtsc_barrier();
19 ret = (cycle_t)vget_cycles();
20
21 last = VVAR(vsyscall_gtod_data).clock.cycle_last;
22
23 if (likely(ret >= last))
24 return ret;
25
26 /*
27 * GCC likes to generate cmov here, but this branch is extremely
28 * predictable (it's just a function of time and the likely is
29 * very likely) and there's a data dependence, so force GCC
30 * to generate a branch instead. I don't barrier() because
31 * we don't actually need a barrier, and if this function
32 * ever gets inlined it will generate worse code.
33 */
34 asm volatile ("");
35 return last;
36}
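
The pattern in vread_tsc() above -- fence, read the TSC, then clamp against the last cycle count published by the timekeeper -- can be sketched in plain userspace C. This is only an illustrative sketch, not the vDSO code itself: GCC's __rdtsc() and _mm_lfence() intrinsics from x86intrin.h stand in for vget_cycles() and rdtsc_barrier(), and the local cycle_last variable and read_tsc_clamped() helper are made up for the example, standing in for vsyscall_gtod_data.clock.cycle_last.

/*
 * Userspace sketch of the fence + clamp idea: the reader never observes
 * a value below the last one published by the timekeeper.
 */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>          /* __rdtsc(), _mm_lfence() */

static uint64_t cycle_last;     /* stand-in, updated by a "timekeeper" */

static uint64_t read_tsc_clamped(void)
{
        uint64_t ret;

        _mm_lfence();           /* keep the RDTSC from being hoisted */
        ret = __rdtsc();

        if (ret >= cycle_last)
                return ret;

        /* Force a branch rather than a cmov, as the patch comment explains. */
        asm volatile ("");
        return cycle_last;
}

int main(void)
{
        cycle_last = __rdtsc();
        printf("tsc: %llu\n", (unsigned long long)read_tsc_clamped());
        return 0;
}

Build with gcc -O2 on x86-64; the clamp is what guarantees monotonicity even if the raw counter briefly appears to step backwards between CPUs.
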
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index dcbb28c4b694..3e682184d76c 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -49,17 +49,10 @@
49 __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace 49 __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
50#define __syscall_clobber "r11","cx","memory" 50#define __syscall_clobber "r11","cx","memory"
51 51
52/* 52DEFINE_VVAR(int, vgetcpu_mode);
53 * vsyscall_gtod_data contains data that is : 53DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
54 * - readonly from vsyscalls
55 * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
56 * Try to keep this structure as small as possible to avoid cache line ping pongs
57 */
58int __vgetcpu_mode __section_vgetcpu_mode;
59
60struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
61{ 54{
62 .lock = SEQLOCK_UNLOCKED, 55 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
63 .sysctl_enabled = 1, 56 .sysctl_enabled = 1,
64}; 57};
65 58
@@ -97,7 +90,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
97 */ 90 */
98static __always_inline void do_get_tz(struct timezone * tz) 91static __always_inline void do_get_tz(struct timezone * tz)
99{ 92{
100 *tz = __vsyscall_gtod_data.sys_tz; 93 *tz = VVAR(vsyscall_gtod_data).sys_tz;
101} 94}
102 95
103static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) 96static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
@@ -126,23 +119,24 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
126 unsigned long mult, shift, nsec; 119 unsigned long mult, shift, nsec;
127 cycle_t (*vread)(void); 120 cycle_t (*vread)(void);
128 do { 121 do {
129 seq = read_seqbegin(&__vsyscall_gtod_data.lock); 122 seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
130 123
131 vread = __vsyscall_gtod_data.clock.vread; 124 vread = VVAR(vsyscall_gtod_data).clock.vread;
132 if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) { 125 if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
126 !vread)) {
133 gettimeofday(tv,NULL); 127 gettimeofday(tv,NULL);
134 return; 128 return;
135 } 129 }
136 130
137 now = vread(); 131 now = vread();
138 base = __vsyscall_gtod_data.clock.cycle_last; 132 base = VVAR(vsyscall_gtod_data).clock.cycle_last;
139 mask = __vsyscall_gtod_data.clock.mask; 133 mask = VVAR(vsyscall_gtod_data).clock.mask;
140 mult = __vsyscall_gtod_data.clock.mult; 134 mult = VVAR(vsyscall_gtod_data).clock.mult;
141 shift = __vsyscall_gtod_data.clock.shift; 135 shift = VVAR(vsyscall_gtod_data).clock.shift;
142 136
143 tv->tv_sec = __vsyscall_gtod_data.wall_time_sec; 137 tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
144 nsec = __vsyscall_gtod_data.wall_time_nsec; 138 nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
145 } while (read_seqretry(&__vsyscall_gtod_data.lock, seq)); 139 } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
146 140
147 /* calculate interval: */ 141 /* calculate interval: */
148 cycle_delta = (now - base) & mask; 142 cycle_delta = (now - base) & mask;
@@ -171,15 +165,15 @@ time_t __vsyscall(1) vtime(time_t *t)
171{ 165{
172 unsigned seq; 166 unsigned seq;
173 time_t result; 167 time_t result;
174 if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) 168 if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
175 return time_syscall(t); 169 return time_syscall(t);
176 170
177 do { 171 do {
178 seq = read_seqbegin(&__vsyscall_gtod_data.lock); 172 seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
179 173
180 result = __vsyscall_gtod_data.wall_time_sec; 174 result = VVAR(vsyscall_gtod_data).wall_time_sec;
181 175
182 } while (read_seqretry(&__vsyscall_gtod_data.lock, seq)); 176 } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
183 177
184 if (t) 178 if (t)
185 *t = result; 179 *t = result;
@@ -208,9 +202,9 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
208 We do this here because otherwise user space would do it on 202 We do this here because otherwise user space would do it on
209 its own in a likely inferior way (no access to jiffies). 203 its own in a likely inferior way (no access to jiffies).
210 If you don't like it pass NULL. */ 204 If you don't like it pass NULL. */
211 if (tcache && tcache->blob[0] == (j = __jiffies)) { 205 if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) {
212 p = tcache->blob[1]; 206 p = tcache->blob[1];
213 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) { 207 } else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
214 /* Load per CPU data from RDTSCP */ 208 /* Load per CPU data from RDTSCP */
215 native_read_tscp(&p); 209 native_read_tscp(&p);
216 } else { 210 } else {
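
do_vgettimeofday() and vtime() above both rely on the lockless reader side of a seqlock: snapshot the sequence count, copy the fields, and retry if the writer bumped the count in between. A simplified userspace model of that reader loop, built on C11 atomics rather than the kernel's seqlock_t -- the struct gtod_data, seq_read_begin() and seq_read_retry() names are invented for the sketch -- looks roughly like this:

/*
 * Reader retries whenever the sequence count is odd (write in progress)
 * or has changed since the snapshot was taken.
 */
#include <stdatomic.h>
#include <stdio.h>

struct gtod_data {
        atomic_uint seq;                /* even: stable, odd: writer active */
        long wall_time_sec;
        long wall_time_nsec;
};

static unsigned seq_read_begin(struct gtod_data *g)
{
        unsigned s;

        do {
                s = atomic_load_explicit(&g->seq, memory_order_acquire);
        } while (s & 1);                /* writer in progress, spin */
        return s;
}

static int seq_read_retry(struct gtod_data *g, unsigned start)
{
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&g->seq, memory_order_relaxed) != start;
}

int main(void)
{
        struct gtod_data g = { .wall_time_sec = 1234, .wall_time_nsec = 567 };
        long sec, nsec;
        unsigned seq;

        do {
                seq = seq_read_begin(&g);
                sec = g.wall_time_sec;
                nsec = g.wall_time_nsec;
        } while (seq_read_retry(&g, seq));

        printf("%ld.%09ld\n", sec, nsec);
        return 0;
}
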
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 28418054b880..bd14bb4c8594 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3545,10 +3545,11 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
3545 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list); 3545 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3546} 3546}
3547 3547
3548static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 3548static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
3549{ 3549{
3550 struct kvm *kvm; 3550 struct kvm *kvm;
3551 struct kvm *kvm_freed = NULL; 3551 struct kvm *kvm_freed = NULL;
3552 int nr_to_scan = sc->nr_to_scan;
3552 3553
3553 if (nr_to_scan == 0) 3554 if (nr_to_scan == 0)
3554 goto out; 3555 goto out;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bcb394dfbb35..f7a2a054a3c0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -965,7 +965,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
965 struct mm_struct *mm; 965 struct mm_struct *mm;
966 int fault; 966 int fault;
967 int write = error_code & PF_WRITE; 967 int write = error_code & PF_WRITE;
968 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | 968 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
969 (write ? FAULT_FLAG_WRITE : 0); 969 (write ? FAULT_FLAG_WRITE : 0);
970 970
971 tsk = current; 971 tsk = current;
@@ -1139,6 +1139,16 @@ good_area:
1139 } 1139 }
1140 1140
1141 /* 1141 /*
1142 * Pagefault was interrupted by SIGKILL. We have no reason to
1143 * continue pagefault.
1144 */
1145 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
1146 if (!(error_code & PF_USER))
1147 no_context(regs, error_code, address);
1148 return;
1149 }
1150
1151 /*
1142 * Major/minor page fault accounting is only done on the 1152 * Major/minor page fault accounting is only done on the
1143 * initial attempt. If we go through a retry, it is extremely 1153 * initial attempt. If we go through a retry, it is extremely
1144 * likely that the page will be found in page cache at that point. 1154 * likely that the page will be found in page cache at that point.
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index d4203988504a..f581a18c0d4d 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -72,7 +72,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
72 if (!vma_shareable(vma, addr)) 72 if (!vma_shareable(vma, addr))
73 return; 73 return;
74 74
75 spin_lock(&mapping->i_mmap_lock); 75 mutex_lock(&mapping->i_mmap_mutex);
76 vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { 76 vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
77 if (svma == vma) 77 if (svma == vma)
78 continue; 78 continue;
@@ -97,7 +97,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
97 put_page(virt_to_page(spte)); 97 put_page(virt_to_page(spte));
98 spin_unlock(&mm->page_table_lock); 98 spin_unlock(&mm->page_table_lock);
99out: 99out:
100 spin_unlock(&mapping->i_mmap_lock); 100 mutex_unlock(&mapping->i_mmap_mutex);
101} 101}
102 102
103/* 103/*
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 37b8b0fe8320..30326443ab81 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -16,8 +16,6 @@
16#include <asm/tlb.h> 16#include <asm/tlb.h>
17#include <asm/proto.h> 17#include <asm/proto.h>
18 18
19DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
20
21unsigned long __initdata pgt_buf_start; 19unsigned long __initdata pgt_buf_start;
22unsigned long __meminitdata pgt_buf_end; 20unsigned long __meminitdata pgt_buf_end;
23unsigned long __meminitdata pgt_buf_top; 21unsigned long __meminitdata pgt_buf_top;
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index b6552b189bcd..bef0bc962400 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -11,7 +11,7 @@ vdso-install-$(VDSO32-y) += $(vdso32-images)
11 11
12 12
13# files to link into the vdso 13# files to link into the vdso
14vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o 14vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
15 15
16# files to link into kernel 16# files to link into kernel
17obj-$(VDSO64-y) += vma.o vdso.o 17obj-$(VDSO64-y) += vma.o vdso.o
@@ -37,11 +37,24 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
37$(obj)/%.so: $(obj)/%.so.dbg FORCE 37$(obj)/%.so: $(obj)/%.so.dbg FORCE
38 $(call if_changed,objcopy) 38 $(call if_changed,objcopy)
39 39
40#
41# Don't omit frame pointers for ease of userspace debugging, but do
42# optimize sibling calls.
43#
40CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ 44CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
41 $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) 45 $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
46 -fno-omit-frame-pointer -foptimize-sibling-calls
42 47
43$(vobjs): KBUILD_CFLAGS += $(CFL) 48$(vobjs): KBUILD_CFLAGS += $(CFL)
44 49
50#
51# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
52#
53CFLAGS_REMOVE_vdso-note.o = -pg
54CFLAGS_REMOVE_vclock_gettime.o = -pg
55CFLAGS_REMOVE_vgetcpu.o = -pg
56CFLAGS_REMOVE_vvar.o = -pg
57
45targets += vdso-syms.lds 58targets += vdso-syms.lds
46obj-$(VDSO64-y) += vdso-syms.lds 59obj-$(VDSO64-y) += vdso-syms.lds
47 60
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index ee55754cc3c5..a724905fdae7 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -2,7 +2,7 @@
2 * Copyright 2006 Andi Kleen, SUSE Labs. 2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2 3 * Subject to the GNU Public License, v.2
4 * 4 *
5 * Fast user context implementation of clock_gettime and gettimeofday. 5 * Fast user context implementation of clock_gettime, gettimeofday, and time.
6 * 6 *
7 * The code should have no internal unresolved relocations. 7 * The code should have no internal unresolved relocations.
8 * Check with readelf after changing. 8 * Check with readelf after changing.
@@ -22,9 +22,8 @@
22#include <asm/hpet.h> 22#include <asm/hpet.h>
23#include <asm/unistd.h> 23#include <asm/unistd.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include "vextern.h"
26 25
27#define gtod vdso_vsyscall_gtod_data 26#define gtod (&VVAR(vsyscall_gtod_data))
28 27
29notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) 28notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
30{ 29{
@@ -56,22 +55,6 @@ notrace static noinline int do_realtime(struct timespec *ts)
56 return 0; 55 return 0;
57} 56}
58 57
59/* Copy of the version in kernel/time.c which we cannot directly access */
60notrace static void
61vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
62{
63 while (nsec >= NSEC_PER_SEC) {
64 nsec -= NSEC_PER_SEC;
65 ++sec;
66 }
67 while (nsec < 0) {
68 nsec += NSEC_PER_SEC;
69 --sec;
70 }
71 ts->tv_sec = sec;
72 ts->tv_nsec = nsec;
73}
74
75notrace static noinline int do_monotonic(struct timespec *ts) 58notrace static noinline int do_monotonic(struct timespec *ts)
76{ 59{
77 unsigned long seq, ns, secs; 60 unsigned long seq, ns, secs;
@@ -82,7 +65,17 @@ notrace static noinline int do_monotonic(struct timespec *ts)
82 secs += gtod->wall_to_monotonic.tv_sec; 65 secs += gtod->wall_to_monotonic.tv_sec;
83 ns += gtod->wall_to_monotonic.tv_nsec; 66 ns += gtod->wall_to_monotonic.tv_nsec;
84 } while (unlikely(read_seqretry(&gtod->lock, seq))); 67 } while (unlikely(read_seqretry(&gtod->lock, seq)));
85 vset_normalized_timespec(ts, secs, ns); 68
69 /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
70 * are all guaranteed to be nonnegative.
71 */
72 while (ns >= NSEC_PER_SEC) {
73 ns -= NSEC_PER_SEC;
74 ++secs;
75 }
76 ts->tv_sec = secs;
77 ts->tv_nsec = ns;
78
86 return 0; 79 return 0;
87} 80}
88 81
@@ -107,7 +100,17 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
107 secs += gtod->wall_to_monotonic.tv_sec; 100 secs += gtod->wall_to_monotonic.tv_sec;
108 ns += gtod->wall_to_monotonic.tv_nsec; 101 ns += gtod->wall_to_monotonic.tv_nsec;
109 } while (unlikely(read_seqretry(&gtod->lock, seq))); 102 } while (unlikely(read_seqretry(&gtod->lock, seq)));
110 vset_normalized_timespec(ts, secs, ns); 103
104 /* wall_time_nsec and wall_to_monotonic.tv_nsec are
105 * guaranteed to be between 0 and NSEC_PER_SEC.
106 */
107 if (ns >= NSEC_PER_SEC) {
108 ns -= NSEC_PER_SEC;
109 ++secs;
110 }
111 ts->tv_sec = secs;
112 ts->tv_nsec = ns;
113
111 return 0; 114 return 0;
112} 115}
113 116
@@ -157,3 +160,32 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
157} 160}
158int gettimeofday(struct timeval *, struct timezone *) 161int gettimeofday(struct timeval *, struct timezone *)
159 __attribute__((weak, alias("__vdso_gettimeofday"))); 162 __attribute__((weak, alias("__vdso_gettimeofday")));
163
164/* This will break when the xtime seconds get inaccurate, but that is
165 * unlikely */
166
167static __always_inline long time_syscall(long *t)
168{
169 long secs;
170 asm volatile("syscall"
171 : "=a" (secs)
172 : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
173 return secs;
174}
175
176notrace time_t __vdso_time(time_t *t)
177{
178 time_t result;
179
180 if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
181 return time_syscall(t);
182
183 /* This is atomic on x86_64 so we don't need any locks. */
184 result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
185
186 if (t)
187 *t = result;
188 return result;
189}
190int time(time_t *t)
191 __attribute__((weak, alias("__vdso_time")));
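
The open-coded replacement for vset_normalized_timespec() above simply carries whole seconds out of the nanosecond field; since each input is below NSEC_PER_SEC, the coarse path needs at most one subtraction, while the loop form handles the general case. A standalone userspace version of that normalization (the normalize_ts() helper name is made up for this sketch):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Carry nanoseconds into seconds until tv_nsec is in range. */
static void normalize_ts(struct timespec *ts, long secs, long ns)
{
        while (ns >= NSEC_PER_SEC) {
                ns -= NSEC_PER_SEC;
                ++secs;
        }
        ts->tv_sec = secs;
        ts->tv_nsec = ns;
}

int main(void)
{
        struct timespec ts;

        /* 10 s plus 1.5 s worth of nanoseconds -> 11.500000000 */
        normalize_ts(&ts, 10, 1500000000L);
        printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
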
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
index 4e5dd3b4de7f..b96b2677cad8 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/vdso/vdso.lds.S
@@ -23,15 +23,10 @@ VERSION {
23 __vdso_gettimeofday; 23 __vdso_gettimeofday;
24 getcpu; 24 getcpu;
25 __vdso_getcpu; 25 __vdso_getcpu;
26 time;
27 __vdso_time;
26 local: *; 28 local: *;
27 }; 29 };
28} 30}
29 31
30VDSO64_PRELINK = VDSO_PRELINK; 32VDSO64_PRELINK = VDSO_PRELINK;
31
32/*
33 * Define VDSO64_x for each VEXTERN(x), for use via VDSO64_SYMBOL.
34 */
35#define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
36#include "vextern.h"
37#undef VEXTERN
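
With time/__vdso_time now exported from the vDSO, an ordinary time(2) call can be satisfied without a kernel entry whenever the C library chooses the vDSO symbol; which path is actually taken is glibc's decision, not something this patch controls. A trivial userspace check (older glibc needs -lrt for clock_gettime()):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;
        time_t t = time(NULL);                  /* may resolve to __vdso_time */

        clock_gettime(CLOCK_REALTIME, &ts);     /* may also resolve via the vDSO */
        printf("time()=%lld clock_gettime()=%lld\n",
               (long long)t, (long long)ts.tv_sec);
        return 0;
}
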
diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
deleted file mode 100644
index 1683ba2ae3e8..000000000000
--- a/arch/x86/vdso/vextern.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef VEXTERN
2#include <asm/vsyscall.h>
3#define VEXTERN(x) \
4 extern typeof(x) *vdso_ ## x __attribute__((visibility("hidden")));
5#endif
6
7#define VMAGIC 0xfeedbabeabcdefabUL
8
9/* Any kernel variables used in the vDSO must be exported in the main
10 kernel's vmlinux.lds.S/vsyscall.h/proper __section and
11 put into vextern.h and be referenced as a pointer with vdso prefix.
12 The main kernel later fills in the values. */
13
14VEXTERN(jiffies)
15VEXTERN(vgetcpu_mode)
16VEXTERN(vsyscall_gtod_data)
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 9fbc6b20026b..5463ad558573 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -11,14 +11,13 @@
11#include <linux/time.h> 11#include <linux/time.h>
12#include <asm/vsyscall.h> 12#include <asm/vsyscall.h>
13#include <asm/vgtod.h> 13#include <asm/vgtod.h>
14#include "vextern.h"
15 14
16notrace long 15notrace long
17__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) 16__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
18{ 17{
19 unsigned int p; 18 unsigned int p;
20 19
21 if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) { 20 if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
22 /* Load per CPU data from RDTSCP */ 21 /* Load per CPU data from RDTSCP */
23 native_read_tscp(&p); 22 native_read_tscp(&p);
24 } else { 23 } else {
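
__vdso_getcpu() is what ultimately backs glibc's sched_getcpu() on x86-64 when the vDSO provides it, using RDTSCP where VVAR(vgetcpu_mode) says so. From userspace the portable way to exercise the same path is simply:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        int cpu = sched_getcpu();       /* backed by getcpu()/the vDSO where present */

        if (cpu < 0)
                perror("sched_getcpu");
        else
                printf("running on CPU %d\n", cpu);
        return 0;
}
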
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 4b5d26f108bb..7abd2be0f9b9 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -15,9 +15,6 @@
15#include <asm/proto.h> 15#include <asm/proto.h>
16#include <asm/vdso.h> 16#include <asm/vdso.h>
17 17
18#include "vextern.h" /* Just for VMAGIC. */
19#undef VEXTERN
20
21unsigned int __read_mostly vdso_enabled = 1; 18unsigned int __read_mostly vdso_enabled = 1;
22 19
23extern char vdso_start[], vdso_end[]; 20extern char vdso_start[], vdso_end[];
@@ -26,20 +23,10 @@ extern unsigned short vdso_sync_cpuid;
26static struct page **vdso_pages; 23static struct page **vdso_pages;
27static unsigned vdso_size; 24static unsigned vdso_size;
28 25
29static inline void *var_ref(void *p, char *name)
30{
31 if (*(void **)p != (void *)VMAGIC) {
32 printk("VDSO: variable %s broken\n", name);
33 vdso_enabled = 0;
34 }
35 return p;
36}
37
38static int __init init_vdso_vars(void) 26static int __init init_vdso_vars(void)
39{ 27{
40 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; 28 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
41 int i; 29 int i;
42 char *vbase;
43 30
44 vdso_size = npages << PAGE_SHIFT; 31 vdso_size = npages << PAGE_SHIFT;
45 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); 32 vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
@@ -54,20 +41,6 @@ static int __init init_vdso_vars(void)
54 copy_page(page_address(p), vdso_start + i*PAGE_SIZE); 41 copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
55 } 42 }
56 43
57 vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
58 if (!vbase)
59 goto oom;
60
61 if (memcmp(vbase, "\177ELF", 4)) {
62 printk("VDSO: I'm broken; not ELF\n");
63 vdso_enabled = 0;
64 }
65
66#define VEXTERN(x) \
67 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
68#include "vextern.h"
69#undef VEXTERN
70 vunmap(vbase);
71 return 0; 44 return 0;
72 45
73 oom: 46 oom:
diff --git a/arch/x86/vdso/vvar.c b/arch/x86/vdso/vvar.c
deleted file mode 100644
index 1b7e703684f9..000000000000
--- a/arch/x86/vdso/vvar.c
+++ /dev/null
@@ -1,12 +0,0 @@
1/* Define pointer to external vDSO variables.
2 These are part of the vDSO. The kernel fills in the real addresses
3 at boot time. This is done because when the vdso is linked the
4 kernel isn't yet and we don't know the final addresses. */
5#include <linux/kernel.h>
6#include <linux/time.h>
7#include <asm/vsyscall.h>
8#include <asm/timex.h>
9#include <asm/vgtod.h>
10
11#define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC;
12#include "vextern.h"
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 02d752460371..dc708dcc62f1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -75,67 +75,12 @@
75#include "mmu.h" 75#include "mmu.h"
76#include "debugfs.h" 76#include "debugfs.h"
77 77
78#define MMU_UPDATE_HISTO 30
79
80/* 78/*
81 * Protects atomic reservation decrease/increase against concurrent increases. 79 * Protects atomic reservation decrease/increase against concurrent increases.
82 * Also protects non-atomic updates of current_pages and balloon lists. 80 * Also protects non-atomic updates of current_pages and balloon lists.
83 */ 81 */
84DEFINE_SPINLOCK(xen_reservation_lock); 82DEFINE_SPINLOCK(xen_reservation_lock);
85 83
86#ifdef CONFIG_XEN_DEBUG_FS
87
88static struct {
89 u32 pgd_update;
90 u32 pgd_update_pinned;
91 u32 pgd_update_batched;
92
93 u32 pud_update;
94 u32 pud_update_pinned;
95 u32 pud_update_batched;
96
97 u32 pmd_update;
98 u32 pmd_update_pinned;
99 u32 pmd_update_batched;
100
101 u32 pte_update;
102 u32 pte_update_pinned;
103 u32 pte_update_batched;
104
105 u32 mmu_update;
106 u32 mmu_update_extended;
107 u32 mmu_update_histo[MMU_UPDATE_HISTO];
108
109 u32 prot_commit;
110 u32 prot_commit_batched;
111
112 u32 set_pte_at;
113 u32 set_pte_at_batched;
114 u32 set_pte_at_pinned;
115 u32 set_pte_at_current;
116 u32 set_pte_at_kernel;
117} mmu_stats;
118
119static u8 zero_stats;
120
121static inline void check_zero(void)
122{
123 if (unlikely(zero_stats)) {
124 memset(&mmu_stats, 0, sizeof(mmu_stats));
125 zero_stats = 0;
126 }
127}
128
129#define ADD_STATS(elem, val) \
130 do { check_zero(); mmu_stats.elem += (val); } while(0)
131
132#else /* !CONFIG_XEN_DEBUG_FS */
133
134#define ADD_STATS(elem, val) do { (void)(val); } while(0)
135
136#endif /* CONFIG_XEN_DEBUG_FS */
137
138
139/* 84/*
140 * Identity map, in addition to plain kernel map. This needs to be 85 * Identity map, in addition to plain kernel map. This needs to be
141 * large enough to allocate page table pages to allocate the rest. 86 * large enough to allocate page table pages to allocate the rest.
@@ -243,11 +188,6 @@ static bool xen_page_pinned(void *ptr)
243 return PagePinned(page); 188 return PagePinned(page);
244} 189}
245 190
246static bool xen_iomap_pte(pte_t pte)
247{
248 return pte_flags(pte) & _PAGE_IOMAP;
249}
250
251void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) 191void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
252{ 192{
253 struct multicall_space mcs; 193 struct multicall_space mcs;
@@ -257,7 +197,7 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
257 u = mcs.args; 197 u = mcs.args;
258 198
259 /* ptep might be kmapped when using 32-bit HIGHPTE */ 199 /* ptep might be kmapped when using 32-bit HIGHPTE */
260 u->ptr = arbitrary_virt_to_machine(ptep).maddr; 200 u->ptr = virt_to_machine(ptep).maddr;
261 u->val = pte_val_ma(pteval); 201 u->val = pte_val_ma(pteval);
262 202
263 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); 203 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
@@ -266,11 +206,6 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
266} 206}
267EXPORT_SYMBOL_GPL(xen_set_domain_pte); 207EXPORT_SYMBOL_GPL(xen_set_domain_pte);
268 208
269static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
270{
271 xen_set_domain_pte(ptep, pteval, DOMID_IO);
272}
273
274static void xen_extend_mmu_update(const struct mmu_update *update) 209static void xen_extend_mmu_update(const struct mmu_update *update)
275{ 210{
276 struct multicall_space mcs; 211 struct multicall_space mcs;
@@ -279,27 +214,17 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
279 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); 214 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
280 215
281 if (mcs.mc != NULL) { 216 if (mcs.mc != NULL) {
282 ADD_STATS(mmu_update_extended, 1);
283 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
284
285 mcs.mc->args[1]++; 217 mcs.mc->args[1]++;
286
287 if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
288 ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
289 else
290 ADD_STATS(mmu_update_histo[0], 1);
291 } else { 218 } else {
292 ADD_STATS(mmu_update, 1);
293 mcs = __xen_mc_entry(sizeof(*u)); 219 mcs = __xen_mc_entry(sizeof(*u));
294 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); 220 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
295 ADD_STATS(mmu_update_histo[1], 1);
296 } 221 }
297 222
298 u = mcs.args; 223 u = mcs.args;
299 *u = *update; 224 *u = *update;
300} 225}
301 226
302void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) 227static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
303{ 228{
304 struct mmu_update u; 229 struct mmu_update u;
305 230
@@ -312,17 +237,13 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
312 u.val = pmd_val_ma(val); 237 u.val = pmd_val_ma(val);
313 xen_extend_mmu_update(&u); 238 xen_extend_mmu_update(&u);
314 239
315 ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
316
317 xen_mc_issue(PARAVIRT_LAZY_MMU); 240 xen_mc_issue(PARAVIRT_LAZY_MMU);
318 241
319 preempt_enable(); 242 preempt_enable();
320} 243}
321 244
322void xen_set_pmd(pmd_t *ptr, pmd_t val) 245static void xen_set_pmd(pmd_t *ptr, pmd_t val)
323{ 246{
324 ADD_STATS(pmd_update, 1);
325
326 /* If page is not pinned, we can just update the entry 247 /* If page is not pinned, we can just update the entry
327 directly */ 248 directly */
328 if (!xen_page_pinned(ptr)) { 249 if (!xen_page_pinned(ptr)) {
@@ -330,8 +251,6 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
330 return; 251 return;
331 } 252 }
332 253
333 ADD_STATS(pmd_update_pinned, 1);
334
335 xen_set_pmd_hyper(ptr, val); 254 xen_set_pmd_hyper(ptr, val);
336} 255}
337 256
@@ -344,35 +263,34 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
344 set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); 263 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
345} 264}
346 265
347void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, 266static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
348 pte_t *ptep, pte_t pteval)
349{ 267{
350 if (xen_iomap_pte(pteval)) { 268 struct mmu_update u;
351 xen_set_iomap_pte(ptep, pteval);
352 goto out;
353 }
354 269
355 ADD_STATS(set_pte_at, 1); 270 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
356// ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep)); 271 return false;
357 ADD_STATS(set_pte_at_current, mm == current->mm);
358 ADD_STATS(set_pte_at_kernel, mm == &init_mm);
359 272
360 if (mm == current->mm || mm == &init_mm) { 273 xen_mc_batch();
361 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
362 struct multicall_space mcs;
363 mcs = xen_mc_entry(0);
364 274
365 MULTI_update_va_mapping(mcs.mc, addr, pteval, 0); 275 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
366 ADD_STATS(set_pte_at_batched, 1); 276 u.val = pte_val_ma(pteval);
367 xen_mc_issue(PARAVIRT_LAZY_MMU); 277 xen_extend_mmu_update(&u);
368 goto out; 278
369 } else 279 xen_mc_issue(PARAVIRT_LAZY_MMU);
370 if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
371 goto out;
372 }
373 xen_set_pte(ptep, pteval);
374 280
375out: return; 281 return true;
282}
283
284static void xen_set_pte(pte_t *ptep, pte_t pteval)
285{
286 if (!xen_batched_set_pte(ptep, pteval))
287 native_set_pte(ptep, pteval);
288}
289
290static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
291 pte_t *ptep, pte_t pteval)
292{
293 xen_set_pte(ptep, pteval);
376} 294}
377 295
378pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, 296pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
@@ -389,13 +307,10 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
389 307
390 xen_mc_batch(); 308 xen_mc_batch();
391 309
392 u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; 310 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
393 u.val = pte_val_ma(pte); 311 u.val = pte_val_ma(pte);
394 xen_extend_mmu_update(&u); 312 xen_extend_mmu_update(&u);
395 313
396 ADD_STATS(prot_commit, 1);
397 ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
398
399 xen_mc_issue(PARAVIRT_LAZY_MMU); 314 xen_mc_issue(PARAVIRT_LAZY_MMU);
400} 315}
401 316
@@ -463,7 +378,7 @@ static pteval_t iomap_pte(pteval_t val)
463 return val; 378 return val;
464} 379}
465 380
466pteval_t xen_pte_val(pte_t pte) 381static pteval_t xen_pte_val(pte_t pte)
467{ 382{
468 pteval_t pteval = pte.pte; 383 pteval_t pteval = pte.pte;
469 384
@@ -480,7 +395,7 @@ pteval_t xen_pte_val(pte_t pte)
480} 395}
481PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); 396PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
482 397
483pgdval_t xen_pgd_val(pgd_t pgd) 398static pgdval_t xen_pgd_val(pgd_t pgd)
484{ 399{
485 return pte_mfn_to_pfn(pgd.pgd); 400 return pte_mfn_to_pfn(pgd.pgd);
486} 401}
@@ -511,7 +426,7 @@ void xen_set_pat(u64 pat)
511 WARN_ON(pat != 0x0007010600070106ull); 426 WARN_ON(pat != 0x0007010600070106ull);
512} 427}
513 428
514pte_t xen_make_pte(pteval_t pte) 429static pte_t xen_make_pte(pteval_t pte)
515{ 430{
516 phys_addr_t addr = (pte & PTE_PFN_MASK); 431 phys_addr_t addr = (pte & PTE_PFN_MASK);
517 432
@@ -581,20 +496,20 @@ pte_t xen_make_pte_debug(pteval_t pte)
581PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); 496PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
582#endif 497#endif
583 498
584pgd_t xen_make_pgd(pgdval_t pgd) 499static pgd_t xen_make_pgd(pgdval_t pgd)
585{ 500{
586 pgd = pte_pfn_to_mfn(pgd); 501 pgd = pte_pfn_to_mfn(pgd);
587 return native_make_pgd(pgd); 502 return native_make_pgd(pgd);
588} 503}
589PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); 504PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
590 505
591pmdval_t xen_pmd_val(pmd_t pmd) 506static pmdval_t xen_pmd_val(pmd_t pmd)
592{ 507{
593 return pte_mfn_to_pfn(pmd.pmd); 508 return pte_mfn_to_pfn(pmd.pmd);
594} 509}
595PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); 510PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
596 511
597void xen_set_pud_hyper(pud_t *ptr, pud_t val) 512static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
598{ 513{
599 struct mmu_update u; 514 struct mmu_update u;
600 515
@@ -607,17 +522,13 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
607 u.val = pud_val_ma(val); 522 u.val = pud_val_ma(val);
608 xen_extend_mmu_update(&u); 523 xen_extend_mmu_update(&u);
609 524
610 ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
611
612 xen_mc_issue(PARAVIRT_LAZY_MMU); 525 xen_mc_issue(PARAVIRT_LAZY_MMU);
613 526
614 preempt_enable(); 527 preempt_enable();
615} 528}
616 529
617void xen_set_pud(pud_t *ptr, pud_t val) 530static void xen_set_pud(pud_t *ptr, pud_t val)
618{ 531{
619 ADD_STATS(pud_update, 1);
620
621 /* If page is not pinned, we can just update the entry 532 /* If page is not pinned, we can just update the entry
622 directly */ 533 directly */
623 if (!xen_page_pinned(ptr)) { 534 if (!xen_page_pinned(ptr)) {
@@ -625,56 +536,28 @@ void xen_set_pud(pud_t *ptr, pud_t val)
625 return; 536 return;
626 } 537 }
627 538
628 ADD_STATS(pud_update_pinned, 1);
629
630 xen_set_pud_hyper(ptr, val); 539 xen_set_pud_hyper(ptr, val);
631} 540}
632 541
633void xen_set_pte(pte_t *ptep, pte_t pte)
634{
635 if (xen_iomap_pte(pte)) {
636 xen_set_iomap_pte(ptep, pte);
637 return;
638 }
639
640 ADD_STATS(pte_update, 1);
641// ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
642 ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
643
644#ifdef CONFIG_X86_PAE 542#ifdef CONFIG_X86_PAE
645 ptep->pte_high = pte.pte_high; 543static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
646 smp_wmb();
647 ptep->pte_low = pte.pte_low;
648#else
649 *ptep = pte;
650#endif
651}
652
653#ifdef CONFIG_X86_PAE
654void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
655{ 544{
656 if (xen_iomap_pte(pte)) {
657 xen_set_iomap_pte(ptep, pte);
658 return;
659 }
660
661 set_64bit((u64 *)ptep, native_pte_val(pte)); 545 set_64bit((u64 *)ptep, native_pte_val(pte));
662} 546}
663 547
664void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 548static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
665{ 549{
666 ptep->pte_low = 0; 550 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
667 smp_wmb(); /* make sure low gets written first */ 551 native_pte_clear(mm, addr, ptep);
668 ptep->pte_high = 0;
669} 552}
670 553
671void xen_pmd_clear(pmd_t *pmdp) 554static void xen_pmd_clear(pmd_t *pmdp)
672{ 555{
673 set_pmd(pmdp, __pmd(0)); 556 set_pmd(pmdp, __pmd(0));
674} 557}
675#endif /* CONFIG_X86_PAE */ 558#endif /* CONFIG_X86_PAE */
676 559
677pmd_t xen_make_pmd(pmdval_t pmd) 560static pmd_t xen_make_pmd(pmdval_t pmd)
678{ 561{
679 pmd = pte_pfn_to_mfn(pmd); 562 pmd = pte_pfn_to_mfn(pmd);
680 return native_make_pmd(pmd); 563 return native_make_pmd(pmd);
@@ -682,13 +565,13 @@ pmd_t xen_make_pmd(pmdval_t pmd)
682PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); 565PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
683 566
684#if PAGETABLE_LEVELS == 4 567#if PAGETABLE_LEVELS == 4
685pudval_t xen_pud_val(pud_t pud) 568static pudval_t xen_pud_val(pud_t pud)
686{ 569{
687 return pte_mfn_to_pfn(pud.pud); 570 return pte_mfn_to_pfn(pud.pud);
688} 571}
689PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); 572PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
690 573
691pud_t xen_make_pud(pudval_t pud) 574static pud_t xen_make_pud(pudval_t pud)
692{ 575{
693 pud = pte_pfn_to_mfn(pud); 576 pud = pte_pfn_to_mfn(pud);
694 577
@@ -696,7 +579,7 @@ pud_t xen_make_pud(pudval_t pud)
696} 579}
697PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); 580PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
698 581
699pgd_t *xen_get_user_pgd(pgd_t *pgd) 582static pgd_t *xen_get_user_pgd(pgd_t *pgd)
700{ 583{
701 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); 584 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
702 unsigned offset = pgd - pgd_page; 585 unsigned offset = pgd - pgd_page;
@@ -728,7 +611,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
728 * 2. It is always pinned 611 * 2. It is always pinned
729 * 3. It has no user pagetable attached to it 612 * 3. It has no user pagetable attached to it
730 */ 613 */
731void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) 614static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
732{ 615{
733 preempt_disable(); 616 preempt_disable();
734 617
@@ -741,12 +624,10 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
741 preempt_enable(); 624 preempt_enable();
742} 625}
743 626
744void xen_set_pgd(pgd_t *ptr, pgd_t val) 627static void xen_set_pgd(pgd_t *ptr, pgd_t val)
745{ 628{
746 pgd_t *user_ptr = xen_get_user_pgd(ptr); 629 pgd_t *user_ptr = xen_get_user_pgd(ptr);
747 630
748 ADD_STATS(pgd_update, 1);
749
750 /* If page is not pinned, we can just update the entry 631 /* If page is not pinned, we can just update the entry
751 directly */ 632 directly */
752 if (!xen_page_pinned(ptr)) { 633 if (!xen_page_pinned(ptr)) {
@@ -758,9 +639,6 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
758 return; 639 return;
759 } 640 }
760 641
761 ADD_STATS(pgd_update_pinned, 1);
762 ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
763
764 /* If it's pinned, then we can at least batch the kernel and 642 /* If it's pinned, then we can at least batch the kernel and
765 user updates together. */ 643 user updates together. */
766 xen_mc_batch(); 644 xen_mc_batch();
@@ -1162,14 +1040,14 @@ void xen_mm_unpin_all(void)
1162 spin_unlock(&pgd_lock); 1040 spin_unlock(&pgd_lock);
1163} 1041}
1164 1042
1165void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) 1043static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1166{ 1044{
1167 spin_lock(&next->page_table_lock); 1045 spin_lock(&next->page_table_lock);
1168 xen_pgd_pin(next); 1046 xen_pgd_pin(next);
1169 spin_unlock(&next->page_table_lock); 1047 spin_unlock(&next->page_table_lock);
1170} 1048}
1171 1049
1172void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) 1050static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1173{ 1051{
1174 spin_lock(&mm->page_table_lock); 1052 spin_lock(&mm->page_table_lock);
1175 xen_pgd_pin(mm); 1053 xen_pgd_pin(mm);
@@ -1256,7 +1134,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
1256 * pagetable because of lazy tlb flushing. This means we need to 1134
1257 * switch all CPUs off this pagetable before we can unpin it. 1135 * switch all CPUs off this pagetable before we can unpin it.
1258 */ 1136 */
1259void xen_exit_mmap(struct mm_struct *mm) 1137static void xen_exit_mmap(struct mm_struct *mm)
1260{ 1138{
1261 get_cpu(); /* make sure we don't move around */ 1139 get_cpu(); /* make sure we don't move around */
1262 xen_drop_mm_ref(mm); 1140 xen_drop_mm_ref(mm);
@@ -2371,7 +2249,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2371 struct remap_data *rmd = data; 2249 struct remap_data *rmd = data;
2372 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); 2250 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2373 2251
2374 rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr; 2252 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2375 rmd->mmu_update->val = pte_val_ma(pte); 2253 rmd->mmu_update->val = pte_val_ma(pte);
2376 rmd->mmu_update++; 2254 rmd->mmu_update++;
2377 2255
@@ -2425,7 +2303,6 @@ out:
2425EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2303EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2426 2304
2427#ifdef CONFIG_XEN_DEBUG_FS 2305#ifdef CONFIG_XEN_DEBUG_FS
2428
2429static int p2m_dump_open(struct inode *inode, struct file *filp) 2306static int p2m_dump_open(struct inode *inode, struct file *filp)
2430{ 2307{
2431 return single_open(filp, p2m_dump_show, NULL); 2308 return single_open(filp, p2m_dump_show, NULL);
@@ -2437,65 +2314,4 @@ static const struct file_operations p2m_dump_fops = {
2437 .llseek = seq_lseek, 2314 .llseek = seq_lseek,
2438 .release = single_release, 2315 .release = single_release,
2439}; 2316};
2440 2317#endif /* CONFIG_XEN_DEBUG_FS */
2441static struct dentry *d_mmu_debug;
2442
2443static int __init xen_mmu_debugfs(void)
2444{
2445 struct dentry *d_xen = xen_init_debugfs();
2446
2447 if (d_xen == NULL)
2448 return -ENOMEM;
2449
2450 d_mmu_debug = debugfs_create_dir("mmu", d_xen);
2451
2452 debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
2453
2454 debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
2455 debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
2456 &mmu_stats.pgd_update_pinned);
2457 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
2458 &mmu_stats.pgd_update_pinned);
2459
2460 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
2461 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
2462 &mmu_stats.pud_update_pinned);
2463 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
2464 &mmu_stats.pud_update_pinned);
2465
2466 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
2467 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
2468 &mmu_stats.pmd_update_pinned);
2469 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
2470 &mmu_stats.pmd_update_pinned);
2471
2472 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
2473// debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
2474// &mmu_stats.pte_update_pinned);
2475 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
2476 &mmu_stats.pte_update_pinned);
2477
2478 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
2479 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
2480 &mmu_stats.mmu_update_extended);
2481 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2482 mmu_stats.mmu_update_histo, 20);
2483
2484 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2485 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2486 &mmu_stats.set_pte_at_batched);
2487 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2488 &mmu_stats.set_pte_at_current);
2489 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2490 &mmu_stats.set_pte_at_kernel);
2491
2492 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2493 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2494 &mmu_stats.prot_commit_batched);
2495
2496 debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
2497 return 0;
2498}
2499fs_initcall(xen_mmu_debugfs);
2500
2501#endif /* CONFIG_XEN_DEBUG_FS */
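
xen_batched_set_pte() above queues the PTE write into the current multicall batch when the CPU is in lazy MMU mode and otherwise tells the caller to fall back to a native write. A very rough userspace model of that batch-or-write-directly pattern -- no hypervisor involved; the batch array stands in for the multicall buffer, lazy_mode for PARAVIRT_LAZY_MMU, flush_batch() for xen_mc_issue(), and the set_entry()/batched_set() names are invented for the sketch:

#include <stdio.h>

#define BATCH_MAX 8

struct update { unsigned long *ptr; unsigned long val; };

static struct update batch[BATCH_MAX];
static int batch_len;
static int lazy_mode;                   /* analogue of PARAVIRT_LAZY_MMU */

static void flush_batch(void)
{
        for (int i = 0; i < batch_len; i++)
                *batch[i].ptr = batch[i].val;   /* the "hypercall" */
        batch_len = 0;
}

static int batched_set(unsigned long *ptr, unsigned long val)
{
        if (!lazy_mode)
                return 0;               /* caller does the direct write */
        if (batch_len == BATCH_MAX)
                flush_batch();
        batch[batch_len++] = (struct update){ ptr, val };
        return 1;
}

static void set_entry(unsigned long *ptr, unsigned long val)
{
        if (!batched_set(ptr, val))
                *ptr = val;             /* native_set_pte() analogue */
}

int main(void)
{
        unsigned long e[3] = { 0, 0, 0 };

        set_entry(&e[0], 1);            /* direct: not in lazy mode */
        lazy_mode = 1;
        set_entry(&e[1], 2);            /* queued */
        set_entry(&e[2], 3);            /* queued */
        flush_batch();
        printf("%lu %lu %lu\n", e[0], e[1], e[2]);
        return 0;
}
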
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 537bb9aab777..73809bb951b4 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
15 15
16void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); 16void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
17 17
18
19void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
20void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
21void xen_exit_mmap(struct mm_struct *mm);
22
23pteval_t xen_pte_val(pte_t);
24pmdval_t xen_pmd_val(pmd_t);
25pgdval_t xen_pgd_val(pgd_t);
26
27pte_t xen_make_pte(pteval_t);
28pmd_t xen_make_pmd(pmdval_t);
29pgd_t xen_make_pgd(pgdval_t);
30
31void xen_set_pte(pte_t *ptep, pte_t pteval);
32void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
33 pte_t *ptep, pte_t pteval);
34
35#ifdef CONFIG_X86_PAE
36void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
37void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
38void xen_pmd_clear(pmd_t *pmdp);
39#endif /* CONFIG_X86_PAE */
40
41void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
42void xen_set_pud(pud_t *ptr, pud_t val);
43void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
44void xen_set_pud_hyper(pud_t *ptr, pud_t val);
45
46#if PAGETABLE_LEVELS == 4
47pudval_t xen_pud_val(pud_t pud);
48pud_t xen_make_pud(pudval_t pudval);
49void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
50void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
51#endif
52
53pgd_t *xen_get_user_pgd(pgd_t *pgd);
54
55pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 18pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
56void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, 19void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
57 pte_t *ptep, pte_t pte); 20 pte_t *ptep, pte_t pte);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 7c275f5d0df0..5d43c1f8ada8 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -20,12 +20,6 @@ config XTENSA
20config RWSEM_XCHGADD_ALGORITHM 20config RWSEM_XCHGADD_ALGORITHM
21 def_bool y 21 def_bool y
22 22
23config GENERIC_FIND_NEXT_BIT
24 def_bool y
25
26config GENERIC_FIND_BIT_LE
27 def_bool y
28
29config GENERIC_HWEIGHT 23config GENERIC_HWEIGHT
30 def_bool y 24 def_bool y
31 25
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 161bb89e98c8..7a5591a71f85 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -171,10 +171,6 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
171#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 171#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
172#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 172#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
173 173
174#ifdef CONFIG_MMU
175#define WANT_PAGE_VIRTUAL
176#endif
177
178#endif /* __ASSEMBLY__ */ 174#endif /* __ASSEMBLY__ */
179 175
180#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 176#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 4bb91a970f1f..ca81654f3ec2 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -14,8 +14,6 @@
14#include <asm/mmu_context.h> 14#include <asm/mmu_context.h>
15#include <asm/page.h> 15#include <asm/page.h>
16 16
17DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
18
19void __init paging_init(void) 17void __init paging_init(void)
20{ 18{
21 memset(swapper_pg_dir, 0, PAGE_SIZE); 19 memset(swapper_pg_dir, 0, PAGE_SIZE);
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
deleted file mode 100644
index 697992738205..000000000000
--- a/arch/xtensa/mm/pgtable.c
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * arch/xtensa/mm/pgtable.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 *
10 * Chris Zankel <chris@zankel.net>
11 */
12
13#if (DCACHE_SIZE > PAGE_SIZE)
14
15pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
16{
17 pte_t *pte = NULL, *p;
18 int color = ADDR_COLOR(address);
19 int i;
20
21 p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
22
23 if (likely(p)) {
24 split_page(virt_to_page(p), COLOR_ORDER);
25
26 for (i = 0; i < COLOR_SIZE; i++) {
27 if (ADDR_COLOR(p) == color)
28 pte = p;
29 else
30 free_page(p);
31 p += PTRS_PER_PTE;
32 }
33 clear_page(pte);
34 }
35 return pte;
36}
37
38#ifdef PROFILING
39
40int mask;
41int hit;
42int flush;
43
44#endif
45
46struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
47{
48 struct page *page = NULL, *p;
49 int color = ADDR_COLOR(address);
50
51 p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
52
53 if (likely(p)) {
54 split_page(p, COLOR_ORDER);
55
56 for (i = 0; i < PAGE_ORDER; i++) {
57 if (PADDR_COLOR(page_address(p)) == color)
58 page = p;
59 else
60 __free_page(p);
61 p++;
62 }
63 clear_highpage(page);
64 }
65
66 return page;
67}
68
69#endif
70
71
72
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 471fdcc5df85..bcaf16ee6ad1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,10 +30,8 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
30 30
31static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *, 31static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
32 struct cgroup *); 32 struct cgroup *);
33static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *, 33static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
34 struct task_struct *, bool); 34static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
35static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
36 struct cgroup *, struct task_struct *, bool);
37static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *); 35static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
38static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); 36static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
39 37
@@ -46,8 +44,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
46struct cgroup_subsys blkio_subsys = { 44struct cgroup_subsys blkio_subsys = {
47 .name = "blkio", 45 .name = "blkio",
48 .create = blkiocg_create, 46 .create = blkiocg_create,
49 .can_attach = blkiocg_can_attach, 47 .can_attach_task = blkiocg_can_attach_task,
50 .attach = blkiocg_attach, 48 .attach_task = blkiocg_attach_task,
51 .destroy = blkiocg_destroy, 49 .destroy = blkiocg_destroy,
52 .populate = blkiocg_populate, 50 .populate = blkiocg_populate,
53#ifdef CONFIG_BLK_CGROUP 51#ifdef CONFIG_BLK_CGROUP
@@ -385,25 +383,40 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
385 383
386 spin_lock_irqsave(&blkg->stats_lock, flags); 384 spin_lock_irqsave(&blkg->stats_lock, flags);
387 blkg->stats.time += time; 385 blkg->stats.time += time;
386#ifdef CONFIG_DEBUG_BLK_CGROUP
388 blkg->stats.unaccounted_time += unaccounted_time; 387 blkg->stats.unaccounted_time += unaccounted_time;
388#endif
389 spin_unlock_irqrestore(&blkg->stats_lock, flags); 389 spin_unlock_irqrestore(&blkg->stats_lock, flags);
390} 390}
391EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used); 391EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
392 392
393/*
394 * should be called under rcu read lock or queue lock to make sure blkg pointer
395 * is valid.
396 */
393void blkiocg_update_dispatch_stats(struct blkio_group *blkg, 397void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
394 uint64_t bytes, bool direction, bool sync) 398 uint64_t bytes, bool direction, bool sync)
395{ 399{
396 struct blkio_group_stats *stats; 400 struct blkio_group_stats_cpu *stats_cpu;
397 unsigned long flags; 401 unsigned long flags;
398 402
399 spin_lock_irqsave(&blkg->stats_lock, flags); 403 /*
400 stats = &blkg->stats; 404 * Disabling interrupts to provide mutual exclusion between two
401 stats->sectors += bytes >> 9; 405 * writes on same cpu. It probably is not needed for 64bit. Not
402 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction, 406 * optimizing that case yet.
403 sync); 407 */
404 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes, 408 local_irq_save(flags);
405 direction, sync); 409
406 spin_unlock_irqrestore(&blkg->stats_lock, flags); 410 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
411
412 u64_stats_update_begin(&stats_cpu->syncp);
413 stats_cpu->sectors += bytes >> 9;
414 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
415 1, direction, sync);
416 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
417 bytes, direction, sync);
418 u64_stats_update_end(&stats_cpu->syncp);
419 local_irq_restore(flags);
407} 420}
408EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats); 421EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
409 422
@@ -426,18 +439,44 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
426} 439}
427EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats); 440EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
428 441
442/* Merged stats are per cpu. */
429void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction, 443void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
430 bool sync) 444 bool sync)
431{ 445{
446 struct blkio_group_stats_cpu *stats_cpu;
432 unsigned long flags; 447 unsigned long flags;
433 448
434 spin_lock_irqsave(&blkg->stats_lock, flags); 449 /*
435 blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction, 450 * Disabling interrupts to provide mutual exclusion between two
436 sync); 451 * writes on same cpu. It probably is not needed for 64bit. Not
437 spin_unlock_irqrestore(&blkg->stats_lock, flags); 452 * optimizing that case yet.
453 */
454 local_irq_save(flags);
455
456 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
457
458 u64_stats_update_begin(&stats_cpu->syncp);
459 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
460 direction, sync);
461 u64_stats_update_end(&stats_cpu->syncp);
462 local_irq_restore(flags);
438} 463}
439EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats); 464EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
440 465
466/*
467 * This function allocates the per cpu stats for blkio_group. Should be called
468 * from sleepable context as alloc_per_cpu() requires that.
469 */
470int blkio_alloc_blkg_stats(struct blkio_group *blkg)
471{
472 /* Allocate memory for per cpu stats */
473 blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
474 if (!blkg->stats_cpu)
475 return -ENOMEM;
476 return 0;
477}
478EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
479
441void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 480void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
442 struct blkio_group *blkg, void *key, dev_t dev, 481 struct blkio_group *blkg, void *key, dev_t dev,
443 enum blkio_policy_id plid) 482 enum blkio_policy_id plid)
@@ -508,6 +547,30 @@ struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
508} 547}
509EXPORT_SYMBOL_GPL(blkiocg_lookup_group); 548EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
510 549
550static void blkio_reset_stats_cpu(struct blkio_group *blkg)
551{
552 struct blkio_group_stats_cpu *stats_cpu;
553 int i, j, k;
554 /*
555 * Note: On 64 bit arch this should not be an issue. This has the
556 * possibility of returning some inconsistent value on 32bit arch
557 * as 64bit update on 32bit is non atomic. Taking care of this
558 * corner case makes code very complicated, like sending IPIs to
559 * cpus, taking care of stats of offline cpus etc.
560 *
561 * reset stats is anyway more of a debug feature and this sounds a
562 * corner case. So I am not complicating the code yet until and
563 * unless this becomes a real issue.
564 */
565 for_each_possible_cpu(i) {
566 stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
567 stats_cpu->sectors = 0;
568 for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
569 for (k = 0; k < BLKIO_STAT_TOTAL; k++)
570 stats_cpu->stat_arr_cpu[j][k] = 0;
571 }
572}
573
511static int 574static int
512blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) 575blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
513{ 576{
@@ -552,7 +615,11 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
552 } 615 }
553#endif 616#endif
554 spin_unlock(&blkg->stats_lock); 617 spin_unlock(&blkg->stats_lock);
618
619 /* Reset Per cpu stats which don't take blkg->stats_lock */
620 blkio_reset_stats_cpu(blkg);
555 } 621 }
622
556 spin_unlock_irq(&blkcg->lock); 623 spin_unlock_irq(&blkcg->lock);
557 return 0; 624 return 0;
558} 625}
@@ -598,6 +665,59 @@ static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
598 return val; 665 return val;
599} 666}
600 667
668
669static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
670 enum stat_type_cpu type, enum stat_sub_type sub_type)
671{
672 int cpu;
673 struct blkio_group_stats_cpu *stats_cpu;
674 u64 val = 0, tval;
675
676 for_each_possible_cpu(cpu) {
677 unsigned int start;
678 stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
679
680 do {
681 start = u64_stats_fetch_begin(&stats_cpu->syncp);
682 if (type == BLKIO_STAT_CPU_SECTORS)
683 tval = stats_cpu->sectors;
684 else
685 tval = stats_cpu->stat_arr_cpu[type][sub_type];
686 } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
687
688 val += tval;
689 }
690
691 return val;
692}
693
694static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
695 struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
696{
697 uint64_t disk_total, val;
698 char key_str[MAX_KEY_LEN];
699 enum stat_sub_type sub_type;
700
701 if (type == BLKIO_STAT_CPU_SECTORS) {
702 val = blkio_read_stat_cpu(blkg, type, 0);
703 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
704 }
705
706 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
707 sub_type++) {
708 blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
709 val = blkio_read_stat_cpu(blkg, type, sub_type);
710 cb->fill(cb, key_str, val);
711 }
712
713 disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
714 blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
715
716 blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
717 cb->fill(cb, key_str, disk_total);
718 return disk_total;
719}
720
601/* This should be called with blkg->stats_lock held */ 721/* This should be called with blkg->stats_lock held */
602static uint64_t blkio_get_stat(struct blkio_group *blkg, 722static uint64_t blkio_get_stat(struct blkio_group *blkg,
603 struct cgroup_map_cb *cb, dev_t dev, enum stat_type type) 723 struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
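
blkio_read_stat_cpu() above sums per-CPU slots that the submission path updates without taking blkg->stats_lock; the u64_stats_fetch_begin/retry pair is only needed because 64-bit stores are not atomic on 32-bit. A userspace analogue of the same per-CPU-counter idea, with one slot per thread and C11 atomics standing in for u64_stats_sync (the sectors array, worker() and read_stat() helpers are invented for the sketch; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_WORKERS 4

static _Atomic unsigned long long sectors[NR_WORKERS];

static void *worker(void *arg)
{
        int id = (int)(long)arg;

        /* Each worker only touches its own slot: no shared lock. */
        for (int i = 0; i < 100000; i++)
                atomic_fetch_add_explicit(&sectors[id], 8,
                                          memory_order_relaxed);
        return NULL;
}

static unsigned long long read_stat(void)
{
        unsigned long long sum = 0;

        /* Reader sums all slots, like blkio_read_stat_cpu(). */
        for (int i = 0; i < NR_WORKERS; i++)
                sum += atomic_load_explicit(&sectors[i],
                                            memory_order_relaxed);
        return sum;
}

int main(void)
{
        pthread_t t[NR_WORKERS];

        for (long i = 0; i < NR_WORKERS; i++)
                pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < NR_WORKERS; i++)
                pthread_join(t[i], NULL);

        printf("sectors: %llu\n", read_stat());
        return 0;
}
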
@@ -609,9 +729,6 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
609 if (type == BLKIO_STAT_TIME) 729 if (type == BLKIO_STAT_TIME)
610 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, 730 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
611 blkg->stats.time, cb, dev); 731 blkg->stats.time, cb, dev);
612 if (type == BLKIO_STAT_SECTORS)
613 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
614 blkg->stats.sectors, cb, dev);
615#ifdef CONFIG_DEBUG_BLK_CGROUP 732#ifdef CONFIG_DEBUG_BLK_CGROUP
616 if (type == BLKIO_STAT_UNACCOUNTED_TIME) 733 if (type == BLKIO_STAT_UNACCOUNTED_TIME)
617 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, 734 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
@@ -1075,8 +1192,8 @@ static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1075} 1192}
1076 1193
1077static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg, 1194static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1078 struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type, 1195 struct cftype *cft, struct cgroup_map_cb *cb,
1079 bool show_total) 1196 enum stat_type type, bool show_total, bool pcpu)
1080{ 1197{
1081 struct blkio_group *blkg; 1198 struct blkio_group *blkg;
1082 struct hlist_node *n; 1199 struct hlist_node *n;
@@ -1087,10 +1204,15 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1087 if (blkg->dev) { 1204 if (blkg->dev) {
1088 if (!cftype_blkg_same_policy(cft, blkg)) 1205 if (!cftype_blkg_same_policy(cft, blkg))
1089 continue; 1206 continue;
1090 spin_lock_irq(&blkg->stats_lock); 1207 if (pcpu)
1091 cgroup_total += blkio_get_stat(blkg, cb, blkg->dev, 1208 cgroup_total += blkio_get_stat_cpu(blkg, cb,
1092 type); 1209 blkg->dev, type);
1093 spin_unlock_irq(&blkg->stats_lock); 1210 else {
1211 spin_lock_irq(&blkg->stats_lock);
1212 cgroup_total += blkio_get_stat(blkg, cb,
1213 blkg->dev, type);
1214 spin_unlock_irq(&blkg->stats_lock);
1215 }
1094 } 1216 }
1095 } 1217 }
1096 if (show_total) 1218 if (show_total)
@@ -1114,47 +1236,47 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1114 switch(name) { 1236 switch(name) {
1115 case BLKIO_PROP_time: 1237 case BLKIO_PROP_time:
1116 return blkio_read_blkg_stats(blkcg, cft, cb, 1238 return blkio_read_blkg_stats(blkcg, cft, cb,
1117 BLKIO_STAT_TIME, 0); 1239 BLKIO_STAT_TIME, 0, 0);
1118 case BLKIO_PROP_sectors: 1240 case BLKIO_PROP_sectors:
1119 return blkio_read_blkg_stats(blkcg, cft, cb, 1241 return blkio_read_blkg_stats(blkcg, cft, cb,
1120 BLKIO_STAT_SECTORS, 0); 1242 BLKIO_STAT_CPU_SECTORS, 0, 1);
1121 case BLKIO_PROP_io_service_bytes: 1243 case BLKIO_PROP_io_service_bytes:
1122 return blkio_read_blkg_stats(blkcg, cft, cb, 1244 return blkio_read_blkg_stats(blkcg, cft, cb,
1123 BLKIO_STAT_SERVICE_BYTES, 1); 1245 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1124 case BLKIO_PROP_io_serviced: 1246 case BLKIO_PROP_io_serviced:
1125 return blkio_read_blkg_stats(blkcg, cft, cb, 1247 return blkio_read_blkg_stats(blkcg, cft, cb,
1126 BLKIO_STAT_SERVICED, 1); 1248 BLKIO_STAT_CPU_SERVICED, 1, 1);
1127 case BLKIO_PROP_io_service_time: 1249 case BLKIO_PROP_io_service_time:
1128 return blkio_read_blkg_stats(blkcg, cft, cb, 1250 return blkio_read_blkg_stats(blkcg, cft, cb,
1129 BLKIO_STAT_SERVICE_TIME, 1); 1251 BLKIO_STAT_SERVICE_TIME, 1, 0);
1130 case BLKIO_PROP_io_wait_time: 1252 case BLKIO_PROP_io_wait_time:
1131 return blkio_read_blkg_stats(blkcg, cft, cb, 1253 return blkio_read_blkg_stats(blkcg, cft, cb,
1132 BLKIO_STAT_WAIT_TIME, 1); 1254 BLKIO_STAT_WAIT_TIME, 1, 0);
1133 case BLKIO_PROP_io_merged: 1255 case BLKIO_PROP_io_merged:
1134 return blkio_read_blkg_stats(blkcg, cft, cb, 1256 return blkio_read_blkg_stats(blkcg, cft, cb,
1135 BLKIO_STAT_MERGED, 1); 1257 BLKIO_STAT_CPU_MERGED, 1, 1);
1136 case BLKIO_PROP_io_queued: 1258 case BLKIO_PROP_io_queued:
1137 return blkio_read_blkg_stats(blkcg, cft, cb, 1259 return blkio_read_blkg_stats(blkcg, cft, cb,
1138 BLKIO_STAT_QUEUED, 1); 1260 BLKIO_STAT_QUEUED, 1, 0);
1139#ifdef CONFIG_DEBUG_BLK_CGROUP 1261#ifdef CONFIG_DEBUG_BLK_CGROUP
1140 case BLKIO_PROP_unaccounted_time: 1262 case BLKIO_PROP_unaccounted_time:
1141 return blkio_read_blkg_stats(blkcg, cft, cb, 1263 return blkio_read_blkg_stats(blkcg, cft, cb,
1142 BLKIO_STAT_UNACCOUNTED_TIME, 0); 1264 BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1143 case BLKIO_PROP_dequeue: 1265 case BLKIO_PROP_dequeue:
1144 return blkio_read_blkg_stats(blkcg, cft, cb, 1266 return blkio_read_blkg_stats(blkcg, cft, cb,
1145 BLKIO_STAT_DEQUEUE, 0); 1267 BLKIO_STAT_DEQUEUE, 0, 0);
1146 case BLKIO_PROP_avg_queue_size: 1268 case BLKIO_PROP_avg_queue_size:
1147 return blkio_read_blkg_stats(blkcg, cft, cb, 1269 return blkio_read_blkg_stats(blkcg, cft, cb,
1148 BLKIO_STAT_AVG_QUEUE_SIZE, 0); 1270 BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1149 case BLKIO_PROP_group_wait_time: 1271 case BLKIO_PROP_group_wait_time:
1150 return blkio_read_blkg_stats(blkcg, cft, cb, 1272 return blkio_read_blkg_stats(blkcg, cft, cb,
1151 BLKIO_STAT_GROUP_WAIT_TIME, 0); 1273 BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1152 case BLKIO_PROP_idle_time: 1274 case BLKIO_PROP_idle_time:
1153 return blkio_read_blkg_stats(blkcg, cft, cb, 1275 return blkio_read_blkg_stats(blkcg, cft, cb,
1154 BLKIO_STAT_IDLE_TIME, 0); 1276 BLKIO_STAT_IDLE_TIME, 0, 0);
1155 case BLKIO_PROP_empty_time: 1277 case BLKIO_PROP_empty_time:
1156 return blkio_read_blkg_stats(blkcg, cft, cb, 1278 return blkio_read_blkg_stats(blkcg, cft, cb,
1157 BLKIO_STAT_EMPTY_TIME, 0); 1279 BLKIO_STAT_EMPTY_TIME, 0, 0);
1158#endif 1280#endif
1159 default: 1281 default:
1160 BUG(); 1282 BUG();
@@ -1164,10 +1286,10 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1164 switch(name){ 1286 switch(name){
1165 case BLKIO_THROTL_io_service_bytes: 1287 case BLKIO_THROTL_io_service_bytes:
1166 return blkio_read_blkg_stats(blkcg, cft, cb, 1288 return blkio_read_blkg_stats(blkcg, cft, cb,
1167 BLKIO_STAT_SERVICE_BYTES, 1); 1289 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1168 case BLKIO_THROTL_io_serviced: 1290 case BLKIO_THROTL_io_serviced:
1169 return blkio_read_blkg_stats(blkcg, cft, cb, 1291 return blkio_read_blkg_stats(blkcg, cft, cb,
1170 BLKIO_STAT_SERVICED, 1); 1292 BLKIO_STAT_CPU_SERVICED, 1, 1);
1171 default: 1293 default:
1172 BUG(); 1294 BUG();
1173 } 1295 }
@@ -1492,9 +1614,7 @@ done:
1492 * of the main cic data structures. For now we allow a task to change 1614 * of the main cic data structures. For now we allow a task to change
1493 * its cgroup only if it's the only owner of its ioc. 1615 * its cgroup only if it's the only owner of its ioc.
1494 */ 1616 */
1495static int blkiocg_can_attach(struct cgroup_subsys *subsys, 1617static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1496 struct cgroup *cgroup, struct task_struct *tsk,
1497 bool threadgroup)
1498{ 1618{
1499 struct io_context *ioc; 1619 struct io_context *ioc;
1500 int ret = 0; 1620 int ret = 0;
@@ -1509,9 +1629,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *subsys,
1509 return ret; 1629 return ret;
1510} 1630}
1511 1631
1512static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, 1632static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1513 struct cgroup *prev, struct task_struct *tsk,
1514 bool threadgroup)
1515{ 1633{
1516 struct io_context *ioc; 1634 struct io_context *ioc;
1517 1635
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index c774930cc206..a71d2904ffb9 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/cgroup.h> 16#include <linux/cgroup.h>
17#include <linux/u64_stats_sync.h>
17 18
18enum blkio_policy_id { 19enum blkio_policy_id {
19 BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */ 20 BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
@@ -36,22 +37,15 @@ enum stat_type {
36 * request completion for IOs doen by this cgroup. This may not be 37 * request completion for IOs doen by this cgroup. This may not be
37 * accurate when NCQ is turned on. */ 38 * accurate when NCQ is turned on. */
38 BLKIO_STAT_SERVICE_TIME = 0, 39 BLKIO_STAT_SERVICE_TIME = 0,
39 /* Total bytes transferred */
40 BLKIO_STAT_SERVICE_BYTES,
41 /* Total IOs serviced, post merge */
42 BLKIO_STAT_SERVICED,
43 /* Total time spent waiting in scheduler queue in ns */ 40 /* Total time spent waiting in scheduler queue in ns */
44 BLKIO_STAT_WAIT_TIME, 41 BLKIO_STAT_WAIT_TIME,
45 /* Number of IOs merged */
46 BLKIO_STAT_MERGED,
47 /* Number of IOs queued up */ 42 /* Number of IOs queued up */
48 BLKIO_STAT_QUEUED, 43 BLKIO_STAT_QUEUED,
49 /* All the single valued stats go below this */ 44 /* All the single valued stats go below this */
50 BLKIO_STAT_TIME, 45 BLKIO_STAT_TIME,
51 BLKIO_STAT_SECTORS, 46#ifdef CONFIG_DEBUG_BLK_CGROUP
52 /* Time not charged to this cgroup */ 47 /* Time not charged to this cgroup */
53 BLKIO_STAT_UNACCOUNTED_TIME, 48 BLKIO_STAT_UNACCOUNTED_TIME,
54#ifdef CONFIG_DEBUG_BLK_CGROUP
55 BLKIO_STAT_AVG_QUEUE_SIZE, 49 BLKIO_STAT_AVG_QUEUE_SIZE,
56 BLKIO_STAT_IDLE_TIME, 50 BLKIO_STAT_IDLE_TIME,
57 BLKIO_STAT_EMPTY_TIME, 51 BLKIO_STAT_EMPTY_TIME,
@@ -60,6 +54,18 @@ enum stat_type {
60#endif 54#endif
61}; 55};
62 56
57/* Per cpu stats */
58enum stat_type_cpu {
59 BLKIO_STAT_CPU_SECTORS,
60 /* Total bytes transferred */
61 BLKIO_STAT_CPU_SERVICE_BYTES,
62 /* Total IOs serviced, post merge */
63 BLKIO_STAT_CPU_SERVICED,
64 /* Number of IOs merged */
65 BLKIO_STAT_CPU_MERGED,
66 BLKIO_STAT_CPU_NR
67};
68
63enum stat_sub_type { 69enum stat_sub_type {
64 BLKIO_STAT_READ = 0, 70 BLKIO_STAT_READ = 0,
65 BLKIO_STAT_WRITE, 71 BLKIO_STAT_WRITE,
@@ -116,11 +122,11 @@ struct blkio_cgroup {
116struct blkio_group_stats { 122struct blkio_group_stats {
117 /* total disk time and nr sectors dispatched by this group */ 123 /* total disk time and nr sectors dispatched by this group */
118 uint64_t time; 124 uint64_t time;
119 uint64_t sectors;
120 /* Time not charged to this cgroup */
121 uint64_t unaccounted_time;
122 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL]; 125 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
123#ifdef CONFIG_DEBUG_BLK_CGROUP 126#ifdef CONFIG_DEBUG_BLK_CGROUP
127 /* Time not charged to this cgroup */
128 uint64_t unaccounted_time;
129
124 /* Sum of number of IOs queued across all samples */ 130 /* Sum of number of IOs queued across all samples */
125 uint64_t avg_queue_size_sum; 131 uint64_t avg_queue_size_sum;
126 /* Count of samples taken for average */ 132 /* Count of samples taken for average */
@@ -145,6 +151,13 @@ struct blkio_group_stats {
145#endif 151#endif
146}; 152};
147 153
154/* Per cpu blkio group stats */
155struct blkio_group_stats_cpu {
156 uint64_t sectors;
157 uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
158 struct u64_stats_sync syncp;
159};
160
148struct blkio_group { 161struct blkio_group {
149 /* An rcu protected unique identifier for the group */ 162 /* An rcu protected unique identifier for the group */
150 void *key; 163 void *key;
@@ -160,6 +173,8 @@ struct blkio_group {
160 /* Need to serialize the stats in the case of reset/update */ 173 /* Need to serialize the stats in the case of reset/update */
161 spinlock_t stats_lock; 174 spinlock_t stats_lock;
162 struct blkio_group_stats stats; 175 struct blkio_group_stats stats;
176 /* Per cpu stats pointer */
177 struct blkio_group_stats_cpu __percpu *stats_cpu;
163}; 178};
164 179
165struct blkio_policy_node { 180struct blkio_policy_node {
@@ -295,6 +310,7 @@ extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
295extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 310extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
296 struct blkio_group *blkg, void *key, dev_t dev, 311 struct blkio_group *blkg, void *key, dev_t dev,
297 enum blkio_policy_id plid); 312 enum blkio_policy_id plid);
313extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
298extern int blkiocg_del_blkio_group(struct blkio_group *blkg); 314extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
299extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, 315extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
300 void *key); 316 void *key);
@@ -322,6 +338,8 @@ static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
322 struct blkio_group *blkg, void *key, dev_t dev, 338 struct blkio_group *blkg, void *key, dev_t dev,
323 enum blkio_policy_id plid) {} 339 enum blkio_policy_id plid) {}
324 340
341static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
342
325static inline int 343static inline int
326blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; } 344blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
327 345
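The header pairs the real blkio_alloc_blkg_stats() declaration with a static inline stub that just returns 0 when blk-cgroup support is compiled out, so callers such as CFQ and blk-throttle need no #ifdefs. A small standalone illustration of that configuration-stub pattern; the names (HAVE_GROUP_STATS, group_alloc_stats) are made up for the example:

#include <stdio.h>

/* Toggle the feature at build time, e.g.  cc -DHAVE_GROUP_STATS demo.c */
#ifdef HAVE_GROUP_STATS
int group_alloc_stats(void)
{
    puts("allocating per-cpu stats");
    return 0;               /* 0 on success, -ENOMEM style value on failure */
}
#else
/* Compiled-out flavour: same signature, no work, always succeeds. */
static inline int group_alloc_stats(void) { return 0; }
#endif

int main(void)
{
    /* Call sites look identical whichever way the feature is configured. */
    if (group_alloc_stats() != 0)
        fprintf(stderr, "stat allocation failed\n");
    return 0;
}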
diff --git a/block/blk-core.c b/block/blk-core.c
index 3fe00a14822a..c8303e9d919d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -569,8 +569,6 @@ int blk_get_queue(struct request_queue *q)
569 569
570static inline void blk_free_request(struct request_queue *q, struct request *rq) 570static inline void blk_free_request(struct request_queue *q, struct request *rq)
571{ 571{
572 BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
573
574 if (rq->cmd_flags & REQ_ELVPRIV) 572 if (rq->cmd_flags & REQ_ELVPRIV)
575 elv_put_request(q, rq); 573 elv_put_request(q, rq);
576 mempool_free(rq, q->rq.rq_pool); 574 mempool_free(rq, q->rq.rq_pool);
@@ -1110,14 +1108,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1110{ 1108{
1111 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1109 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1112 1110
1113 /*
1114 * Debug stuff, kill later
1115 */
1116 if (!rq_mergeable(req)) {
1117 blk_dump_rq_flags(req, "back");
1118 return false;
1119 }
1120
1121 if (!ll_back_merge_fn(q, req, bio)) 1111 if (!ll_back_merge_fn(q, req, bio))
1122 return false; 1112 return false;
1123 1113
@@ -1132,6 +1122,7 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1132 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1122 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1133 1123
1134 drive_stat_acct(req, 0); 1124 drive_stat_acct(req, 0);
1125 elv_bio_merged(q, req, bio);
1135 return true; 1126 return true;
1136} 1127}
1137 1128
@@ -1141,14 +1132,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1141 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1132 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1142 sector_t sector; 1133 sector_t sector;
1143 1134
1144 /*
1145 * Debug stuff, kill later
1146 */
1147 if (!rq_mergeable(req)) {
1148 blk_dump_rq_flags(req, "front");
1149 return false;
1150 }
1151
1152 if (!ll_front_merge_fn(q, req, bio)) 1135 if (!ll_front_merge_fn(q, req, bio))
1153 return false; 1136 return false;
1154 1137
@@ -1173,6 +1156,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1173 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1156 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1174 1157
1175 drive_stat_acct(req, 0); 1158 drive_stat_acct(req, 0);
1159 elv_bio_merged(q, req, bio);
1176 return true; 1160 return true;
1177} 1161}
1178 1162
@@ -1258,14 +1242,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1258 1242
1259 el_ret = elv_merge(q, &req, bio); 1243 el_ret = elv_merge(q, &req, bio);
1260 if (el_ret == ELEVATOR_BACK_MERGE) { 1244 if (el_ret == ELEVATOR_BACK_MERGE) {
1261 BUG_ON(req->cmd_flags & REQ_ON_PLUG);
1262 if (bio_attempt_back_merge(q, req, bio)) { 1245 if (bio_attempt_back_merge(q, req, bio)) {
1263 if (!attempt_back_merge(q, req)) 1246 if (!attempt_back_merge(q, req))
1264 elv_merged_request(q, req, el_ret); 1247 elv_merged_request(q, req, el_ret);
1265 goto out_unlock; 1248 goto out_unlock;
1266 } 1249 }
1267 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1250 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1268 BUG_ON(req->cmd_flags & REQ_ON_PLUG);
1269 if (bio_attempt_front_merge(q, req, bio)) { 1251 if (bio_attempt_front_merge(q, req, bio)) {
1270 if (!attempt_front_merge(q, req)) 1252 if (!attempt_front_merge(q, req))
1271 elv_merged_request(q, req, el_ret); 1253 elv_merged_request(q, req, el_ret);
@@ -1320,10 +1302,6 @@ get_rq:
1320 if (__rq->q != q) 1302 if (__rq->q != q)
1321 plug->should_sort = 1; 1303 plug->should_sort = 1;
1322 } 1304 }
1323 /*
1324 * Debug flag, kill later
1325 */
1326 req->cmd_flags |= REQ_ON_PLUG;
1327 list_add_tail(&req->queuelist, &plug->list); 1305 list_add_tail(&req->queuelist, &plug->list);
1328 drive_stat_acct(req, 1); 1306 drive_stat_acct(req, 1);
1329 } else { 1307 } else {
@@ -1550,7 +1528,8 @@ static inline void __generic_make_request(struct bio *bio)
1550 goto end_io; 1528 goto end_io;
1551 } 1529 }
1552 1530
1553 blk_throtl_bio(q, &bio); 1531 if (blk_throtl_bio(q, &bio))
1532 goto end_io;
1554 1533
1555 /* 1534 /*
1556 * If bio = NULL, bio has been throttled and will be submitted 1535 * If bio = NULL, bio has been throttled and will be submitted
@@ -2748,7 +2727,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2748 while (!list_empty(&list)) { 2727 while (!list_empty(&list)) {
2749 rq = list_entry_rq(list.next); 2728 rq = list_entry_rq(list.next);
2750 list_del_init(&rq->queuelist); 2729 list_del_init(&rq->queuelist);
2751 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
2752 BUG_ON(!rq->q); 2730 BUG_ON(!rq->q);
2753 if (rq->q != q) { 2731 if (rq->q != q) {
2754 /* 2732 /*
@@ -2760,8 +2738,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2760 depth = 0; 2738 depth = 0;
2761 spin_lock(q->queue_lock); 2739 spin_lock(q->queue_lock);
2762 } 2740 }
2763 rq->cmd_flags &= ~REQ_ON_PLUG;
2764
2765 /* 2741 /*
2766 * rq is already accounted, so use raw insert 2742 * rq is already accounted, so use raw insert
2767 */ 2743 */
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 81e31819a597..8a0e7ec056e7 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -56,7 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
56 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
57 __elv_add_request(q, rq, where); 57 __elv_add_request(q, rq, where);
58 __blk_run_queue(q); 58 __blk_run_queue(q);
59 /* the queue is stopped so it won't be plugged+unplugged */ 59 /* the queue is stopped so it won't be run */
60 if (rq->cmd_type == REQ_TYPE_PM_RESUME) 60 if (rq->cmd_type == REQ_TYPE_PM_RESUME)
61 q->request_fn(q); 61 q->request_fn(q);
62 spin_unlock_irq(q->queue_lock); 62 spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6c9b5e189e62..bb21e4c36f70 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -212,13 +212,19 @@ static void flush_end_io(struct request *flush_rq, int error)
212 } 212 }
213 213
214 /* 214 /*
215 * Moving a request silently to empty queue_head may stall the 215 * Kick the queue to avoid stall for two cases:
216 * queue. Kick the queue in those cases. This function is called 216 * 1. Moving a request silently to empty queue_head may stall the
217 * from request completion path and calling directly into 217 * queue.
218 * request_fn may confuse the driver. Always use kblockd. 218 * 2. When flush request is running in non-queueable queue, the
219 * queue is held. Restart the queue after the flush request is
220 * finished to avoid a stall.
221 * This function is called from request completion path and calling
222 * directly into request_fn may confuse the driver. Always use
223 * kblockd.
219 */ 224 */
220 if (queued) 225 if (queued || q->flush_queue_delayed)
221 blk_run_queue_async(q); 226 blk_run_queue_async(q);
227 q->flush_queue_delayed = 0;
222} 228}
223 229
224/** 230/**
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index b791022beef3..c898049dafd5 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -96,6 +96,9 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
96 INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); 96 INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
97 INIT_HLIST_HEAD(&ret->cic_list); 97 INIT_HLIST_HEAD(&ret->cic_list);
98 ret->ioc_data = NULL; 98 ret->ioc_data = NULL;
99#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
100 ret->cgroup_changed = 0;
101#endif
99 } 102 }
100 103
101 return ret; 104 return ret;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 25de73e4759b..78e627e2581d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,17 +9,20 @@
9 9
10#include "blk.h" 10#include "blk.h"
11 11
12static void blkdev_discard_end_io(struct bio *bio, int err) 12struct bio_batch {
13{ 13 atomic_t done;
14 if (err) { 14 unsigned long flags;
15 if (err == -EOPNOTSUPP) 15 struct completion *wait;
16 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 16};
17 clear_bit(BIO_UPTODATE, &bio->bi_flags);
18 }
19 17
20 if (bio->bi_private) 18static void bio_batch_end_io(struct bio *bio, int err)
21 complete(bio->bi_private); 19{
20 struct bio_batch *bb = bio->bi_private;
22 21
22 if (err && (err != -EOPNOTSUPP))
23 clear_bit(BIO_UPTODATE, &bb->flags);
24 if (atomic_dec_and_test(&bb->done))
25 complete(bb->wait);
23 bio_put(bio); 26 bio_put(bio);
24} 27}
25 28
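struct bio_batch above replaces the old one-bio-at-a-time wait: 'done' starts at 1 for the submitter's own reference, every submitted bio adds one, each end_io callback drops one, and the submitter only sleeps if its own final decrement is not the last. A userspace sketch of the same counting scheme with pthreads; struct batch and its helpers are invented names, not the block-layer API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Invented analogue of struct bio_batch: a reference count plus a wakeup. */
struct batch {
    atomic_int done;
    pthread_mutex_t lock;
    pthread_cond_t wait;
};

static void batch_complete_one(struct batch *b)
{
    if (atomic_fetch_sub(&b->done, 1) == 1) {   /* we dropped the last ref */
        pthread_mutex_lock(&b->lock);
        pthread_cond_signal(&b->wait);
        pthread_mutex_unlock(&b->lock);
    }
}

static void *worker(void *arg)          /* plays the role of one end_io */
{
    batch_complete_one(arg);
    return NULL;
}

int main(void)
{
    struct batch b = { .done = 1 };     /* the submitter holds one reference */
    pthread_t tid[3];

    pthread_mutex_init(&b.lock, NULL);
    pthread_cond_init(&b.wait, NULL);

    for (int i = 0; i < 3; i++) {       /* "submit" three items */
        atomic_fetch_add(&b.done, 1);
        pthread_create(&tid[i], NULL, worker, &b);
    }

    /* Drop our own reference; wait only if work is still in flight. */
    pthread_mutex_lock(&b.lock);
    if (atomic_fetch_sub(&b.done, 1) != 1)
        while (atomic_load(&b.done) != 0)
            pthread_cond_wait(&b.wait, &b.lock);
    pthread_mutex_unlock(&b.lock);

    for (int i = 0; i < 3; i++)
        pthread_join(tid[i], NULL);
    puts("all items completed");
    return 0;
}

Starting the count at 1 is what lets the submission loop keep issuing work without racing against completions that finish early.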
@@ -41,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
41 struct request_queue *q = bdev_get_queue(bdev); 44 struct request_queue *q = bdev_get_queue(bdev);
42 int type = REQ_WRITE | REQ_DISCARD; 45 int type = REQ_WRITE | REQ_DISCARD;
43 unsigned int max_discard_sectors; 46 unsigned int max_discard_sectors;
47 struct bio_batch bb;
44 struct bio *bio; 48 struct bio *bio;
45 int ret = 0; 49 int ret = 0;
46 50
@@ -67,7 +71,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
67 type |= REQ_SECURE; 71 type |= REQ_SECURE;
68 } 72 }
69 73
70 while (nr_sects && !ret) { 74 atomic_set(&bb.done, 1);
75 bb.flags = 1 << BIO_UPTODATE;
76 bb.wait = &wait;
77
78 while (nr_sects) {
71 bio = bio_alloc(gfp_mask, 1); 79 bio = bio_alloc(gfp_mask, 1);
72 if (!bio) { 80 if (!bio) {
73 ret = -ENOMEM; 81 ret = -ENOMEM;
@@ -75,9 +83,9 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
75 } 83 }
76 84
77 bio->bi_sector = sector; 85 bio->bi_sector = sector;
78 bio->bi_end_io = blkdev_discard_end_io; 86 bio->bi_end_io = bio_batch_end_io;
79 bio->bi_bdev = bdev; 87 bio->bi_bdev = bdev;
80 bio->bi_private = &wait; 88 bio->bi_private = &bb;
81 89
82 if (nr_sects > max_discard_sectors) { 90 if (nr_sects > max_discard_sectors) {
83 bio->bi_size = max_discard_sectors << 9; 91 bio->bi_size = max_discard_sectors << 9;
@@ -88,45 +96,21 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
88 nr_sects = 0; 96 nr_sects = 0;
89 } 97 }
90 98
91 bio_get(bio); 99 atomic_inc(&bb.done);
92 submit_bio(type, bio); 100 submit_bio(type, bio);
101 }
93 102
103 /* Wait for bios in-flight */
104 if (!atomic_dec_and_test(&bb.done))
94 wait_for_completion(&wait); 105 wait_for_completion(&wait);
95 106
96 if (bio_flagged(bio, BIO_EOPNOTSUPP)) 107 if (!test_bit(BIO_UPTODATE, &bb.flags))
97 ret = -EOPNOTSUPP; 108 ret = -EIO;
98 else if (!bio_flagged(bio, BIO_UPTODATE))
99 ret = -EIO;
100 bio_put(bio);
101 }
102 109
103 return ret; 110 return ret;
104} 111}
105EXPORT_SYMBOL(blkdev_issue_discard); 112EXPORT_SYMBOL(blkdev_issue_discard);
106 113
107struct bio_batch
108{
109 atomic_t done;
110 unsigned long flags;
111 struct completion *wait;
112};
113
114static void bio_batch_end_io(struct bio *bio, int err)
115{
116 struct bio_batch *bb = bio->bi_private;
117
118 if (err) {
119 if (err == -EOPNOTSUPP)
120 set_bit(BIO_EOPNOTSUPP, &bb->flags);
121 else
122 clear_bit(BIO_UPTODATE, &bb->flags);
123 }
124 if (bb)
125 if (atomic_dec_and_test(&bb->done))
126 complete(bb->wait);
127 bio_put(bio);
128}
129
130/** 114/**
131 * blkdev_issue_zeroout - generate number of zero filed write bios 115 * blkdev_issue_zeroout - generate number of zero filed write bios
132 * @bdev: blockdev to issue 116 * @bdev: blockdev to issue
@@ -151,7 +135,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
151 bb.flags = 1 << BIO_UPTODATE; 135 bb.flags = 1 << BIO_UPTODATE;
152 bb.wait = &wait; 136 bb.wait = &wait;
153 137
154submit:
155 ret = 0; 138 ret = 0;
156 while (nr_sects != 0) { 139 while (nr_sects != 0) {
157 bio = bio_alloc(gfp_mask, 140 bio = bio_alloc(gfp_mask,
@@ -168,9 +151,6 @@ submit:
168 151
169 while (nr_sects != 0) { 152 while (nr_sects != 0) {
170 sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects); 153 sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
171 if (sz == 0)
172 /* bio has maximum size possible */
173 break;
174 ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0); 154 ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
175 nr_sects -= ret >> 9; 155 nr_sects -= ret >> 9;
176 sector += ret >> 9; 156 sector += ret >> 9;
@@ -190,16 +170,6 @@ submit:
190 /* One of bios in the batch was completed with error.*/ 170 /* One of bios in the batch was completed with error.*/
191 ret = -EIO; 171 ret = -EIO;
192 172
193 if (ret)
194 goto out;
195
196 if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
197 ret = -EOPNOTSUPP;
198 goto out;
199 }
200 if (nr_sects != 0)
201 goto submit;
202out:
203 return ret; 173 return ret;
204} 174}
205EXPORT_SYMBOL(blkdev_issue_zeroout); 175EXPORT_SYMBOL(blkdev_issue_zeroout);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1fa769293597..fa1eb0449a05 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -120,7 +120,7 @@ void blk_set_default_limits(struct queue_limits *lim)
120 lim->discard_granularity = 0; 120 lim->discard_granularity = 0;
121 lim->discard_alignment = 0; 121 lim->discard_alignment = 0;
122 lim->discard_misaligned = 0; 122 lim->discard_misaligned = 0;
123 lim->discard_zeroes_data = -1; 123 lim->discard_zeroes_data = 1;
124 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; 124 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
125 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); 125 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
126 lim->alignment_offset = 0; 126 lim->alignment_offset = 0;
@@ -166,6 +166,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
166 166
167 blk_set_default_limits(&q->limits); 167 blk_set_default_limits(&q->limits);
168 blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); 168 blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
169 q->limits.discard_zeroes_data = 0;
169 170
170 /* 171 /*
171 * by default assume old behaviour and bounce for any highmem page 172 * by default assume old behaviour and bounce for any highmem page
@@ -790,6 +791,12 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
790} 791}
791EXPORT_SYMBOL_GPL(blk_queue_flush); 792EXPORT_SYMBOL_GPL(blk_queue_flush);
792 793
794void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
795{
796 q->flush_not_queueable = !queueable;
797}
798EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
799
793static int __init blk_settings_init(void) 800static int __init blk_settings_init(void)
794{ 801{
795 blk_max_low_pfn = max_low_pfn - 1; 802 blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bd236313f35d..d935bd859c87 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -152,7 +152,8 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
152 152
153static ssize_t queue_discard_max_show(struct request_queue *q, char *page) 153static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
154{ 154{
155 return queue_var_show(q->limits.max_discard_sectors << 9, page); 155 return sprintf(page, "%llu\n",
156 (unsigned long long)q->limits.max_discard_sectors << 9);
156} 157}
157 158
158static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) 159static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
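max_discard_sectors is an unsigned int, so the old "max_discard_sectors << 9" computed the byte value in 32 bits and silently truncated limits of 4 GiB and above before queue_var_show() ever saw them; the replacement widens the operand first. A tiny demonstration, assuming the usual 32-bit unsigned int:

#include <stdio.h>

int main(void)
{
    /* 16M sectors of 512 bytes = 8 GiB, which does not fit in 32 bits. */
    unsigned int max_discard_sectors = 16 * 1024 * 1024;

    /* Shift done in 32 bits: the result wraps modulo 2^32. */
    unsigned int wrong = max_discard_sectors << 9;

    /* Widen first, then shift, as the sysfs show routine now does. */
    unsigned long long right = (unsigned long long)max_discard_sectors << 9;

    printf("truncated: %u bytes\n", wrong);     /* prints 0 */
    printf("correct:   %llu bytes\n", right);   /* prints 8589934592 */
    return 0;
}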
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 252a81a306f7..a62be8d0dc1b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -78,6 +78,8 @@ struct throtl_grp {
78 78
79 /* Some throttle limits got updated for the group */ 79 /* Some throttle limits got updated for the group */
80 int limits_changed; 80 int limits_changed;
81
82 struct rcu_head rcu_head;
81}; 83};
82 84
83struct throtl_data 85struct throtl_data
@@ -88,7 +90,7 @@ struct throtl_data
88 /* service tree for active throtl groups */ 90 /* service tree for active throtl groups */
89 struct throtl_rb_root tg_service_tree; 91 struct throtl_rb_root tg_service_tree;
90 92
91 struct throtl_grp root_tg; 93 struct throtl_grp *root_tg;
92 struct request_queue *queue; 94 struct request_queue *queue;
93 95
94 /* Total Number of queued bios on READ and WRITE lists */ 96 /* Total Number of queued bios on READ and WRITE lists */
@@ -151,56 +153,44 @@ static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
151 return tg; 153 return tg;
152} 154}
153 155
154static void throtl_put_tg(struct throtl_grp *tg) 156static void throtl_free_tg(struct rcu_head *head)
155{ 157{
156 BUG_ON(atomic_read(&tg->ref) <= 0); 158 struct throtl_grp *tg;
157 if (!atomic_dec_and_test(&tg->ref)) 159
158 return; 160 tg = container_of(head, struct throtl_grp, rcu_head);
161 free_percpu(tg->blkg.stats_cpu);
159 kfree(tg); 162 kfree(tg);
160} 163}
161 164
162static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, 165static void throtl_put_tg(struct throtl_grp *tg)
163 struct blkio_cgroup *blkcg)
164{ 166{
165 struct throtl_grp *tg = NULL; 167 BUG_ON(atomic_read(&tg->ref) <= 0);
166 void *key = td; 168 if (!atomic_dec_and_test(&tg->ref))
167 struct backing_dev_info *bdi = &td->queue->backing_dev_info; 169 return;
168 unsigned int major, minor;
169 170
170 /* 171 /*
171 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix 172 * A group is freed in rcu manner. But having an rcu lock does not
172 * tree of blkg (instead of traversing through hash list all 173 * mean that one can access all the fields of blkg and assume these
173 * the time. 174 * are valid. For example, don't try to follow throtl_data and
175 * request queue links.
176 *
177 * Having a reference to blkg under rcu allows access only to
178 * values local to groups like group stats and group rate limits
174 */ 179 */
180 call_rcu(&tg->rcu_head, throtl_free_tg);
181}
175 182
176 /* 183static void throtl_init_group(struct throtl_grp *tg)
177 * This is the common case when there are no blkio cgroups. 184{
178 * Avoid lookup in this case
179 */
180 if (blkcg == &blkio_root_cgroup)
181 tg = &td->root_tg;
182 else
183 tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
184
185 /* Fill in device details for root group */
186 if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
187 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
188 tg->blkg.dev = MKDEV(major, minor);
189 goto done;
190 }
191
192 if (tg)
193 goto done;
194
195 tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
196 if (!tg)
197 goto done;
198
199 INIT_HLIST_NODE(&tg->tg_node); 185 INIT_HLIST_NODE(&tg->tg_node);
200 RB_CLEAR_NODE(&tg->rb_node); 186 RB_CLEAR_NODE(&tg->rb_node);
201 bio_list_init(&tg->bio_lists[0]); 187 bio_list_init(&tg->bio_lists[0]);
202 bio_list_init(&tg->bio_lists[1]); 188 bio_list_init(&tg->bio_lists[1]);
203 td->limits_changed = false; 189 tg->limits_changed = false;
190
191 /* Practically unlimited BW */
192 tg->bps[0] = tg->bps[1] = -1;
193 tg->iops[0] = tg->iops[1] = -1;
204 194
205 /* 195 /*
206 * Take the initial reference that will be released on destroy 196 * Take the initial reference that will be released on destroy
@@ -209,33 +199,181 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
209 * exit or cgroup deletion path depending on who is exiting first. 199 * exit or cgroup deletion path depending on who is exiting first.
210 */ 200 */
211 atomic_set(&tg->ref, 1); 201 atomic_set(&tg->ref, 1);
202}
203
204/* Should be called with rcu read lock held (needed for blkcg) */
205static void
206throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
207{
208 hlist_add_head(&tg->tg_node, &td->tg_list);
209 td->nr_undestroyed_grps++;
210}
211
212static void
213__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
214{
215 struct backing_dev_info *bdi = &td->queue->backing_dev_info;
216 unsigned int major, minor;
217
218 if (!tg || tg->blkg.dev)
219 return;
220
221 /*
222 * Fill in device details for a group which might not have been
223 * filled at group creation time, as the queue was being instantiated
224 * and the driver had not attached a device yet
225 */
226 if (bdi->dev && dev_name(bdi->dev)) {
227 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
228 tg->blkg.dev = MKDEV(major, minor);
229 }
230}
231
232/*
233 * Should be called without the queue lock held. Here the queue lock
234 * will be taken rarely, and only once during the lifetime of a group
235 * if need be
236 */
237static void
238throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
239{
240 if (!tg || tg->blkg.dev)
241 return;
242
243 spin_lock_irq(td->queue->queue_lock);
244 __throtl_tg_fill_dev_details(td, tg);
245 spin_unlock_irq(td->queue->queue_lock);
246}
247
248static void throtl_init_add_tg_lists(struct throtl_data *td,
249 struct throtl_grp *tg, struct blkio_cgroup *blkcg)
250{
251 __throtl_tg_fill_dev_details(td, tg);
212 252
213 /* Add group onto cgroup list */ 253 /* Add group onto cgroup list */
214 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
215 blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td, 254 blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
216 MKDEV(major, minor), BLKIO_POLICY_THROTL); 255 tg->blkg.dev, BLKIO_POLICY_THROTL);
217 256
218 tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev); 257 tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
219 tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev); 258 tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
220 tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev); 259 tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
221 tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev); 260 tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
222 261
223 hlist_add_head(&tg->tg_node, &td->tg_list); 262 throtl_add_group_to_td_list(td, tg);
224 td->nr_undestroyed_grps++; 263}
225done: 264
265/* Should be called without queue lock and outside of rcu period */
266static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
267{
268 struct throtl_grp *tg = NULL;
269 int ret;
270
271 tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
272 if (!tg)
273 return NULL;
274
275 ret = blkio_alloc_blkg_stats(&tg->blkg);
276
277 if (ret) {
278 kfree(tg);
279 return NULL;
280 }
281
282 throtl_init_group(tg);
226 return tg; 283 return tg;
227} 284}
228 285
229static struct throtl_grp * throtl_get_tg(struct throtl_data *td) 286static struct
287throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
230{ 288{
231 struct throtl_grp *tg = NULL; 289 struct throtl_grp *tg = NULL;
290 void *key = td;
291
292 /*
293 * This is the common case when there are no blkio cgroups.
294 * Avoid lookup in this case
295 */
296 if (blkcg == &blkio_root_cgroup)
297 tg = td->root_tg;
298 else
299 tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
300
301 __throtl_tg_fill_dev_details(td, tg);
302 return tg;
303}
304
305/*
306 * This function returns with the queue lock unlocked in case of error,
307 * for example when the request queue is gone
308 */
309static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
310{
311 struct throtl_grp *tg = NULL, *__tg = NULL;
232 struct blkio_cgroup *blkcg; 312 struct blkio_cgroup *blkcg;
313 struct request_queue *q = td->queue;
233 314
234 rcu_read_lock(); 315 rcu_read_lock();
235 blkcg = task_blkio_cgroup(current); 316 blkcg = task_blkio_cgroup(current);
236 tg = throtl_find_alloc_tg(td, blkcg); 317 tg = throtl_find_tg(td, blkcg);
237 if (!tg) 318 if (tg) {
238 tg = &td->root_tg; 319 rcu_read_unlock();
320 return tg;
321 }
322
323 /*
324 * Need to allocate a group. Allocation of group also needs allocation
325 * of per-cpu stats, which in turn takes a mutex() and can block. Hence
326 * we need to drop rcu lock and queue_lock before we call alloc
327 *
328 * Take the request queue reference to make sure queue does not
329 * go away once we return from allocation.
330 */
331 blk_get_queue(q);
332 rcu_read_unlock();
333 spin_unlock_irq(q->queue_lock);
334
335 tg = throtl_alloc_tg(td);
336 /*
337 * We might have slept in group allocation. Make sure the queue is not
338 * dead
339 */
340 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
341 blk_put_queue(q);
342 if (tg)
343 kfree(tg);
344
345 return ERR_PTR(-ENODEV);
346 }
347 blk_put_queue(q);
348
349 /* Group allocated and queue is still alive. Take the lock */
350 spin_lock_irq(q->queue_lock);
351
352 /*
353 * Initialize the new group. After sleeping, read the blkcg again.
354 */
355 rcu_read_lock();
356 blkcg = task_blkio_cgroup(current);
357
358 /*
359 * If some other thread already allocated the group while we were
360 * not holding queue lock, free up the group
361 */
362 __tg = throtl_find_tg(td, blkcg);
363
364 if (__tg) {
365 kfree(tg);
366 rcu_read_unlock();
367 return __tg;
368 }
369
370 /* Group allocation failed. Account the IO to root group */
371 if (!tg) {
372 tg = td->root_tg;
373 return tg;
374 }
375
376 throtl_init_add_tg_lists(td, tg, blkcg);
239 rcu_read_unlock(); 377 rcu_read_unlock();
240 return tg; 378 return tg;
241} 379}
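throtl_get_tg() now follows the classic "drop the lock, allocate, re-take the lock, re-check" dance: allocating the per-cpu stats can sleep, so it cannot run under queue_lock, and after re-locking the code has to cope with another thread having created the same group in the meantime. A generic sketch of that pattern with one pthread mutex and a single cached object; all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
    char name[32];
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct group *cached;            /* stands in for the blkcg lookup */

/* Called with 'lock' held; may drop and re-take it; returns with it held. */
static struct group *get_group(const char *name)
{
    struct group *grp, *winner;

    if (cached)                         /* fast path: group already exists */
        return cached;

    /* The allocation may sleep, so it must not happen under the lock. */
    pthread_mutex_unlock(&lock);
    grp = calloc(1, sizeof(*grp));
    pthread_mutex_lock(&lock);

    if (!grp)
        return NULL;

    /* Re-check: another thread may have created the group meanwhile. */
    winner = cached;
    if (winner) {
        free(grp);                      /* we lost the race, drop our copy */
        return winner;
    }

    snprintf(grp->name, sizeof(grp->name), "%s", name);
    cached = grp;
    return grp;
}

int main(void)
{
    pthread_mutex_lock(&lock);
    struct group *g = get_group("root");
    pthread_mutex_unlock(&lock);

    printf("got group %s\n", g ? g->name : "(none)");
    return 0;
}

The blk-throttle version adds two extra wrinkles on top of this: it pins the request queue across the allocation and falls back to the root group when the allocation fails.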
@@ -544,6 +682,12 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
544 return 0; 682 return 0;
545} 683}
546 684
685static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
686 if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
687 return 1;
688 return 0;
689}
690
547/* 691/*
548 * Returns whether one can dispatch a bio or not. Also returns approx number 692 * Returns whether one can dispatch a bio or not. Also returns approx number
549 * of jiffies to wait before this bio is with-in IO rate and can be dispatched 693 * of jiffies to wait before this bio is with-in IO rate and can be dispatched
@@ -608,10 +752,6 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
608 tg->bytes_disp[rw] += bio->bi_size; 752 tg->bytes_disp[rw] += bio->bi_size;
609 tg->io_disp[rw]++; 753 tg->io_disp[rw]++;
610 754
611 /*
612 * TODO: This will take blkg->stats_lock. Figure out a way
613 * to avoid this cost.
614 */
615 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync); 755 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
616} 756}
617 757
@@ -989,15 +1129,51 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
989 struct throtl_grp *tg; 1129 struct throtl_grp *tg;
990 struct bio *bio = *biop; 1130 struct bio *bio = *biop;
991 bool rw = bio_data_dir(bio), update_disptime = true; 1131 bool rw = bio_data_dir(bio), update_disptime = true;
1132 struct blkio_cgroup *blkcg;
992 1133
993 if (bio->bi_rw & REQ_THROTTLED) { 1134 if (bio->bi_rw & REQ_THROTTLED) {
994 bio->bi_rw &= ~REQ_THROTTLED; 1135 bio->bi_rw &= ~REQ_THROTTLED;
995 return 0; 1136 return 0;
996 } 1137 }
997 1138
1139 /*
1140 * A throtl_grp pointer retrieved under rcu can be used to access
1141 * basic fields like stats and io rates. If a group has no rules,
1142 * just update the dispatch stats in a lockless manner and return.
1143 */
1144
1145 rcu_read_lock();
1146 blkcg = task_blkio_cgroup(current);
1147 tg = throtl_find_tg(td, blkcg);
1148 if (tg) {
1149 throtl_tg_fill_dev_details(td, tg);
1150
1151 if (tg_no_rule_group(tg, rw)) {
1152 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
1153 rw, bio->bi_rw & REQ_SYNC);
1154 rcu_read_unlock();
1155 return 0;
1156 }
1157 }
1158 rcu_read_unlock();
1159
1160 /*
1161 * Either the group has not been allocated yet or it is not an unlimited
1162 * IO group
1163 */
1164
998 spin_lock_irq(q->queue_lock); 1165 spin_lock_irq(q->queue_lock);
999 tg = throtl_get_tg(td); 1166 tg = throtl_get_tg(td);
1000 1167
1168 if (IS_ERR(tg)) {
1169 if (PTR_ERR(tg) == -ENODEV) {
1170 /*
1171 * Queue is gone. No queue lock held here.
1172 */
1173 return -ENODEV;
1174 }
1175 }
1176
1001 if (tg->nr_queued[rw]) { 1177 if (tg->nr_queued[rw]) {
1002 /* 1178 /*
1003 * There is already another bio queued in same dir. No 1179 * There is already another bio queued in same dir. No
@@ -1060,39 +1236,24 @@ int blk_throtl_init(struct request_queue *q)
1060 INIT_HLIST_HEAD(&td->tg_list); 1236 INIT_HLIST_HEAD(&td->tg_list);
1061 td->tg_service_tree = THROTL_RB_ROOT; 1237 td->tg_service_tree = THROTL_RB_ROOT;
1062 td->limits_changed = false; 1238 td->limits_changed = false;
1239 INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1063 1240
1064 /* Init root group */ 1241 /* alloc and Init root group. */
1065 tg = &td->root_tg; 1242 td->queue = q;
1066 INIT_HLIST_NODE(&tg->tg_node); 1243 tg = throtl_alloc_tg(td);
1067 RB_CLEAR_NODE(&tg->rb_node);
1068 bio_list_init(&tg->bio_lists[0]);
1069 bio_list_init(&tg->bio_lists[1]);
1070
1071 /* Practically unlimited BW */
1072 tg->bps[0] = tg->bps[1] = -1;
1073 tg->iops[0] = tg->iops[1] = -1;
1074 td->limits_changed = false;
1075 1244
1076 /* 1245 if (!tg) {
1077 * Set root group reference to 2. One reference will be dropped when 1246 kfree(td);
1078 * all groups on tg_list are being deleted during queue exit. Other 1247 return -ENOMEM;
1079 * reference will remain there as we don't want to delete this group 1248 }
1080 * as it is statically allocated and gets destroyed when throtl_data
1081 * goes away.
1082 */
1083 atomic_set(&tg->ref, 2);
1084 hlist_add_head(&tg->tg_node, &td->tg_list);
1085 td->nr_undestroyed_grps++;
1086 1249
1087 INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work); 1250 td->root_tg = tg;
1088 1251
1089 rcu_read_lock(); 1252 rcu_read_lock();
1090 blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td, 1253 throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
1091 0, BLKIO_POLICY_THROTL);
1092 rcu_read_unlock(); 1254 rcu_read_unlock();
1093 1255
1094 /* Attach throtl data to request queue */ 1256 /* Attach throtl data to request queue */
1095 td->queue = q;
1096 q->td = td; 1257 q->td = td;
1097 return 0; 1258 return 0;
1098} 1259}
diff --git a/block/blk.h b/block/blk.h
index 61263463e38e..d6586287adc9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -62,7 +62,28 @@ static inline struct request *__elv_next_request(struct request_queue *q)
62 return rq; 62 return rq;
63 } 63 }
64 64
65 if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) 65 /*
66 * If a flush request is running and flush requests aren't queueable
67 * by the drive, we can hold the queue until the flush request is
68 * finished. Even if we don't do this, the driver can't dispatch the
69 * next requests and will requeue them, and holding the queue can
70 * improve throughput too. For example, we have requests flush1,
71 * write1, flush2. flush1 is dispatched, then the queue is held and
72 * write1 isn't inserted into the queue. After flush1 is finished,
73 * flush2 will be dispatched. Since the disk cache is already clean,
74 * flush2 will finish very soon, so it looks as if flush2 has been
75 * folded into flush1.
76 * Since the queue is held, a flag is set to indicate that the queue
77 * should be restarted later. Please see flush_end_io() for
78 * details.
79 */
80 if (q->flush_pending_idx != q->flush_running_idx &&
81 !queue_flush_queueable(q)) {
82 q->flush_queue_delayed = 1;
83 return NULL;
84 }
85 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
86 !q->elevator->ops->elevator_dispatch_fn(q, 0))
66 return NULL; 87 return NULL;
67 } 88 }
68} 89}
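The comment above describes a two-sided handshake: the dispatcher holds requests back while a flush is in flight on a drive that cannot queue commands behind a flush, records that fact in flush_queue_delayed, and flush_end_io() restarts the queue when it sees the flag. A schematic, single-threaded sketch of that handshake; the struct and function names are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct queue {
    bool flush_in_flight;       /* flush_pending_idx != flush_running_idx */
    bool flush_queueable;       /* drive can accept commands behind a flush */
    bool flush_queue_delayed;   /* dispatch was held back, restart later */
};

/* Dispatcher: hold the queue while a flush runs on a non-queueable drive. */
static const char *next_request(struct queue *q)
{
    if (q->flush_in_flight && !q->flush_queueable) {
        q->flush_queue_delayed = true;
        return NULL;                    /* nothing dispatched for now */
    }
    return "some request";
}

/* Flush completion: restart the queue if dispatch was held back. */
static void flush_done(struct queue *q)
{
    q->flush_in_flight = false;
    if (q->flush_queue_delayed)
        puts("kicking the queue again");
    q->flush_queue_delayed = false;
}

int main(void)
{
    struct queue q = { .flush_in_flight = true, .flush_queueable = false };

    if (!next_request(&q))
        puts("dispatch held while the flush is running");
    flush_done(&q);
    return 0;
}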
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ab7a9e6a9b1c..7c52d6888924 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -300,7 +300,9 @@ struct cfq_data {
300 300
301 /* List of cfq groups being managed on this device*/ 301 /* List of cfq groups being managed on this device*/
302 struct hlist_head cfqg_list; 302 struct hlist_head cfqg_list;
303 struct rcu_head rcu; 303
304 /* Number of groups which are on blkcg->blkg_list */
305 unsigned int nr_blkcg_linked_grps;
304}; 306};
305 307
306static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); 308static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -665,15 +667,11 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
665 if (rq2 == NULL) 667 if (rq2 == NULL)
666 return rq1; 668 return rq1;
667 669
668 if (rq_is_sync(rq1) && !rq_is_sync(rq2)) 670 if (rq_is_sync(rq1) != rq_is_sync(rq2))
669 return rq1; 671 return rq_is_sync(rq1) ? rq1 : rq2;
670 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 672
671 return rq2; 673 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
672 if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) 674 return rq1->cmd_flags & REQ_META ? rq1 : rq2;
673 return rq1;
674 else if ((rq2->cmd_flags & REQ_META) &&
675 !(rq1->cmd_flags & REQ_META))
676 return rq2;
677 675
678 s1 = blk_rq_pos(rq1); 676 s1 = blk_rq_pos(rq1);
679 s2 = blk_rq_pos(rq2); 677 s2 = blk_rq_pos(rq2);
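The rewritten cfq_choose_req() above leans on the fact that "(f1 ^ f2) & FLAG" is non-zero exactly when the two requests disagree about FLAG, which collapses the old four-branch if/else into "if they differ, prefer the one that has it". A tiny demonstration of the idiom; the flag values below are illustrative, not the kernel's:

#include <stdio.h>

#define REQ_META (1u << 0)
#define REQ_SYNC (1u << 1)

static const char *prefer(unsigned int f1, unsigned int f2)
{
    /* The flag differs between the two: pick whichever has it set. */
    if ((f1 ^ f2) & REQ_META)
        return (f1 & REQ_META) ? "rq1" : "rq2";
    return "tie";
}

int main(void)
{
    printf("%s\n", prefer(REQ_META | REQ_SYNC, REQ_SYNC));  /* rq1 */
    printf("%s\n", prefer(REQ_SYNC, REQ_META));             /* rq2 */
    printf("%s\n", prefer(REQ_SYNC, REQ_SYNC));             /* tie */
    return 0;
}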
@@ -1014,28 +1012,47 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
1014 cfqg->needs_update = true; 1012 cfqg->needs_update = true;
1015} 1013}
1016 1014
1017static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, 1015static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
1018 struct blkio_cgroup *blkcg, int create) 1016 struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
1019{ 1017{
1020 struct cfq_group *cfqg = NULL;
1021 void *key = cfqd;
1022 int i, j;
1023 struct cfq_rb_root *st;
1024 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info; 1018 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1025 unsigned int major, minor; 1019 unsigned int major, minor;
1026 1020
1027 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); 1021 /*
1028 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { 1022 * Add group onto cgroup list. It might happen that bdi->dev is
1023 * not initialized yet. Initialize this new group without major
1024 * and minor info and this info will be filled in once a new thread
1025 * comes for IO.
1026 */
1027 if (bdi->dev) {
1029 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); 1028 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1030 cfqg->blkg.dev = MKDEV(major, minor); 1029 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1031 goto done; 1030 (void *)cfqd, MKDEV(major, minor));
1032 } 1031 } else
1033 if (cfqg || !create) 1032 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1034 goto done; 1033 (void *)cfqd, 0);
1034
1035 cfqd->nr_blkcg_linked_grps++;
1036 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1037
1038 /* Add group on cfqd list */
1039 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1040}
1041
1042/*
1043 * Should be called from sleepable context and without the request queue
1044 * lock, as per-cpu stats are allocated dynamically and alloc_percpu()
1045 * needs to be called from sleepable context.
1046 */
1047static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
1048{
1049 struct cfq_group *cfqg = NULL;
1050 int i, j, ret;
1051 struct cfq_rb_root *st;
1035 1052
1036 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node); 1053 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1037 if (!cfqg) 1054 if (!cfqg)
1038 goto done; 1055 return NULL;
1039 1056
1040 for_each_cfqg_st(cfqg, i, j, st) 1057 for_each_cfqg_st(cfqg, i, j, st)
1041 *st = CFQ_RB_ROOT; 1058 *st = CFQ_RB_ROOT;
@@ -1049,43 +1066,94 @@ static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
1049 */ 1066 */
1050 cfqg->ref = 1; 1067 cfqg->ref = 1;
1051 1068
1069 ret = blkio_alloc_blkg_stats(&cfqg->blkg);
1070 if (ret) {
1071 kfree(cfqg);
1072 return NULL;
1073 }
1074
1075 return cfqg;
1076}
1077
1078static struct cfq_group *
1079cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
1080{
1081 struct cfq_group *cfqg = NULL;
1082 void *key = cfqd;
1083 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1084 unsigned int major, minor;
1085
1052 /* 1086 /*
1053 * Add group onto cgroup list. It might happen that bdi->dev is 1087 * This is the common case when there are no blkio cgroups.
1054 * not initialized yet. Initialize this new group without major 1088 * Avoid lookup in this case
1055 * and minor info and this info will be filled in once a new thread
1056 * comes for IO. See code above.
1057 */ 1089 */
1058 if (bdi->dev) { 1090 if (blkcg == &blkio_root_cgroup)
1059 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); 1091 cfqg = &cfqd->root_group;
1060 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 1092 else
1061 MKDEV(major, minor)); 1093 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
1062 } else
1063 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1064 0);
1065
1066 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1067 1094
1068 /* Add group on cfqd list */ 1095 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1069 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list); 1096 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1097 cfqg->blkg.dev = MKDEV(major, minor);
1098 }
1070 1099
1071done:
1072 return cfqg; 1100 return cfqg;
1073} 1101}
1074 1102
1075/* 1103/*
1076 * Search for the cfq group current task belongs to. If create = 1, then also 1104 * Search for the cfq group current task belongs to. request_queue lock must
1077 * create the cfq group if it does not exist. request_queue lock must be held. 1105 * be held.
1078 */ 1106 */
1079static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) 1107static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1080{ 1108{
1081 struct blkio_cgroup *blkcg; 1109 struct blkio_cgroup *blkcg;
1082 struct cfq_group *cfqg = NULL; 1110 struct cfq_group *cfqg = NULL, *__cfqg = NULL;
1111 struct request_queue *q = cfqd->queue;
1083 1112
1084 rcu_read_lock(); 1113 rcu_read_lock();
1085 blkcg = task_blkio_cgroup(current); 1114 blkcg = task_blkio_cgroup(current);
1086 cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); 1115 cfqg = cfq_find_cfqg(cfqd, blkcg);
1087 if (!cfqg && create) 1116 if (cfqg) {
1117 rcu_read_unlock();
1118 return cfqg;
1119 }
1120
1121 /*
1122 * Need to allocate a group. Allocation of group also needs allocation
1123 * of per-cpu stats, which in turn takes a mutex() and can block. Hence
1124 * we need to drop rcu lock and queue_lock before we call alloc.
1125 *
1126 * Not taking any queue reference here and assuming that the queue is
1127 * around by the time we return. CFQ queue allocation code does
1128 * the same. It might be racy though.
1129 */
1130
1131 rcu_read_unlock();
1132 spin_unlock_irq(q->queue_lock);
1133
1134 cfqg = cfq_alloc_cfqg(cfqd);
1135
1136 spin_lock_irq(q->queue_lock);
1137
1138 rcu_read_lock();
1139 blkcg = task_blkio_cgroup(current);
1140
1141 /*
1142 * If some other thread already allocated the group while we were
1143 * not holding queue lock, free up the group
1144 */
1145 __cfqg = cfq_find_cfqg(cfqd, blkcg);
1146
1147 if (__cfqg) {
1148 kfree(cfqg);
1149 rcu_read_unlock();
1150 return __cfqg;
1151 }
1152
1153 if (!cfqg)
1088 cfqg = &cfqd->root_group; 1154 cfqg = &cfqd->root_group;
1155
1156 cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
1089 rcu_read_unlock(); 1157 rcu_read_unlock();
1090 return cfqg; 1158 return cfqg;
1091} 1159}
@@ -1118,6 +1186,7 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
1118 return; 1186 return;
1119 for_each_cfqg_st(cfqg, i, j, st) 1187 for_each_cfqg_st(cfqg, i, j, st)
1120 BUG_ON(!RB_EMPTY_ROOT(&st->rb)); 1188 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1189 free_percpu(cfqg->blkg.stats_cpu);
1121 kfree(cfqg); 1190 kfree(cfqg);
1122} 1191}
1123 1192
@@ -1176,7 +1245,7 @@ void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1176} 1245}
1177 1246
1178#else /* GROUP_IOSCHED */ 1247#else /* GROUP_IOSCHED */
1179static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) 1248static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1180{ 1249{
1181 return &cfqd->root_group; 1250 return &cfqd->root_group;
1182} 1251}
@@ -1210,7 +1279,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1210 struct cfq_rb_root *service_tree; 1279 struct cfq_rb_root *service_tree;
1211 int left; 1280 int left;
1212 int new_cfqq = 1; 1281 int new_cfqq = 1;
1213 int group_changed = 0;
1214 1282
1215 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq), 1283 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1216 cfqq_type(cfqq)); 1284 cfqq_type(cfqq));
@@ -1281,7 +1349,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1281 rb_link_node(&cfqq->rb_node, parent, p); 1349 rb_link_node(&cfqq->rb_node, parent, p);
1282 rb_insert_color(&cfqq->rb_node, &service_tree->rb); 1350 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1283 service_tree->count++; 1351 service_tree->count++;
1284 if ((add_front || !new_cfqq) && !group_changed) 1352 if (add_front || !new_cfqq)
1285 return; 1353 return;
1286 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); 1354 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1287} 1355}
@@ -2029,7 +2097,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2029 2097
2030 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 2098 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2031 2099
2032 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); 2100 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2033} 2101}
2034 2102
2035/* 2103/*
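The simplified cfq_prio_to_maxrq() expression is the same quantity as before once you use the fact that CFQ_PRIO_LISTS and IOPRIO_BE_NR were both 8: 2 * (base_rq + base_rq * (8 - 1 - prio)) factors into 2 * base_rq * (8 - prio). A quick check over every priority level, with an illustrative base_rq value:

#include <assert.h>
#include <stdio.h>

#define CFQ_PRIO_LISTS 8    /* both constants were 8 at the time */
#define IOPRIO_BE_NR   8

int main(void)
{
    const unsigned int base_rq = 4;     /* illustrative, not the real tunable */

    for (unsigned int prio = 0; prio < IOPRIO_BE_NR; prio++) {
        unsigned int old_max = 2 * (base_rq +
                base_rq * (CFQ_PRIO_LISTS - 1 - prio));
        unsigned int new_max = 2 * base_rq * (IOPRIO_BE_NR - prio);

        assert(old_max == new_max);
        printf("prio %u -> at most %u requests\n", prio, new_max);
    }
    return 0;
}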
@@ -2911,7 +2979,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2911 struct cfq_group *cfqg; 2979 struct cfq_group *cfqg;
2912 2980
2913retry: 2981retry:
2914 cfqg = cfq_get_cfqg(cfqd, 1); 2982 cfqg = cfq_get_cfqg(cfqd);
2915 cic = cfq_cic_lookup(cfqd, ioc); 2983 cic = cfq_cic_lookup(cfqd, ioc);
2916 /* cic always exists here */ 2984 /* cic always exists here */
2917 cfqq = cic_to_cfqq(cic, is_sync); 2985 cfqq = cic_to_cfqq(cic, is_sync);
@@ -3815,15 +3883,11 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
3815 cfq_put_queue(cfqd->async_idle_cfqq); 3883 cfq_put_queue(cfqd->async_idle_cfqq);
3816} 3884}
3817 3885
3818static void cfq_cfqd_free(struct rcu_head *head)
3819{
3820 kfree(container_of(head, struct cfq_data, rcu));
3821}
3822
3823static void cfq_exit_queue(struct elevator_queue *e) 3886static void cfq_exit_queue(struct elevator_queue *e)
3824{ 3887{
3825 struct cfq_data *cfqd = e->elevator_data; 3888 struct cfq_data *cfqd = e->elevator_data;
3826 struct request_queue *q = cfqd->queue; 3889 struct request_queue *q = cfqd->queue;
3890 bool wait = false;
3827 3891
3828 cfq_shutdown_timer_wq(cfqd); 3892 cfq_shutdown_timer_wq(cfqd);
3829 3893
@@ -3842,7 +3906,13 @@ static void cfq_exit_queue(struct elevator_queue *e)
3842 3906
3843 cfq_put_async_queues(cfqd); 3907 cfq_put_async_queues(cfqd);
3844 cfq_release_cfq_groups(cfqd); 3908 cfq_release_cfq_groups(cfqd);
3845 cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg); 3909
3910 /*
3911 * If there are groups which we could not unlink from blkcg list,
3912 * wait for a rcu period for them to be freed.
3913 */
3914 if (cfqd->nr_blkcg_linked_grps)
3915 wait = true;
3846 3916
3847 spin_unlock_irq(q->queue_lock); 3917 spin_unlock_irq(q->queue_lock);
3848 3918
@@ -3852,8 +3922,25 @@ static void cfq_exit_queue(struct elevator_queue *e)
3852 ida_remove(&cic_index_ida, cfqd->cic_index); 3922 ida_remove(&cic_index_ida, cfqd->cic_index);
3853 spin_unlock(&cic_index_lock); 3923 spin_unlock(&cic_index_lock);
3854 3924
3855 /* Wait for cfqg->blkg->key accessors to exit their grace periods. */ 3925 /*
3856 call_rcu(&cfqd->rcu, cfq_cfqd_free); 3926 * Wait for cfqg->blkg->key accessors to exit their grace periods.
3927 * Do this wait only if there are other unlinked groups out
3928 * there. This can happen if the cgroup deletion path claimed the
3929 * responsibility of cleaning up a group before the queue cleanup code
3930 * gets to the group.
3931 *
3932 * Do not call synchronize_rcu() unconditionally as there are drivers
3933 * which create/delete request queues hundreds of times during scan/boot
3934 * and synchronize_rcu() can take significant time and slow down boot.
3935 */
3936 if (wait)
3937 synchronize_rcu();
3938
3939#ifdef CONFIG_CFQ_GROUP_IOSCHED
3940 /* Free up per cpu stats for root group */
3941 free_percpu(cfqd->root_group.blkg.stats_cpu);
3942#endif
3943 kfree(cfqd);
3857} 3944}
3858 3945
3859static int cfq_alloc_cic_index(void) 3946static int cfq_alloc_cic_index(void)
@@ -3886,8 +3973,12 @@ static void *cfq_init_queue(struct request_queue *q)
3886 return NULL; 3973 return NULL;
3887 3974
3888 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 3975 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3889 if (!cfqd) 3976 if (!cfqd) {
3977 spin_lock(&cic_index_lock);
3978 ida_remove(&cic_index_ida, i);
3979 spin_unlock(&cic_index_lock);
3890 return NULL; 3980 return NULL;
3981 }
3891 3982
3892 /* 3983 /*
3893 * Don't need take queue_lock in the routine, since we are 3984 * Don't need take queue_lock in the routine, since we are
@@ -3909,14 +4000,29 @@ static void *cfq_init_queue(struct request_queue *q)
3909 4000
3910#ifdef CONFIG_CFQ_GROUP_IOSCHED 4001#ifdef CONFIG_CFQ_GROUP_IOSCHED
3911 /* 4002 /*
3912 * Take a reference to root group which we never drop. This is just 4003 * Set root group reference to 2. One reference will be dropped when
3913 * to make sure that cfq_put_cfqg() does not try to kfree root group 4004 * all groups on cfqd->cfqg_list are being deleted during queue exit.
4005 * Other reference will remain there as we don't want to delete this
4006 * group as it is statically allocated and gets destroyed when
4007 * throtl_data goes away.
3914 */ 4008 */
3915 cfqg->ref = 1; 4009 cfqg->ref = 2;
4010
4011 if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
4012 kfree(cfqg);
4013 kfree(cfqd);
4014 return NULL;
4015 }
4016
3916 rcu_read_lock(); 4017 rcu_read_lock();
4018
3917 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, 4019 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3918 (void *)cfqd, 0); 4020 (void *)cfqd, 0);
3919 rcu_read_unlock(); 4021 rcu_read_unlock();
4022 cfqd->nr_blkcg_linked_grps++;
4023
4024 /* Add group on cfqd->cfqg_list */
4025 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
3920#endif 4026#endif
3921 /* 4027 /*
3922 * Not strictly needed (since RB_ROOT just clears the node and we 4028 * Not strictly needed (since RB_ROOT just clears the node and we
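
The cfq-iosched hunks above make the exit-path RCU wait conditional: groups linked to blkcg lists are counted in nr_blkcg_linked_grps at creation, and cfq_exit_queue() only pays for a grace period when that count is non-zero, since an unconditional synchronize_rcu() slows down boot on systems that create and destroy many request queues. A minimal sketch of that pattern, not the full exit path; the struct and field names are taken from the diff, the locking detail is simplified:

	/* Sketch only: mirrors the conditional wait in cfq_exit_queue(). */
	static void exit_queue_sketch(struct request_queue *q, struct cfq_data *cfqd)
	{
		bool wait;

		spin_lock_irq(q->queue_lock);
		/* Any groups we could not unlink are still visible to RCU readers. */
		wait = cfqd->nr_blkcg_linked_grps != 0;
		spin_unlock_irq(q->queue_lock);

		if (wait)
			synchronize_rcu();	/* let cfqg->blkg->key readers finish */

		kfree(cfqd);
	}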
diff --git a/block/elevator.c b/block/elevator.c
index 45ca1e34f582..b0b38ce0dcb6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -155,13 +155,8 @@ static struct elevator_type *elevator_get(const char *name)
155 155
156 e = elevator_find(name); 156 e = elevator_find(name);
157 if (!e) { 157 if (!e) {
158 char elv[ELV_NAME_MAX + strlen("-iosched")];
159
160 spin_unlock(&elv_list_lock); 158 spin_unlock(&elv_list_lock);
161 159 request_module("%s-iosched", name);
162 snprintf(elv, sizeof(elv), "%s-iosched", name);
163
164 request_module("%s", elv);
165 spin_lock(&elv_list_lock); 160 spin_lock(&elv_list_lock);
166 e = elevator_find(name); 161 e = elevator_find(name);
167 } 162 }
@@ -421,8 +416,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
421 struct list_head *entry; 416 struct list_head *entry;
422 int stop_flags; 417 int stop_flags;
423 418
424 BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
425
426 if (q->last_merge == rq) 419 if (q->last_merge == rq)
427 q->last_merge = NULL; 420 q->last_merge = NULL;
428 421
@@ -661,8 +654,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
661 654
662 rq->q = q; 655 rq->q = q;
663 656
664 BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
665
666 if (rq->cmd_flags & REQ_SOFTBARRIER) { 657 if (rq->cmd_flags & REQ_SOFTBARRIER) {
667 /* barriers are scheduling boundary, update end_sector */ 658 /* barriers are scheduling boundary, update end_sector */
668 if (rq->cmd_type == REQ_TYPE_FS || 659 if (rq->cmd_type == REQ_TYPE_FS ||
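
The elevator_get() change folds the "-iosched" suffix straight into the request_module() format string and drops the on-stack name buffer; the list lock is still released around the module load because request_module() can sleep. A stripped-down sketch of the lookup/load/retry shape, using only the symbols visible in the hunk (the real function also takes a module reference, which is omitted here):

	static struct elevator_type *elevator_get_sketch(const char *name)
	{
		struct elevator_type *e;

		spin_lock(&elv_list_lock);
		e = elevator_find(name);
		if (!e) {
			/* Cannot sleep under the spinlock, so drop it for modprobe. */
			spin_unlock(&elv_list_lock);
			request_module("%s-iosched", name);
			spin_lock(&elv_list_lock);
			e = elevator_find(name);	/* retry after the module load */
		}
		spin_unlock(&elv_list_lock);
		return e;
	}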
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 61631edfecc2..3bb154d8c8cc 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -54,6 +54,8 @@ source "drivers/spi/Kconfig"
54 54
55source "drivers/pps/Kconfig" 55source "drivers/pps/Kconfig"
56 56
57source "drivers/ptp/Kconfig"
58
57source "drivers/gpio/Kconfig" 59source "drivers/gpio/Kconfig"
58 60
59source "drivers/w1/Kconfig" 61source "drivers/w1/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 145aeadb6c03..6b17f5864340 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_I2O) += message/
75obj-$(CONFIG_RTC_LIB) += rtc/ 75obj-$(CONFIG_RTC_LIB) += rtc/
76obj-y += i2c/ media/ 76obj-y += i2c/ media/
77obj-$(CONFIG_PPS) += pps/ 77obj-$(CONFIG_PPS) += pps/
78obj-$(CONFIG_PTP_1588_CLOCK) += ptp/
78obj-$(CONFIG_W1) += w1/ 79obj-$(CONFIG_W1) += w1/
79obj-$(CONFIG_POWER_SUPPLY) += power/ 80obj-$(CONFIG_POWER_SUPPLY) += power/
80obj-$(CONFIG_HWMON) += hwmon/ 81obj-$(CONFIG_HWMON) += hwmon/
@@ -94,7 +95,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
94obj-$(CONFIG_DMA_ENGINE) += dma/ 95obj-$(CONFIG_DMA_ENGINE) += dma/
95obj-$(CONFIG_MMC) += mmc/ 96obj-$(CONFIG_MMC) += mmc/
96obj-$(CONFIG_MEMSTICK) += memstick/ 97obj-$(CONFIG_MEMSTICK) += memstick/
97obj-$(CONFIG_NEW_LEDS) += leds/ 98obj-y += leds/
98obj-$(CONFIG_INFINIBAND) += infiniband/ 99obj-$(CONFIG_INFINIBAND) += infiniband/
99obj-$(CONFIG_SGI_SN) += sn/ 100obj-$(CONFIG_SGI_SN) += sn/
100obj-y += firmware/ 101obj-y += firmware/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3a17ca5fff6f..bc2218db5ba9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -73,17 +73,6 @@ config ACPI_PROCFS_POWER
73 73
74 Say N to delete power /proc/acpi/ directories that have moved to /sys/ 74 Say N to delete power /proc/acpi/ directories that have moved to /sys/
75 75
76config ACPI_POWER_METER
77 tristate "ACPI 4.0 power meter"
78 depends on HWMON
79 help
80 This driver exposes ACPI 4.0 power meters as hardware monitoring
81 devices. Say Y (or M) if you have a computer with ACPI 4.0 firmware
82 and a power meter.
83
84 To compile this driver as a module, choose M here:
85 the module will be called power-meter.
86
87config ACPI_EC_DEBUGFS 76config ACPI_EC_DEBUGFS
88 tristate "EC read/write access through /sys/kernel/debug/ec" 77 tristate "EC read/write access through /sys/kernel/debug/ec"
89 default n 78 default n
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index d113fa5100b2..b66fbb2fc85f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
59obj-$(CONFIG_ACPI_BATTERY) += battery.o 59obj-$(CONFIG_ACPI_BATTERY) += battery.o
60obj-$(CONFIG_ACPI_SBS) += sbshc.o 60obj-$(CONFIG_ACPI_SBS) += sbshc.o
61obj-$(CONFIG_ACPI_SBS) += sbs.o 61obj-$(CONFIG_ACPI_SBS) += sbs.o
62obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
63obj-$(CONFIG_ACPI_HED) += hed.o 62obj-$(CONFIG_ACPI_HED) += hed.o
64obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o 63obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
65 64
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 096aebfe7f32..f74b2ea11f21 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -101,6 +101,14 @@ static DEFINE_MUTEX(einj_mutex);
101 101
102static struct einj_parameter *einj_param; 102static struct einj_parameter *einj_param;
103 103
104#ifndef writeq
105static inline void writeq(__u64 val, volatile void __iomem *addr)
106{
107 writel(val, addr);
108 writel(val >> 32, addr+4);
109}
110#endif
111
104static void einj_exec_ctx_init(struct apei_exec_context *ctx) 112static void einj_exec_ctx_init(struct apei_exec_context *ctx)
105{ 113{
106 apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), 114 apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
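
The einj.c hunk adds a writeq() fallback for builds without a native 64-bit MMIO write: the value is emitted as two 32-bit writes, low dword first at the base address and high dword at offset 4. A hedged, self-contained sketch of the same split, done against a plain memory buffer rather than device memory purely to show the ordering assumption:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Emulate the fallback: low 32 bits at addr, high 32 bits at addr + 4. */
	static void writeq_split(uint64_t val, void *addr)
	{
		uint32_t lo = (uint32_t)val;
		uint32_t hi = (uint32_t)(val >> 32);

		memcpy(addr, &lo, sizeof(lo));
		memcpy((char *)addr + 4, &hi, sizeof(hi));
	}

	int main(void)
	{
		uint64_t reg = 0;

		writeq_split(0x1122334455667788ULL, &reg);
		printf("0x%llx\n", (unsigned long long)reg);	/* full value on little-endian hosts */
		return 0;
	}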
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 542e53903891..7489b89c300f 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -280,9 +280,11 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
280 case 32: 280 case 32:
281 *val = readl(addr); 281 *val = readl(addr);
282 break; 282 break;
283#ifdef readq
283 case 64: 284 case 64:
284 *val = readq(addr); 285 *val = readq(addr);
285 break; 286 break;
287#endif
286 default: 288 default:
287 return -EINVAL; 289 return -EINVAL;
288 } 290 }
@@ -307,9 +309,11 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
307 case 32: 309 case 32:
308 writel(val, addr); 310 writel(val, addr);
309 break; 311 break;
312#ifdef writeq
310 case 64: 313 case 64:
311 writeq(val, addr); 314 writeq(val, addr);
312 break; 315 break;
316#endif
313 default: 317 default:
314 return -EINVAL; 318 return -EINVAL;
315 } 319 }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 30ea95f43e79..d51f9795c064 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1089,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
1089static int ata_scsi_dev_config(struct scsi_device *sdev, 1089static int ata_scsi_dev_config(struct scsi_device *sdev,
1090 struct ata_device *dev) 1090 struct ata_device *dev)
1091{ 1091{
1092 struct request_queue *q = sdev->request_queue;
1093
1092 if (!ata_id_has_unload(dev->id)) 1094 if (!ata_id_has_unload(dev->id))
1093 dev->flags |= ATA_DFLAG_NO_UNLOAD; 1095 dev->flags |= ATA_DFLAG_NO_UNLOAD;
1094 1096
1095 /* configure max sectors */ 1097 /* configure max sectors */
1096 blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors); 1098 blk_queue_max_hw_sectors(q, dev->max_sectors);
1097 1099
1098 if (dev->class == ATA_DEV_ATAPI) { 1100 if (dev->class == ATA_DEV_ATAPI) {
1099 struct request_queue *q = sdev->request_queue;
1100 void *buf; 1101 void *buf;
1101 1102
1102 sdev->sector_size = ATA_SECT_SIZE; 1103 sdev->sector_size = ATA_SECT_SIZE;
1103 1104
1104 /* set DMA padding */ 1105 /* set DMA padding */
1105 blk_queue_update_dma_pad(sdev->request_queue, 1106 blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
1106 ATA_DMA_PAD_SZ - 1);
1107 1107
1108 /* configure draining */ 1108 /* configure draining */
1109 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); 1109 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -1131,8 +1131,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1131 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", 1131 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
1132 sdev->sector_size); 1132 sdev->sector_size);
1133 1133
1134 blk_queue_update_dma_alignment(sdev->request_queue, 1134 blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
1135 sdev->sector_size - 1);
1136 1135
1137 if (dev->flags & ATA_DFLAG_AN) 1136 if (dev->flags & ATA_DFLAG_AN)
1138 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 1137 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1145,6 +1144,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1145 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 1144 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
1146 } 1145 }
1147 1146
1147 blk_queue_flush_queueable(q, false);
1148
1148 dev->sdev = sdev; 1149 dev->sdev = sdev;
1149 return 0; 1150 return 0;
1150} 1151}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index b3b72d64e805..793f796c4da3 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/memory.h> 9#include <linux/memory.h>
10#include <linux/vmstat.h>
10#include <linux/node.h> 11#include <linux/node.h>
11#include <linux/hugetlb.h> 12#include <linux/hugetlb.h>
12#include <linux/compaction.h> 13#include <linux/compaction.h>
@@ -179,11 +180,14 @@ static ssize_t node_read_vmstat(struct sys_device *dev,
179 struct sysdev_attribute *attr, char *buf) 180 struct sysdev_attribute *attr, char *buf)
180{ 181{
181 int nid = dev->id; 182 int nid = dev->id;
182 return sprintf(buf, 183 int i;
183 "nr_written %lu\n" 184 int n = 0;
184 "nr_dirtied %lu\n", 185
185 node_page_state(nid, NR_WRITTEN), 186 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
186 node_page_state(nid, NR_DIRTIED)); 187 n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
188 node_page_state(nid, i));
189
190 return n;
187} 191}
188static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL); 192static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
189 193
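
node_read_vmstat() now dumps every NR_VM_ZONE_STAT_ITEMS counter through vmstat_text[] instead of two hand-picked fields, which is why linux/vmstat.h gets included. The accumulate-into-buf pattern in isolation (a sketch; the sysfs buffer is assumed to be the usual PAGE_SIZE page, and the helper names are the ones visible in the hunk):

	static ssize_t vmstat_show_sketch(int nid, char *buf)
	{
		int i, n = 0;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			n += sprintf(buf + n, "%s %lu\n",
				     vmstat_text[i], node_page_state(nid, i));

		return n;	/* total bytes written, as sysfs expects */
	}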
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 99dd36e8500b..471a04013fe0 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include "bcma_private.h" 8#include "bcma_private.h"
9#include <linux/slab.h>
9#include <linux/bcma/bcma.h> 10#include <linux/bcma/bcma.h>
10#include <linux/pci.h> 11#include <linux/pci.h>
11 12
@@ -171,6 +172,7 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
171} 172}
172 173
173static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { 174static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
175 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
174 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 176 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
175 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 177 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
176 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 178 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 83c32cb72582..717d6e4e18d3 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -470,6 +470,27 @@ config XEN_BLKDEV_FRONTEND
470 block device driver. It communicates with a back-end driver 470 block device driver. It communicates with a back-end driver
471 in another domain which drives the actual block device. 471 in another domain which drives the actual block device.
472 472
473config XEN_BLKDEV_BACKEND
474 tristate "Block-device backend driver"
475 depends on XEN_BACKEND
476 help
477 The block-device backend driver allows the kernel to export its
478 block devices to other guests via a high-performance shared-memory
479 interface.
480
481 The corresponding Linux frontend driver is enabled by the
482 CONFIG_XEN_BLKDEV_FRONTEND configuration option.
483
484 The backend driver attaches itself to any block device specified
485 in the XenBus configuration. There are no limits on what the block
486 device may be, as long as it has a major and minor number.

487
488 If you are compiling a kernel to run in a Xen block backend driver
489 domain (often this is domain 0) you should say Y here. To
490 compile this driver as a module, choose M here: the module
491 will be called xen-blkback.
492
493
473config VIRTIO_BLK 494config VIRTIO_BLK
474 tristate "Virtio block driver (EXPERIMENTAL)" 495 tristate "Virtio block driver (EXPERIMENTAL)"
475 depends on EXPERIMENTAL && VIRTIO 496 depends on EXPERIMENTAL && VIRTIO
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 40528ba56d1b..76646e9a1c91 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_BLK_DEV_UB) += ub.o
36obj-$(CONFIG_BLK_DEV_HD) += hd.o 36obj-$(CONFIG_BLK_DEV_HD) += hd.o
37 37
38obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o 38obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
39obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ 40obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
40obj-$(CONFIG_BLK_DEV_RBD) += rbd.o 41obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
41 42
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9bf13988f1a2..8f4ef656a1af 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -64,6 +64,10 @@ MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
64MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); 64MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
65MODULE_VERSION("3.6.26"); 65MODULE_VERSION("3.6.26");
66MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
67static int cciss_tape_cmds = 6;
68module_param(cciss_tape_cmds, int, 0644);
69MODULE_PARM_DESC(cciss_tape_cmds,
70 "number of commands to allocate for tape devices (default: 6)");
67 71
68static DEFINE_MUTEX(cciss_mutex); 72static DEFINE_MUTEX(cciss_mutex);
69static struct proc_dir_entry *proc_cciss; 73static struct proc_dir_entry *proc_cciss;
@@ -194,6 +198,8 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
194static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, 198static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
195 unsigned long *memory_bar); 199 unsigned long *memory_bar);
196static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); 200static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
201static __devinit int write_driver_ver_to_cfgtable(
202 CfgTable_struct __iomem *cfgtable);
197 203
198/* performant mode helper functions */ 204/* performant mode helper functions */
199static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, 205static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
@@ -556,7 +562,7 @@ static void __devinit cciss_procinit(ctlr_info_t *h)
556#define to_hba(n) container_of(n, struct ctlr_info, dev) 562#define to_hba(n) container_of(n, struct ctlr_info, dev)
557#define to_drv(n) container_of(n, drive_info_struct, dev) 563#define to_drv(n) container_of(n, drive_info_struct, dev)
558 564
559/* List of controllers which cannot be reset on kexec with reset_devices */ 565/* List of controllers which cannot be hard reset on kexec with reset_devices */
560static u32 unresettable_controller[] = { 566static u32 unresettable_controller[] = {
561 0x324a103C, /* Smart Array P712m */ 567 0x324a103C, /* Smart Array P712m */
562 0x324b103C, /* SmartArray P711m */ 568 0x324b103C, /* SmartArray P711m */
@@ -574,23 +580,45 @@ static u32 unresettable_controller[] = {
574 0x409D0E11, /* Smart Array 6400 EM */ 580 0x409D0E11, /* Smart Array 6400 EM */
575}; 581};
576 582
577static int ctlr_is_resettable(struct ctlr_info *h) 583/* List of controllers which cannot even be soft reset */
584static u32 soft_unresettable_controller[] = {
585 0x409C0E11, /* Smart Array 6400 */
586 0x409D0E11, /* Smart Array 6400 EM */
587};
588
589static int ctlr_is_hard_resettable(u32 board_id)
578{ 590{
579 int i; 591 int i;
580 592
581 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) 593 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
582 if (unresettable_controller[i] == h->board_id) 594 if (unresettable_controller[i] == board_id)
583 return 0; 595 return 0;
584 return 1; 596 return 1;
585} 597}
586 598
599static int ctlr_is_soft_resettable(u32 board_id)
600{
601 int i;
602
603 for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
604 if (soft_unresettable_controller[i] == board_id)
605 return 0;
606 return 1;
607}
608
609static int ctlr_is_resettable(u32 board_id)
610{
611 return ctlr_is_hard_resettable(board_id) ||
612 ctlr_is_soft_resettable(board_id);
613}
614
587static ssize_t host_show_resettable(struct device *dev, 615static ssize_t host_show_resettable(struct device *dev,
588 struct device_attribute *attr, 616 struct device_attribute *attr,
589 char *buf) 617 char *buf)
590{ 618{
591 struct ctlr_info *h = to_hba(dev); 619 struct ctlr_info *h = to_hba(dev);
592 620
593 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); 621 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
594} 622}
595static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); 623static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
596 624
@@ -2567,7 +2595,7 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
2567 } 2595 }
2568 } else if (cmd_type == TYPE_MSG) { 2596 } else if (cmd_type == TYPE_MSG) {
2569 switch (cmd) { 2597 switch (cmd) {
2570 case 0: /* ABORT message */ 2598 case CCISS_ABORT_MSG:
2571 c->Request.CDBLen = 12; 2599 c->Request.CDBLen = 12;
2572 c->Request.Type.Attribute = ATTR_SIMPLE; 2600 c->Request.Type.Attribute = ATTR_SIMPLE;
2573 c->Request.Type.Direction = XFER_WRITE; 2601 c->Request.Type.Direction = XFER_WRITE;
@@ -2577,16 +2605,16 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
2577 /* buff contains the tag of the command to abort */ 2605 /* buff contains the tag of the command to abort */
2578 memcpy(&c->Request.CDB[4], buff, 8); 2606 memcpy(&c->Request.CDB[4], buff, 8);
2579 break; 2607 break;
2580 case 1: /* RESET message */ 2608 case CCISS_RESET_MSG:
2581 c->Request.CDBLen = 16; 2609 c->Request.CDBLen = 16;
2582 c->Request.Type.Attribute = ATTR_SIMPLE; 2610 c->Request.Type.Attribute = ATTR_SIMPLE;
2583 c->Request.Type.Direction = XFER_NONE; 2611 c->Request.Type.Direction = XFER_NONE;
2584 c->Request.Timeout = 0; 2612 c->Request.Timeout = 0;
2585 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 2613 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2586 c->Request.CDB[0] = cmd; /* reset */ 2614 c->Request.CDB[0] = cmd; /* reset */
2587 c->Request.CDB[1] = 0x03; /* reset a target */ 2615 c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
2588 break; 2616 break;
2589 case 3: /* No-Op message */ 2617 case CCISS_NOOP_MSG:
2590 c->Request.CDBLen = 1; 2618 c->Request.CDBLen = 1;
2591 c->Request.Type.Attribute = ATTR_SIMPLE; 2619 c->Request.Type.Attribute = ATTR_SIMPLE;
2592 c->Request.Type.Direction = XFER_WRITE; 2620 c->Request.Type.Direction = XFER_WRITE;
@@ -2615,6 +2643,31 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
2615 return status; 2643 return status;
2616} 2644}
2617 2645
2646static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
2647 u8 reset_type)
2648{
2649 CommandList_struct *c;
2650 int return_status;
2651
2652 c = cmd_alloc(h);
2653 if (!c)
2654 return -ENOMEM;
2655 return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
2656 CTLR_LUNID, TYPE_MSG);
2657 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2658 if (return_status != IO_OK) {
2659 cmd_special_free(h, c);
2660 return return_status;
2661 }
2662 c->waiting = NULL;
2663 enqueue_cmd_and_start_io(h, c);
2664 /* Don't wait for completion, the reset won't complete. Don't free
2665 * the command either. This is the last command we will send before
2666 * re-initializing everything, so it doesn't matter and won't leak.
2667 */
2668 return 0;
2669}
2670
2618static int check_target_status(ctlr_info_t *h, CommandList_struct *c) 2671static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
2619{ 2672{
2620 switch (c->err_info->ScsiStatus) { 2673 switch (c->err_info->ScsiStatus) {
@@ -3461,6 +3514,63 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
3461 return next_command(h); 3514 return next_command(h);
3462} 3515}
3463 3516
3517/* Some controllers, like p400, will give us one interrupt
3518 * after a soft reset, even if we turned interrupts off.
3519 * Only need to check for this in the cciss_xxx_discard_completions
3520 * functions.
3521 */
3522static int ignore_bogus_interrupt(ctlr_info_t *h)
3523{
3524 if (likely(!reset_devices))
3525 return 0;
3526
3527 if (likely(h->interrupts_enabled))
3528 return 0;
3529
3530 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3531 "(known firmware bug.) Ignoring.\n");
3532
3533 return 1;
3534}
3535
3536static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
3537{
3538 ctlr_info_t *h = dev_id;
3539 unsigned long flags;
3540 u32 raw_tag;
3541
3542 if (ignore_bogus_interrupt(h))
3543 return IRQ_NONE;
3544
3545 if (interrupt_not_for_us(h))
3546 return IRQ_NONE;
3547 spin_lock_irqsave(&h->lock, flags);
3548 while (interrupt_pending(h)) {
3549 raw_tag = get_next_completion(h);
3550 while (raw_tag != FIFO_EMPTY)
3551 raw_tag = next_command(h);
3552 }
3553 spin_unlock_irqrestore(&h->lock, flags);
3554 return IRQ_HANDLED;
3555}
3556
3557static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
3558{
3559 ctlr_info_t *h = dev_id;
3560 unsigned long flags;
3561 u32 raw_tag;
3562
3563 if (ignore_bogus_interrupt(h))
3564 return IRQ_NONE;
3565
3566 spin_lock_irqsave(&h->lock, flags);
3567 raw_tag = get_next_completion(h);
3568 while (raw_tag != FIFO_EMPTY)
3569 raw_tag = next_command(h);
3570 spin_unlock_irqrestore(&h->lock, flags);
3571 return IRQ_HANDLED;
3572}
3573
3464static irqreturn_t do_cciss_intx(int irq, void *dev_id) 3574static irqreturn_t do_cciss_intx(int irq, void *dev_id)
3465{ 3575{
3466 ctlr_info_t *h = dev_id; 3576 ctlr_info_t *h = dev_id;
@@ -4078,6 +4188,9 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
4078 cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); 4188 cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
4079 if (!h->cfgtable) 4189 if (!h->cfgtable)
4080 return -ENOMEM; 4190 return -ENOMEM;
4191 rc = write_driver_ver_to_cfgtable(h->cfgtable);
4192 if (rc)
4193 return rc;
4081 /* Find performant mode table. */ 4194 /* Find performant mode table. */
4082 trans_offset = readl(&h->cfgtable->TransMethodOffset); 4195 trans_offset = readl(&h->cfgtable->TransMethodOffset);
4083 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 4196 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
@@ -4112,7 +4225,7 @@ static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
4112static void __devinit cciss_find_board_params(ctlr_info_t *h) 4225static void __devinit cciss_find_board_params(ctlr_info_t *h)
4113{ 4226{
4114 cciss_get_max_perf_mode_cmds(h); 4227 cciss_get_max_perf_mode_cmds(h);
4115 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 4228 h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
4116 h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); 4229 h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
4117 /* 4230 /*
4118 * Limit in-command s/g elements to 32 save dma'able memory. 4231 * Limit in-command s/g elements to 32 save dma'able memory.
@@ -4348,7 +4461,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
4348 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 4461 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
4349 if ((tag & ~3) == paddr32) 4462 if ((tag & ~3) == paddr32)
4350 break; 4463 break;
4351 schedule_timeout_uninterruptible(HZ); 4464 msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
4352 } 4465 }
4353 4466
4354 iounmap(vaddr); 4467 iounmap(vaddr);
@@ -4375,11 +4488,10 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
4375 return 0; 4488 return 0;
4376} 4489}
4377 4490
4378#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
4379#define cciss_noop(p) cciss_message(p, 3, 0) 4491#define cciss_noop(p) cciss_message(p, 3, 0)
4380 4492
4381static int cciss_controller_hard_reset(struct pci_dev *pdev, 4493static int cciss_controller_hard_reset(struct pci_dev *pdev,
4382 void * __iomem vaddr, bool use_doorbell) 4494 void * __iomem vaddr, u32 use_doorbell)
4383{ 4495{
4384 u16 pmcsr; 4496 u16 pmcsr;
4385 int pos; 4497 int pos;
@@ -4390,8 +4502,7 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
4390 * other way using the doorbell register. 4502 * other way using the doorbell register.
4391 */ 4503 */
4392 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 4504 dev_info(&pdev->dev, "using doorbell to reset controller\n");
4393 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); 4505 writel(use_doorbell, vaddr + SA5_DOORBELL);
4394 msleep(1000);
4395 } else { /* Try to do it the PCI power state way */ 4506 } else { /* Try to do it the PCI power state way */
4396 4507
4397 /* Quoting from the Open CISS Specification: "The Power 4508 /* Quoting from the Open CISS Specification: "The Power
@@ -4422,12 +4533,64 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
4422 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4533 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4423 pmcsr |= PCI_D0; 4534 pmcsr |= PCI_D0;
4424 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4535 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
4425
4426 msleep(500);
4427 } 4536 }
4428 return 0; 4537 return 0;
4429} 4538}
4430 4539
4540static __devinit void init_driver_version(char *driver_version, int len)
4541{
4542 memset(driver_version, 0, len);
4543 strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
4544}
4545
4546static __devinit int write_driver_ver_to_cfgtable(
4547 CfgTable_struct __iomem *cfgtable)
4548{
4549 char *driver_version;
4550 int i, size = sizeof(cfgtable->driver_version);
4551
4552 driver_version = kmalloc(size, GFP_KERNEL);
4553 if (!driver_version)
4554 return -ENOMEM;
4555
4556 init_driver_version(driver_version, size);
4557 for (i = 0; i < size; i++)
4558 writeb(driver_version[i], &cfgtable->driver_version[i]);
4559 kfree(driver_version);
4560 return 0;
4561}
4562
4563static __devinit void read_driver_ver_from_cfgtable(
4564 CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver)
4565{
4566 int i;
4567
4568 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
4569 driver_ver[i] = readb(&cfgtable->driver_version[i]);
4570}
4571
4572static __devinit int controller_reset_failed(
4573 CfgTable_struct __iomem *cfgtable)
4574{
4575
4576 char *driver_ver, *old_driver_ver;
4577 int rc, size = sizeof(cfgtable->driver_version);
4578
4579 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
4580 if (!old_driver_ver)
4581 return -ENOMEM;
4582 driver_ver = old_driver_ver + size;
4583
4584 /* After a reset, the 32 bytes of "driver version" in the cfgtable
4585 * should have been changed, otherwise we know the reset failed.
4586 */
4587 init_driver_version(old_driver_ver, size);
4588 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
4589 rc = !memcmp(driver_ver, old_driver_ver, size);
4590 kfree(old_driver_ver);
4591 return rc;
4592}
4593
4431/* This does a hard reset of the controller using PCI power management 4594/* This does a hard reset of the controller using PCI power management
4432 * states or using the doorbell register. */ 4595 * states or using the doorbell register. */
4433static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) 4596static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
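
The new driver_version handshake gives the driver a way to confirm that a reset actually happened: a known 32-byte string is written into the config table before the reset, and controller_reset_failed() reports failure if the very same bytes are still there afterwards, since a real reset should have changed that field. A small self-contained sketch of the detection logic, with the iomem accessors replaced by plain memory so it compiles anywhere; the version string is illustrative, not the driver's exact DRIVER_NAME:

	#include <stdio.h>
	#include <string.h>

	#define VER_LEN 32

	/* Stand-in for the config-table field, which really lives in PCI memory space. */
	static char cfgtable_driver_version[VER_LEN];

	static void init_driver_version(char *buf, int len)
	{
		memset(buf, 0, len);
		strncpy(buf, "cciss 3.6.26", len - 1);	/* illustrative sentinel */
	}

	/* Returns 1 if the sentinel survived, i.e. the reset did not take. */
	static int controller_reset_failed(void)
	{
		char expected[VER_LEN];

		init_driver_version(expected, VER_LEN);
		return memcmp(cfgtable_driver_version, expected, VER_LEN) == 0;
	}

	int main(void)
	{
		init_driver_version(cfgtable_driver_version, VER_LEN);	/* written before reset */
		printf("no reset yet -> failed=%d\n", controller_reset_failed());	/* prints 1 */

		memset(cfgtable_driver_version, 0, VER_LEN);	/* a real reset changes the field */
		printf("after reset  -> failed=%d\n", controller_reset_failed());	/* prints 0 */
		return 0;
	}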
@@ -4437,10 +4600,10 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4437 u64 cfg_base_addr_index; 4600 u64 cfg_base_addr_index;
4438 void __iomem *vaddr; 4601 void __iomem *vaddr;
4439 unsigned long paddr; 4602 unsigned long paddr;
4440 u32 misc_fw_support, active_transport; 4603 u32 misc_fw_support;
4441 int rc; 4604 int rc;
4442 CfgTable_struct __iomem *cfgtable; 4605 CfgTable_struct __iomem *cfgtable;
4443 bool use_doorbell; 4606 u32 use_doorbell;
4444 u32 board_id; 4607 u32 board_id;
4445 u16 command_register; 4608 u16 command_register;
4446 4609
@@ -4464,12 +4627,16 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4464 * likely not be happy. Just forbid resetting this conjoined mess. 4627 * likely not be happy. Just forbid resetting this conjoined mess.
4465 */ 4628 */
4466 cciss_lookup_board_id(pdev, &board_id); 4629 cciss_lookup_board_id(pdev, &board_id);
4467 if (board_id == 0x409C0E11 || board_id == 0x409D0E11) { 4630 if (!ctlr_is_resettable(board_id)) {
4468 dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " 4631 dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
4469 "due to shared cache module."); 4632 "due to shared cache module.");
4470 return -ENODEV; 4633 return -ENODEV;
4471 } 4634 }
4472 4635
4636 /* if controller is soft- but not hard resettable... */
4637 if (!ctlr_is_hard_resettable(board_id))
4638 return -ENOTSUPP; /* try soft reset later. */
4639
4473 /* Save the PCI command register */ 4640 /* Save the PCI command register */
4474 pci_read_config_word(pdev, 4, &command_register); 4641 pci_read_config_word(pdev, 4, &command_register);
4475 /* Turn the board off. This is so that later pci_restore_state() 4642 /* Turn the board off. This is so that later pci_restore_state()
@@ -4497,16 +4664,28 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4497 rc = -ENOMEM; 4664 rc = -ENOMEM;
4498 goto unmap_vaddr; 4665 goto unmap_vaddr;
4499 } 4666 }
4667 rc = write_driver_ver_to_cfgtable(cfgtable);
4668 if (rc)
4669 goto unmap_vaddr;
4500 4670
4501 /* If reset via doorbell register is supported, use that. */ 4671 /* If reset via doorbell register is supported, use that.
4502 misc_fw_support = readl(&cfgtable->misc_fw_support); 4672 * There are two such methods. Favor the newest method.
4503 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4504
4505 /* The doorbell reset seems to cause lockups on some Smart
4506 * Arrays (e.g. P410, P410i, maybe others). Until this is
4507 * fixed or at least isolated, avoid the doorbell reset.
4508 */ 4673 */
4509 use_doorbell = 0; 4674 misc_fw_support = readl(&cfgtable->misc_fw_support);
4675 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
4676 if (use_doorbell) {
4677 use_doorbell = DOORBELL_CTLR_RESET2;
4678 } else {
4679 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4680 if (use_doorbell) {
4681 dev_warn(&pdev->dev, "Controller claims that "
4682 "'Bit 2 doorbell reset' is "
4683 "supported, but not 'bit 5 doorbell reset'. "
4684 "Firmware update is recommended.\n");
4685 rc = -ENOTSUPP; /* use the soft reset */
4686 goto unmap_cfgtable;
4687 }
4688 }
4510 4689
4511 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4690 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4512 if (rc) 4691 if (rc)
@@ -4524,30 +4703,31 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4524 msleep(CCISS_POST_RESET_PAUSE_MSECS); 4703 msleep(CCISS_POST_RESET_PAUSE_MSECS);
4525 4704
4526 /* Wait for board to become not ready, then ready. */ 4705 /* Wait for board to become not ready, then ready. */
4527 dev_info(&pdev->dev, "Waiting for board to become ready.\n"); 4706 dev_info(&pdev->dev, "Waiting for board to reset.\n");
4528 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 4707 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
4529 if (rc) /* Don't bail, might be E500, etc. which can't be reset */ 4708 if (rc) {
4530 dev_warn(&pdev->dev, 4709 dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
4531 "failed waiting for board to become not ready\n"); 4710 " Will try soft reset.\n");
4711 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
4712 goto unmap_cfgtable;
4713 }
4532 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); 4714 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
4533 if (rc) { 4715 if (rc) {
4534 dev_warn(&pdev->dev, 4716 dev_warn(&pdev->dev,
4535 "failed waiting for board to become ready\n"); 4717 "failed waiting for board to become ready "
4718 "after hard reset\n");
4536 goto unmap_cfgtable; 4719 goto unmap_cfgtable;
4537 } 4720 }
4538 dev_info(&pdev->dev, "board ready.\n");
4539 4721
4540 /* Controller should be in simple mode at this point. If it's not, 4722 rc = controller_reset_failed(vaddr);
4541 * It means we're on one of those controllers which doesn't support 4723 if (rc < 0)
4542 * the doorbell reset method and on which the PCI power management reset 4724 goto unmap_cfgtable;
4543 * method doesn't work (P800, for example.) 4725 if (rc) {
4544 * In those cases, don't try to proceed, as it generally doesn't work. 4726 dev_warn(&pdev->dev, "Unable to successfully hard reset "
4545 */ 4727 "controller. Will try soft reset.\n");
4546 active_transport = readl(&cfgtable->TransportActive); 4728 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
4547 if (active_transport & PERFORMANT_MODE) { 4729 } else {
4548 dev_warn(&pdev->dev, "Unable to successfully reset controller," 4730 dev_info(&pdev->dev, "Board ready after hard reset.\n");
4549 " Ignoring controller.\n");
4550 rc = -ENODEV;
4551 } 4731 }
4552 4732
4553unmap_cfgtable: 4733unmap_cfgtable:
@@ -4574,11 +4754,12 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
4574 * due to concerns about shared bbwc between 6402/6404 pair. 4754 * due to concerns about shared bbwc between 6402/6404 pair.
4575 */ 4755 */
4576 if (rc == -ENOTSUPP) 4756 if (rc == -ENOTSUPP)
4577 return 0; /* just try to do the kdump anyhow. */ 4757 return rc; /* just try to do the kdump anyhow. */
4578 if (rc) 4758 if (rc)
4579 return -ENODEV; 4759 return -ENODEV;
4580 4760
4581 /* Now try to get the controller to respond to a no-op */ 4761 /* Now try to get the controller to respond to a no-op */
4762 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
4582 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4763 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
4583 if (cciss_noop(pdev) == 0) 4764 if (cciss_noop(pdev) == 0)
4584 break; 4765 break;
@@ -4591,6 +4772,148 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
4591 return 0; 4772 return 0;
4592} 4773}
4593 4774
4775static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
4776{
4777 h->cmd_pool_bits = kmalloc(
4778 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
4779 sizeof(unsigned long), GFP_KERNEL);
4780 h->cmd_pool = pci_alloc_consistent(h->pdev,
4781 h->nr_cmds * sizeof(CommandList_struct),
4782 &(h->cmd_pool_dhandle));
4783 h->errinfo_pool = pci_alloc_consistent(h->pdev,
4784 h->nr_cmds * sizeof(ErrorInfo_struct),
4785 &(h->errinfo_pool_dhandle));
4786 if ((h->cmd_pool_bits == NULL)
4787 || (h->cmd_pool == NULL)
4788 || (h->errinfo_pool == NULL)) {
4789 dev_err(&h->pdev->dev, "out of memory");
4790 return -ENOMEM;
4791 }
4792 return 0;
4793}
4794
4795static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h)
4796{
4797 int i;
4798
4799 /* zero it, so that on free we need not know how many were alloc'ed */
4800 h->scatter_list = kzalloc(h->max_commands *
4801 sizeof(struct scatterlist *), GFP_KERNEL);
4802 if (!h->scatter_list)
4803 return -ENOMEM;
4804
4805 for (i = 0; i < h->nr_cmds; i++) {
4806 h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) *
4807 h->maxsgentries, GFP_KERNEL);
4808 if (h->scatter_list[i] == NULL) {
4809 dev_err(&h->pdev->dev, "could not allocate "
4810 "s/g lists\n");
4811 return -ENOMEM;
4812 }
4813 }
4814 return 0;
4815}
4816
4817static void cciss_free_scatterlists(ctlr_info_t *h)
4818{
4819 int i;
4820
4821 if (h->scatter_list) {
4822 for (i = 0; i < h->nr_cmds; i++)
4823 kfree(h->scatter_list[i]);
4824 kfree(h->scatter_list);
4825 }
4826}
4827
4828static void cciss_free_cmd_pool(ctlr_info_t *h)
4829{
4830 kfree(h->cmd_pool_bits);
4831 if (h->cmd_pool)
4832 pci_free_consistent(h->pdev,
4833 h->nr_cmds * sizeof(CommandList_struct),
4834 h->cmd_pool, h->cmd_pool_dhandle);
4835 if (h->errinfo_pool)
4836 pci_free_consistent(h->pdev,
4837 h->nr_cmds * sizeof(ErrorInfo_struct),
4838 h->errinfo_pool, h->errinfo_pool_dhandle);
4839}
4840
4841static int cciss_request_irq(ctlr_info_t *h,
4842 irqreturn_t (*msixhandler)(int, void *),
4843 irqreturn_t (*intxhandler)(int, void *))
4844{
4845 if (h->msix_vector || h->msi_vector) {
4846 if (!request_irq(h->intr[PERF_MODE_INT], msixhandler,
4847 IRQF_DISABLED, h->devname, h))
4848 return 0;
4849 dev_err(&h->pdev->dev, "Unable to get msi irq %d"
4850 " for %s\n", h->intr[PERF_MODE_INT],
4851 h->devname);
4852 return -1;
4853 }
4854
4855 if (!request_irq(h->intr[PERF_MODE_INT], intxhandler,
4856 IRQF_DISABLED, h->devname, h))
4857 return 0;
4858 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4859 h->intr[PERF_MODE_INT], h->devname);
4860 return -1;
4861}
4862
4863static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h)
4864{
4865 if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
4866 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4867 return -EIO;
4868 }
4869
4870 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4871 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4872 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4873 return -1;
4874 }
4875
4876 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4877 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4878 dev_warn(&h->pdev->dev, "Board failed to become ready "
4879 "after soft reset.\n");
4880 return -1;
4881 }
4882
4883 return 0;
4884}
4885
4886static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
4887{
4888 int ctlr = h->ctlr;
4889
4890 free_irq(h->intr[PERF_MODE_INT], h);
4891#ifdef CONFIG_PCI_MSI
4892 if (h->msix_vector)
4893 pci_disable_msix(h->pdev);
4894 else if (h->msi_vector)
4895 pci_disable_msi(h->pdev);
4896#endif /* CONFIG_PCI_MSI */
4897 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
4898 cciss_free_scatterlists(h);
4899 cciss_free_cmd_pool(h);
4900 kfree(h->blockFetchTable);
4901 if (h->reply_pool)
4902 pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
4903 h->reply_pool, h->reply_pool_dhandle);
4904 if (h->transtable)
4905 iounmap(h->transtable);
4906 if (h->cfgtable)
4907 iounmap(h->cfgtable);
4908 if (h->vaddr)
4909 iounmap(h->vaddr);
4910 unregister_blkdev(h->major, h->devname);
4911 cciss_destroy_hba_sysfs_entry(h);
4912 pci_release_regions(h->pdev);
4913 kfree(h);
4914 hba[ctlr] = NULL;
4915}
4916
4594/* 4917/*
4595 * This is it. Find all the controllers and register them. I really hate 4918 * This is it. Find all the controllers and register them. I really hate
4596 * stealing all these major device numbers. 4919 * stealing all these major device numbers.
@@ -4601,15 +4924,28 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4601{ 4924{
4602 int i; 4925 int i;
4603 int j = 0; 4926 int j = 0;
4604 int k = 0;
4605 int rc; 4927 int rc;
4928 int try_soft_reset = 0;
4606 int dac, return_code; 4929 int dac, return_code;
4607 InquiryData_struct *inq_buff; 4930 InquiryData_struct *inq_buff;
4608 ctlr_info_t *h; 4931 ctlr_info_t *h;
4932 unsigned long flags;
4609 4933
4610 rc = cciss_init_reset_devices(pdev); 4934 rc = cciss_init_reset_devices(pdev);
4611 if (rc) 4935 if (rc) {
4612 return rc; 4936 if (rc != -ENOTSUPP)
4937 return rc;
4938 /* If the reset fails in a particular way (it has no way to do
4939 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4940 * a soft reset once we get the controller configured up to the
4941 * point that it can accept a command.
4942 */
4943 try_soft_reset = 1;
4944 rc = 0;
4945 }
4946
4947reinit_after_soft_reset:
4948
4613 i = alloc_cciss_hba(pdev); 4949 i = alloc_cciss_hba(pdev);
4614 if (i < 0) 4950 if (i < 0)
4615 return -1; 4951 return -1;
@@ -4627,6 +4963,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4627 sprintf(h->devname, "cciss%d", i); 4963 sprintf(h->devname, "cciss%d", i);
4628 h->ctlr = i; 4964 h->ctlr = i;
4629 4965
4966 if (cciss_tape_cmds < 2)
4967 cciss_tape_cmds = 2;
4968 if (cciss_tape_cmds > 16)
4969 cciss_tape_cmds = 16;
4970
4630 init_completion(&h->scan_wait); 4971 init_completion(&h->scan_wait);
4631 4972
4632 if (cciss_create_hba_sysfs_entry(h)) 4973 if (cciss_create_hba_sysfs_entry(h))
@@ -4662,62 +5003,20 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4662 5003
4663 /* make sure the board interrupts are off */ 5004 /* make sure the board interrupts are off */
4664 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5005 h->access.set_intr_mask(h, CCISS_INTR_OFF);
4665 if (h->msi_vector || h->msix_vector) { 5006 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
4666 if (request_irq(h->intr[PERF_MODE_INT], 5007 if (rc)
4667 do_cciss_msix_intr, 5008 goto clean2;
4668 IRQF_DISABLED, h->devname, h)) {
4669 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4670 h->intr[PERF_MODE_INT], h->devname);
4671 goto clean2;
4672 }
4673 } else {
4674 if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx,
4675 IRQF_DISABLED, h->devname, h)) {
4676 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4677 h->intr[PERF_MODE_INT], h->devname);
4678 goto clean2;
4679 }
4680 }
4681 5009
4682 dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", 5010 dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
4683 h->devname, pdev->device, pci_name(pdev), 5011 h->devname, pdev->device, pci_name(pdev),
4684 h->intr[PERF_MODE_INT], dac ? "" : " not"); 5012 h->intr[PERF_MODE_INT], dac ? "" : " not");
4685 5013
4686 h->cmd_pool_bits = 5014 if (cciss_allocate_cmd_pool(h))
4687 kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
4688 * sizeof(unsigned long), GFP_KERNEL);
4689 h->cmd_pool = (CommandList_struct *)
4690 pci_alloc_consistent(h->pdev,
4691 h->nr_cmds * sizeof(CommandList_struct),
4692 &(h->cmd_pool_dhandle));
4693 h->errinfo_pool = (ErrorInfo_struct *)
4694 pci_alloc_consistent(h->pdev,
4695 h->nr_cmds * sizeof(ErrorInfo_struct),
4696 &(h->errinfo_pool_dhandle));
4697 if ((h->cmd_pool_bits == NULL)
4698 || (h->cmd_pool == NULL)
4699 || (h->errinfo_pool == NULL)) {
4700 dev_err(&h->pdev->dev, "out of memory");
4701 goto clean4; 5015 goto clean4;
4702 }
4703 5016
4704 /* Need space for temp scatter list */ 5017 if (cciss_allocate_scatterlists(h))
4705 h->scatter_list = kmalloc(h->max_commands *
4706 sizeof(struct scatterlist *),
4707 GFP_KERNEL);
4708 if (!h->scatter_list)
4709 goto clean4; 5018 goto clean4;
4710 5019
4711 for (k = 0; k < h->nr_cmds; k++) {
4712 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
4713 h->maxsgentries,
4714 GFP_KERNEL);
4715 if (h->scatter_list[k] == NULL) {
4716 dev_err(&h->pdev->dev,
4717 "could not allocate s/g lists\n");
4718 goto clean4;
4719 }
4720 }
4721 h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 5020 h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
4722 h->chainsize, h->nr_cmds); 5021 h->chainsize, h->nr_cmds);
4723 if (!h->cmd_sg_list && h->chainsize > 0) 5022 if (!h->cmd_sg_list && h->chainsize > 0)
@@ -4741,6 +5040,62 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4741 h->gendisk[j] = NULL; 5040 h->gendisk[j] = NULL;
4742 } 5041 }
4743 5042
5043 /* At this point, the controller is ready to take commands.
5044 * Now, if reset_devices and the hard reset didn't work, try
5045 * the soft reset and see if that works.
5046 */
5047 if (try_soft_reset) {
5048
5049 /* This is kind of gross. We may or may not get a completion
5050 * from the soft reset command, and if we do, then the value
5051 * from the fifo may or may not be valid. So, we wait 10 secs
5052 * after the reset throwing away any completions we get during
5053 * that time. Unregister the interrupt handler and register
5054 * fake ones to scoop up any residual completions.
5055 */
5056 spin_lock_irqsave(&h->lock, flags);
5057 h->access.set_intr_mask(h, CCISS_INTR_OFF);
5058 spin_unlock_irqrestore(&h->lock, flags);
5059 free_irq(h->intr[PERF_MODE_INT], h);
5060 rc = cciss_request_irq(h, cciss_msix_discard_completions,
5061 cciss_intx_discard_completions);
5062 if (rc) {
5063 dev_warn(&h->pdev->dev, "Failed to request_irq after "
5064 "soft reset.\n");
5065 goto clean4;
5066 }
5067
5068 rc = cciss_kdump_soft_reset(h);
5069 if (rc) {
5070 dev_warn(&h->pdev->dev, "Soft reset failed.\n");
5071 goto clean4;
5072 }
5073
5074 dev_info(&h->pdev->dev, "Board READY.\n");
5075 dev_info(&h->pdev->dev,
5076 "Waiting for stale completions to drain.\n");
5077 h->access.set_intr_mask(h, CCISS_INTR_ON);
5078 msleep(10000);
5079 h->access.set_intr_mask(h, CCISS_INTR_OFF);
5080
5081 rc = controller_reset_failed(h->cfgtable);
5082 if (rc)
5083 dev_info(&h->pdev->dev,
5084 "Soft reset appears to have failed.\n");
5085
5086 /* since the controller's reset, we have to go back and re-init
5087 * everything. Easiest to just forget what we've done and do it
5088 * all over again.
5089 */
5090 cciss_undo_allocations_after_kdump_soft_reset(h);
5091 try_soft_reset = 0;
5092 if (rc)
5093 /* don't go to clean4, we already unallocated */
5094 return -ENODEV;
5095
5096 goto reinit_after_soft_reset;
5097 }
5098
4744 cciss_scsi_setup(h); 5099 cciss_scsi_setup(h);
4745 5100
4746 /* Turn the interrupts on so we can service requests */ 5101 /* Turn the interrupts on so we can service requests */
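
The comment block above spells out the awkward part of the kdump soft reset: completions arriving after the reset may be stale, so the real interrupt handlers are swapped for "discard" handlers, the driver sleeps ten seconds with interrupts enabled to drain the FIFO, and then every allocation is torn down and init is rerun via the reinit_after_soft_reset label. Condensed into a sketch (all function names are ones added elsewhere in this patch; locking around the mask writes is omitted):

	static int kdump_soft_reset_sketch(ctlr_info_t *h)
	{
		int rc;

		h->access.set_intr_mask(h, CCISS_INTR_OFF);
		free_irq(h->intr[PERF_MODE_INT], h);

		/* Fake handlers that just swallow stale completions. */
		rc = cciss_request_irq(h, cciss_msix_discard_completions,
				       cciss_intx_discard_completions);
		if (rc)
			return rc;

		rc = cciss_kdump_soft_reset(h);		/* send the reset message */
		if (rc)
			return rc;

		h->access.set_intr_mask(h, CCISS_INTR_ON);
		msleep(10000);				/* let residual completions drain */
		h->access.set_intr_mask(h, CCISS_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);

		/* Forget everything allocated so far and start over from scratch. */
		cciss_undo_allocations_after_kdump_soft_reset(h);
		return rc ? -ENODEV : 0;	/* on success the caller re-runs init */
	}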
@@ -4775,21 +5130,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4775 return 1; 5130 return 1;
4776 5131
4777clean4: 5132clean4:
4778 kfree(h->cmd_pool_bits); 5133 cciss_free_cmd_pool(h);
4779 /* Free up sg elements */ 5134 cciss_free_scatterlists(h);
4780 for (k-- ; k >= 0; k--)
4781 kfree(h->scatter_list[k]);
4782 kfree(h->scatter_list);
4783 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 5135 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
4784 if (h->cmd_pool)
4785 pci_free_consistent(h->pdev,
4786 h->nr_cmds * sizeof(CommandList_struct),
4787 h->cmd_pool, h->cmd_pool_dhandle);
4788 if (h->errinfo_pool)
4789 pci_free_consistent(h->pdev,
4790 h->nr_cmds * sizeof(ErrorInfo_struct),
4791 h->errinfo_pool,
4792 h->errinfo_pool_dhandle);
4793 free_irq(h->intr[PERF_MODE_INT], h); 5136 free_irq(h->intr[PERF_MODE_INT], h);
4794clean2: 5137clean2:
4795 unregister_blkdev(h->major, h->devname); 5138 unregister_blkdev(h->major, h->devname);
@@ -4887,16 +5230,16 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4887 iounmap(h->cfgtable); 5230 iounmap(h->cfgtable);
4888 iounmap(h->vaddr); 5231 iounmap(h->vaddr);
4889 5232
4890 pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), 5233 cciss_free_cmd_pool(h);
4891 h->cmd_pool, h->cmd_pool_dhandle);
4892 pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct),
4893 h->errinfo_pool, h->errinfo_pool_dhandle);
4894 kfree(h->cmd_pool_bits);
4895 /* Free up sg elements */ 5234 /* Free up sg elements */
4896 for (j = 0; j < h->nr_cmds; j++) 5235 for (j = 0; j < h->nr_cmds; j++)
4897 kfree(h->scatter_list[j]); 5236 kfree(h->scatter_list[j]);
4898 kfree(h->scatter_list); 5237 kfree(h->scatter_list);
4899 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 5238 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
5239 kfree(h->blockFetchTable);
5240 if (h->reply_pool)
5241 pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
5242 h->reply_pool, h->reply_pool_dhandle);
4900 /* 5243 /*
4901 * Deliberately omit pci_disable_device(): it does something nasty to 5244 * Deliberately omit pci_disable_device(): it does something nasty to
4902 * Smart Array controllers that pci_enable_device does not undo 5245 * Smart Array controllers that pci_enable_device does not undo
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 554bbd907d14..16b4d58d84dd 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,7 +200,7 @@ struct ctlr_info
200 * the above. 200 * the above.
201 */ 201 */
202#define CCISS_BOARD_READY_WAIT_SECS (120) 202#define CCISS_BOARD_READY_WAIT_SECS (120)
203#define CCISS_BOARD_NOT_READY_WAIT_SECS (10) 203#define CCISS_BOARD_NOT_READY_WAIT_SECS (100)
204#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) 204#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
205#define CCISS_BOARD_READY_ITERATIONS \ 205#define CCISS_BOARD_READY_ITERATIONS \
206 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ 206 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
@@ -209,8 +209,9 @@ struct ctlr_info
209 ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \ 209 ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
210 CCISS_BOARD_READY_POLL_INTERVAL_MSECS) 210 CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
211#define CCISS_POST_RESET_PAUSE_MSECS (3000) 211#define CCISS_POST_RESET_PAUSE_MSECS (3000)
212#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) 212#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (4000)
213#define CCISS_POST_RESET_NOOP_RETRIES (12) 213#define CCISS_POST_RESET_NOOP_RETRIES (12)
214#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000)
214 215
215/* 216/*
216 Send the command to the hardware 217 Send the command to the hardware
@@ -239,11 +240,13 @@ static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
239 { /* Turn interrupts on */ 240 { /* Turn interrupts on */
240 h->interrupts_enabled = 1; 241 h->interrupts_enabled = 1;
241 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 242 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
243 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
242 } else /* Turn them off */ 244 } else /* Turn them off */
243 { 245 {
244 h->interrupts_enabled = 0; 246 h->interrupts_enabled = 0;
245 writel( SA5_INTR_OFF, 247 writel( SA5_INTR_OFF,
246 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 248 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
249 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
247 } 250 }
248} 251}
249/* 252/*
@@ -257,11 +260,13 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
257 { /* Turn interrupts on */ 260 { /* Turn interrupts on */
258 h->interrupts_enabled = 1; 261 h->interrupts_enabled = 1;
259 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 262 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
263 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
260 } else /* Turn them off */ 264 } else /* Turn them off */
261 { 265 {
262 h->interrupts_enabled = 0; 266 h->interrupts_enabled = 0;
263 writel( SA5B_INTR_OFF, 267 writel( SA5B_INTR_OFF,
264 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 268 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
269 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
265 } 270 }
266} 271}
267 272
@@ -271,10 +276,12 @@ static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
271 if (val) { /* turn on interrupts */ 276 if (val) { /* turn on interrupts */
272 h->interrupts_enabled = 1; 277 h->interrupts_enabled = 1;
273 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 278 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
279 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
274 } else { 280 } else {
275 h->interrupts_enabled = 0; 281 h->interrupts_enabled = 0;
276 writel(SA5_PERF_INTR_OFF, 282 writel(SA5_PERF_INTR_OFF,
277 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 283 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
284 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
278 } 285 }
279} 286}
280 287
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index cd441bef031f..d9be6b4d49a6 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -53,6 +53,7 @@
53#define CFGTBL_ChangeReq 0x00000001l 53#define CFGTBL_ChangeReq 0x00000001l
54#define CFGTBL_AccCmds 0x00000001l 54#define CFGTBL_AccCmds 0x00000001l
55#define DOORBELL_CTLR_RESET 0x00000004l 55#define DOORBELL_CTLR_RESET 0x00000004l
56#define DOORBELL_CTLR_RESET2 0x00000020l
56 57
57#define CFGTBL_Trans_Simple 0x00000002l 58#define CFGTBL_Trans_Simple 0x00000002l
58#define CFGTBL_Trans_Performant 0x00000004l 59#define CFGTBL_Trans_Performant 0x00000004l
@@ -142,6 +143,14 @@ typedef struct _ReadCapdata_struct_16
142#define BMIC_CACHE_FLUSH 0xc2 143#define BMIC_CACHE_FLUSH 0xc2
143#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */ 144#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */
144 145
146#define CCISS_ABORT_MSG 0x00
147#define CCISS_RESET_MSG 0x01
148#define CCISS_RESET_TYPE_CONTROLLER 0x00
149#define CCISS_RESET_TYPE_BUS 0x01
150#define CCISS_RESET_TYPE_TARGET 0x03
151#define CCISS_RESET_TYPE_LUN 0x04
152#define CCISS_NOOP_MSG 0x03
153
145/* Command List Structure */ 154/* Command List Structure */
146#define CTLR_LUNID "\0\0\0\0\0\0\0\0" 155#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
147 156
@@ -235,6 +244,8 @@ typedef struct _CfgTable_struct {
235 u8 reserved[0x78 - 0x58]; 244 u8 reserved[0x78 - 0x58];
236 u32 misc_fw_support; /* offset 0x78 */ 245 u32 misc_fw_support; /* offset 0x78 */
237#define MISC_FW_DOORBELL_RESET (0x02) 246#define MISC_FW_DOORBELL_RESET (0x02)
247#define MISC_FW_DOORBELL_RESET2 (0x10)
248 u8 driver_version[32];
238} CfgTable_struct; 249} CfgTable_struct;
239 250
240struct TransTable_struct { 251struct TransTable_struct {
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index df793803f5ae..696100241a6f 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = {
84 .proc_name = "cciss", 84 .proc_name = "cciss",
85 .proc_info = cciss_scsi_proc_info, 85 .proc_info = cciss_scsi_proc_info,
86 .queuecommand = cciss_scsi_queue_command, 86 .queuecommand = cciss_scsi_queue_command,
87 .can_queue = SCSI_CCISS_CAN_QUEUE,
88 .this_id = 7, 87 .this_id = 7,
89 .cmd_per_lun = 1, 88 .cmd_per_lun = 1,
90 .use_clustering = DISABLE_CLUSTERING, 89 .use_clustering = DISABLE_CLUSTERING,
@@ -108,16 +107,13 @@ struct cciss_scsi_cmd_stack_elem_t {
108 107
109#pragma pack() 108#pragma pack()
110 109
111#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
112 CCISS_MAX_SCSI_DEVS_PER_HBA + 2)
113 // plus two for init time usage
114
115#pragma pack(1) 110#pragma pack(1)
116struct cciss_scsi_cmd_stack_t { 111struct cciss_scsi_cmd_stack_t {
117 struct cciss_scsi_cmd_stack_elem_t *pool; 112 struct cciss_scsi_cmd_stack_elem_t *pool;
118 struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE]; 113 struct cciss_scsi_cmd_stack_elem_t **elem;
119 dma_addr_t cmd_pool_handle; 114 dma_addr_t cmd_pool_handle;
120 int top; 115 int top;
116 int nelems;
121}; 117};
122#pragma pack() 118#pragma pack()
123 119
@@ -191,7 +187,7 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
191 sa = h->scsi_ctlr; 187 sa = h->scsi_ctlr;
192 stk = &sa->cmd_stack; 188 stk = &sa->cmd_stack;
193 stk->top++; 189 stk->top++;
194 if (stk->top >= CMD_STACK_SIZE) { 190 if (stk->top >= stk->nelems) {
195 dev_err(&h->pdev->dev, 191 dev_err(&h->pdev->dev,
196 "scsi_cmd_free called too many times.\n"); 192 "scsi_cmd_free called too many times.\n");
197 BUG(); 193 BUG();
@@ -206,13 +202,14 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
206 struct cciss_scsi_cmd_stack_t *stk; 202 struct cciss_scsi_cmd_stack_t *stk;
207 size_t size; 203 size_t size;
208 204
205 stk = &sa->cmd_stack;
206 stk->nelems = cciss_tape_cmds + 2;
209 sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 207 sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
210 h->chainsize, CMD_STACK_SIZE); 208 h->chainsize, stk->nelems);
211 if (!sa->cmd_sg_list && h->chainsize > 0) 209 if (!sa->cmd_sg_list && h->chainsize > 0)
212 return -ENOMEM; 210 return -ENOMEM;
213 211
214 stk = &sa->cmd_stack; 212 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
215 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
216 213
217 /* Check alignment, see cciss_cmd.h near CommandList_struct def. */ 214 /* Check alignment, see cciss_cmd.h near CommandList_struct def. */
218 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); 215 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
@@ -221,18 +218,23 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
221 pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle); 218 pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
222 219
223 if (stk->pool == NULL) { 220 if (stk->pool == NULL) {
224 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); 221 cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
225 sa->cmd_sg_list = NULL; 222 sa->cmd_sg_list = NULL;
226 return -ENOMEM; 223 return -ENOMEM;
227 } 224 }
228 225 stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
229 for (i=0; i<CMD_STACK_SIZE; i++) { 226 if (!stk->elem) {
227 pci_free_consistent(h->pdev, size, stk->pool,
228 stk->cmd_pool_handle);
229 return -1;
230 }
231 for (i = 0; i < stk->nelems; i++) {
230 stk->elem[i] = &stk->pool[i]; 232 stk->elem[i] = &stk->pool[i];
231 stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + 233 stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
232 (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); 234 (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
233 stk->elem[i]->cmdindex = i; 235 stk->elem[i]->cmdindex = i;
234 } 236 }
235 stk->top = CMD_STACK_SIZE-1; 237 stk->top = stk->nelems-1;
236 return 0; 238 return 0;
237} 239}
238 240
@@ -245,16 +247,18 @@ scsi_cmd_stack_free(ctlr_info_t *h)
245 247
246 sa = h->scsi_ctlr; 248 sa = h->scsi_ctlr;
247 stk = &sa->cmd_stack; 249 stk = &sa->cmd_stack;
248 if (stk->top != CMD_STACK_SIZE-1) { 250 if (stk->top != stk->nelems-1) {
249 dev_warn(&h->pdev->dev, 251 dev_warn(&h->pdev->dev,
250 "bug: %d scsi commands are still outstanding.\n", 252 "bug: %d scsi commands are still outstanding.\n",
251 CMD_STACK_SIZE - stk->top); 253 stk->nelems - stk->top);
252 } 254 }
253 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; 255 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
254 256
255 pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); 257 pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
256 stk->pool = NULL; 258 stk->pool = NULL;
257 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); 259 cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
260 kfree(stk->elem);
261 stk->elem = NULL;
258} 262}
259 263
260#if 0 264#if 0
@@ -859,6 +863,7 @@ cciss_scsi_detect(ctlr_info_t *h)
859 sh->io_port = 0; // good enough? FIXME, 863 sh->io_port = 0; // good enough? FIXME,
860 sh->n_io_port = 0; // I don't think we use these two... 864 sh->n_io_port = 0; // I don't think we use these two...
861 sh->this_id = SELF_SCSI_ID; 865 sh->this_id = SELF_SCSI_ID;
866 sh->can_queue = cciss_tape_cmds;
862 sh->sg_tablesize = h->maxsgentries; 867 sh->sg_tablesize = h->maxsgentries;
863 sh->max_cmd_len = MAX_COMMAND_SIZE; 868 sh->max_cmd_len = MAX_COMMAND_SIZE;
864 869
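
The cciss change above drops the compile-time SCSI_CCISS_CAN_QUEUE/CMD_STACK_SIZE pair in favour of a per-adapter stk->nelems derived from the cciss_tape_cmds module parameter, which in turn means the elem pointer array has to be allocated at setup time and freed again in scsi_cmd_stack_free(). Below is a minimal userspace sketch of the same runtime-sized free-command stack; the names (cmd_stack_init, cmd_stack_free) are illustrative, not the driver's API.

#include <stdio.h>
#include <stdlib.h>

struct cmd { int index; };

struct cmd_stack {
	struct cmd *pool;	/* backing array of commands */
	struct cmd **elem;	/* stack of free-command pointers */
	int top;		/* index of the top-most free slot */
	int nelems;		/* sized at runtime, not compile time */
};

/* Size the stack from a runtime parameter (cf. cciss_tape_cmds + 2). */
static int cmd_stack_init(struct cmd_stack *stk, int ncmds)
{
	int i;

	stk->nelems = ncmds + 2;	/* two extra for init-time usage */
	stk->pool = calloc(stk->nelems, sizeof(*stk->pool));
	stk->elem = calloc(stk->nelems, sizeof(*stk->elem));
	if (!stk->pool || !stk->elem) {
		free(stk->pool);
		free(stk->elem);
		return -1;
	}
	for (i = 0; i < stk->nelems; i++) {
		stk->pool[i].index = i;
		stk->elem[i] = &stk->pool[i];
	}
	stk->top = stk->nelems - 1;
	return 0;
}

static void cmd_stack_free(struct cmd_stack *stk)
{
	free(stk->pool);
	free(stk->elem);
	stk->pool = NULL;
	stk->elem = NULL;
}

int main(void)
{
	struct cmd_stack stk;

	if (cmd_stack_init(&stk, 16))
		return 1;
	printf("stack holds %d commands, top=%d\n", stk.nelems, stk.top);
	cmd_stack_free(&stk);
	return 0;
}
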
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
index 6d5822fe851a..e71d986727ca 100644
--- a/drivers/block/cciss_scsi.h
+++ b/drivers/block/cciss_scsi.h
@@ -36,13 +36,9 @@
36 addressible natively, and may in fact turn 36 addressible natively, and may in fact turn
37 out to be not scsi at all. */ 37 out to be not scsi at all. */
38 38
39#define SCSI_CCISS_CAN_QUEUE 2
40 39
41/* 40/*
42 41
43Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
44Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
45
46If the upper scsi layer tries to track how many commands we have 42If the upper scsi layer tries to track how many commands we have
47outstanding, it will be operating under the misapprehension that it is 43outstanding, it will be operating under the misapprehension that it is
48the only one sending us requests. We also have the block interface, 44the only one sending us requests. We also have the block interface,
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index c6828b68d77b..09ef9a878ef0 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -28,7 +28,7 @@
28#include "drbd_int.h" 28#include "drbd_int.h"
29#include "drbd_wrappers.h" 29#include "drbd_wrappers.h"
30 30
31/* We maintain a trivial check sum in our on disk activity log. 31/* We maintain a trivial checksum in our on disk activity log.
32 * With that we can ensure correct operation even when the storage 32 * With that we can ensure correct operation even when the storage
33 * device might do a partial (last) sector write while losing power. 33 * device might do a partial (last) sector write while losing power.
34 */ 34 */
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 76210ba401ac..f440a02dfdb1 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -74,7 +74,7 @@
74 * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 74 * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
75 * seems excessive. 75 * seems excessive.
76 * 76 *
77 * We plan to reduce the amount of in-core bitmap pages by pageing them in 77 * We plan to reduce the amount of in-core bitmap pages by paging them in
78 * and out against their on-disk location as necessary, but need to make 78 * and out against their on-disk location as necessary, but need to make
79 * sure we don't cause too much meta data IO, and must not deadlock in 79 * sure we don't cause too much meta data IO, and must not deadlock in
80 * tight memory situations. This needs some more work. 80 * tight memory situations. This needs some more work.
@@ -200,7 +200,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
200 * we if bits have been cleared since last IO. */ 200 * we if bits have been cleared since last IO. */
201#define BM_PAGE_LAZY_WRITEOUT 28 201#define BM_PAGE_LAZY_WRITEOUT 28
202 202
203/* store_page_idx uses non-atomic assingment. It is only used directly after 203/* store_page_idx uses non-atomic assignment. It is only used directly after
204 * allocating the page. All other bm_set_page_* and bm_clear_page_* need to 204 * allocating the page. All other bm_set_page_* and bm_clear_page_* need to
205 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 205 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
206 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit 206 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
@@ -318,7 +318,7 @@ static void bm_unmap(unsigned long *p_addr)
318/* word offset from start of bitmap to word number _in_page_ 318/* word offset from start of bitmap to word number _in_page_
319 * modulo longs per page 319 * modulo longs per page
320#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) 320#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
321 hm, well, Philipp thinks gcc might not optimze the % into & (... - 1) 321 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
322 so do it explicitly: 322 so do it explicitly:
323 */ 323 */
324#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) 324#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d871b14ed5a1..ef2ceed3be4b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -700,7 +700,7 @@ struct drbd_request {
700 * see drbd_endio_pri(). */ 700 * see drbd_endio_pri(). */
701 struct bio *private_bio; 701 struct bio *private_bio;
702 702
703 struct hlist_node colision; 703 struct hlist_node collision;
704 sector_t sector; 704 sector_t sector;
705 unsigned int size; 705 unsigned int size;
706 unsigned int epoch; /* barrier_nr */ 706 unsigned int epoch; /* barrier_nr */
@@ -766,7 +766,7 @@ struct digest_info {
766 766
767struct drbd_epoch_entry { 767struct drbd_epoch_entry {
768 struct drbd_work w; 768 struct drbd_work w;
769 struct hlist_node colision; 769 struct hlist_node collision;
770 struct drbd_epoch *epoch; /* for writes */ 770 struct drbd_epoch *epoch; /* for writes */
771 struct drbd_conf *mdev; 771 struct drbd_conf *mdev;
772 struct page *pages; 772 struct page *pages;
@@ -1129,6 +1129,8 @@ struct drbd_conf {
1129 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ 1129 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
1130 int rs_planed; /* resync sectors already planned */ 1130 int rs_planed; /* resync sectors already planned */
1131 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ 1131 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
1132 int peer_max_bio_size;
1133 int local_max_bio_size;
1132}; 1134};
1133 1135
1134static inline struct drbd_conf *minor_to_mdev(unsigned int minor) 1136static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1218,8 +1220,6 @@ extern void drbd_free_resources(struct drbd_conf *mdev);
1218extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr, 1220extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
1219 unsigned int set_size); 1221 unsigned int set_size);
1220extern void tl_clear(struct drbd_conf *mdev); 1222extern void tl_clear(struct drbd_conf *mdev);
1221enum drbd_req_event;
1222extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
1223extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *); 1223extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
1224extern void drbd_free_sock(struct drbd_conf *mdev); 1224extern void drbd_free_sock(struct drbd_conf *mdev);
1225extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, 1225extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
@@ -1434,6 +1434,7 @@ struct bm_extent {
1434 * hash table. */ 1434 * hash table. */
1435#define HT_SHIFT 8 1435#define HT_SHIFT 8
1436#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) 1436#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
1437#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
1437 1438
1438#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ 1439#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
1439 1440
@@ -1518,9 +1519,9 @@ extern void drbd_resume_io(struct drbd_conf *mdev);
1518extern char *ppsize(char *buf, unsigned long long size); 1519extern char *ppsize(char *buf, unsigned long long size);
1519extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); 1520extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
1520enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; 1521enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
1521extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); 1522extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
1522extern void resync_after_online_grow(struct drbd_conf *); 1523extern void resync_after_online_grow(struct drbd_conf *);
1523extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); 1524extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
1524extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, 1525extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
1525 enum drbd_role new_role, 1526 enum drbd_role new_role,
1526 int force); 1527 int force);
@@ -1828,6 +1829,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
1828 if (!forcedetach) { 1829 if (!forcedetach) {
1829 if (__ratelimit(&drbd_ratelimit_state)) 1830 if (__ratelimit(&drbd_ratelimit_state))
1830 dev_err(DEV, "Local IO failed in %s.\n", where); 1831 dev_err(DEV, "Local IO failed in %s.\n", where);
1832 if (mdev->state.disk > D_INCONSISTENT)
1833 _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
1831 break; 1834 break;
1832 } 1835 }
1833 /* NOTE fall through to detach case if forcedetach set */ 1836 /* NOTE fall through to detach case if forcedetach set */
@@ -2153,6 +2156,10 @@ static inline int get_net_conf(struct drbd_conf *mdev)
2153static inline void put_ldev(struct drbd_conf *mdev) 2156static inline void put_ldev(struct drbd_conf *mdev)
2154{ 2157{
2155 int i = atomic_dec_return(&mdev->local_cnt); 2158 int i = atomic_dec_return(&mdev->local_cnt);
2159
2160 /* This may be called from some endio handler,
2161 * so we must not sleep here. */
2162
2156 __release(local); 2163 __release(local);
2157 D_ASSERT(i >= 0); 2164 D_ASSERT(i >= 0);
2158 if (i == 0) { 2165 if (i == 0) {
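
Taken together, the drbd_int.h hunks add per-device tracking of the local and peer maximum BIO size, introduce DRBD_MAX_BIO_SIZE_SAFE as a 4 KiB floor that any peer or backing device can handle, and demote the disk to D_INCONSISTENT on a non-detaching local IO error. A tiny standalone sketch of the new constants and the "never drop below the safe floor" clamp (not the kernel header itself):

#include <stdio.h>

#define HT_SHIFT		8
#define DRBD_MAX_BIO_SIZE	(1U << (9 + HT_SHIFT))	/* 128 KiB */
#define DRBD_MAX_BIO_SIZE_SAFE	(1U << 12)		/* 4 KiB, always works */

/* Clamp a possibly stale or zero stored limit to the safe floor. */
static unsigned int clamp_to_safe(unsigned int stored)
{
	return stored > DRBD_MAX_BIO_SIZE_SAFE ? stored : DRBD_MAX_BIO_SIZE_SAFE;
}

int main(void)
{
	printf("max=%u safe=%u clamp(0)=%u\n",
	       DRBD_MAX_BIO_SIZE, DRBD_MAX_BIO_SIZE_SAFE, clamp_to_safe(0));
	return 0;
}
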
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b525c179f39..0358e55356c8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -745,6 +745,9 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
745 mdev->agreed_pro_version < 88) 745 mdev->agreed_pro_version < 88)
746 rv = SS_NOT_SUPPORTED; 746 rv = SS_NOT_SUPPORTED;
747 747
748 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
749 rv = SS_CONNECTED_OUTDATES;
750
748 return rv; 751 return rv;
749} 752}
750 753
@@ -1565,6 +1568,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1565 put_ldev(mdev); 1568 put_ldev(mdev);
1566 } 1569 }
1567 1570
 1571 /* Notify peer that I had a local IO error and did not detach. */
1572 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1573 drbd_send_state(mdev);
1574
1568 /* Disks got bigger while they were detached */ 1575 /* Disks got bigger while they were detached */
1569 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING && 1576 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1570 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) { 1577 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
@@ -2064,7 +2071,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
2064{ 2071{
2065 struct p_sizes p; 2072 struct p_sizes p;
2066 sector_t d_size, u_size; 2073 sector_t d_size, u_size;
2067 int q_order_type; 2074 int q_order_type, max_bio_size;
2068 int ok; 2075 int ok;
2069 2076
2070 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 2077 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2072,17 +2079,20 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
2072 d_size = drbd_get_max_capacity(mdev->ldev); 2079 d_size = drbd_get_max_capacity(mdev->ldev);
2073 u_size = mdev->ldev->dc.disk_size; 2080 u_size = mdev->ldev->dc.disk_size;
2074 q_order_type = drbd_queue_order_type(mdev); 2081 q_order_type = drbd_queue_order_type(mdev);
2082 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2083 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
2075 put_ldev(mdev); 2084 put_ldev(mdev);
2076 } else { 2085 } else {
2077 d_size = 0; 2086 d_size = 0;
2078 u_size = 0; 2087 u_size = 0;
2079 q_order_type = QUEUE_ORDERED_NONE; 2088 q_order_type = QUEUE_ORDERED_NONE;
2089 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
2080 } 2090 }
2081 2091
2082 p.d_size = cpu_to_be64(d_size); 2092 p.d_size = cpu_to_be64(d_size);
2083 p.u_size = cpu_to_be64(u_size); 2093 p.u_size = cpu_to_be64(u_size);
2084 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); 2094 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2085 p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9); 2095 p.max_bio_size = cpu_to_be32(max_bio_size);
2086 p.queue_order_type = cpu_to_be16(q_order_type); 2096 p.queue_order_type = cpu_to_be16(q_order_type);
2087 p.dds_flags = cpu_to_be16(flags); 2097 p.dds_flags = cpu_to_be16(flags);
2088 2098
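
The drbd_send_sizes() hunk advertises the device's own receive limit in the P_SIZES packet: the backing queue's max_hw_sectors converted to bytes and clamped to DRBD_MAX_BIO_SIZE, or the compile-time maximum when no disk is attached. A hedged sketch of the packing, using glibc's <endian.h> helpers in place of cpu_to_be32/64; the struct layout is illustrative, not drbd's wire format:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DRBD_MAX_BIO_SIZE (1U << 17)	/* 128 KiB */

/* Wire-format sketch, loosely modeled on p_sizes (not the real layout). */
struct sizes_pkt {
	uint64_t d_size;	/* backing device size, big endian */
	uint64_t u_size;	/* user-requested size, big endian */
	uint32_t max_bio_size;	/* what we can accept, big endian */
};

static void fill_sizes(struct sizes_pkt *p, uint64_t d_size,
		       uint64_t u_size, uint32_t queue_limit)
{
	uint32_t max_bio = queue_limit < DRBD_MAX_BIO_SIZE ?
			   queue_limit : DRBD_MAX_BIO_SIZE;

	p->d_size = htobe64(d_size);
	p->u_size = htobe64(u_size);
	p->max_bio_size = htobe32(max_bio);
}

int main(void)
{
	struct sizes_pkt p;

	fill_sizes(&p, 1ULL << 30, 0, 256 * 1024);
	printf("advertised max_bio_size = %u\n", be32toh(p.max_bio_size));
	return 0;
}
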
@@ -2722,7 +2732,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2722 2732
2723 /* double check digest, sometimes buffers have been modified in flight. */ 2733 /* double check digest, sometimes buffers have been modified in flight. */
2724 if (dgs > 0 && dgs <= 64) { 2734 if (dgs > 0 && dgs <= 64) {
2725 /* 64 byte, 512 bit, is the larges digest size 2735 /* 64 byte, 512 bit, is the largest digest size
2726 * currently supported in kernel crypto. */ 2736 * currently supported in kernel crypto. */
2727 unsigned char digest[64]; 2737 unsigned char digest[64];
2728 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); 2738 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
@@ -3041,6 +3051,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
3041 mdev->agreed_pro_version = PRO_VERSION_MAX; 3051 mdev->agreed_pro_version = PRO_VERSION_MAX;
3042 mdev->write_ordering = WO_bdev_flush; 3052 mdev->write_ordering = WO_bdev_flush;
3043 mdev->resync_wenr = LC_FREE; 3053 mdev->resync_wenr = LC_FREE;
3054 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3055 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3044} 3056}
3045 3057
3046void drbd_mdev_cleanup(struct drbd_conf *mdev) 3058void drbd_mdev_cleanup(struct drbd_conf *mdev)
@@ -3275,7 +3287,7 @@ static void drbd_delete_device(unsigned int minor)
3275 3287
3276 drbd_release_ee_lists(mdev); 3288 drbd_release_ee_lists(mdev);
3277 3289
3278 /* should be free'd on disconnect? */ 3290 /* should be freed on disconnect? */
3279 kfree(mdev->ee_hash); 3291 kfree(mdev->ee_hash);
3280 /* 3292 /*
3281 mdev->ee_hash_s = 0; 3293 mdev->ee_hash_s = 0;
@@ -3415,7 +3427,9 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
3415 q->backing_dev_info.congested_data = mdev; 3427 q->backing_dev_info.congested_data = mdev;
3416 3428
3417 blk_queue_make_request(q, drbd_make_request); 3429 blk_queue_make_request(q, drbd_make_request);
 3418 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); 3430 /* Setting max_hw_sectors to the odd value of 8 KiB here;
 3431 this triggers a max_bio_size message upon first attach or connect. */
3432 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
3419 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 3433 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3420 blk_queue_merge_bvec(q, drbd_merge_bvec); 3434 blk_queue_merge_bvec(q, drbd_merge_bvec);
3421 q->queue_lock = &mdev->req_lock; 3435 q->queue_lock = &mdev->req_lock;
@@ -3627,7 +3641,8 @@ struct meta_data_on_disk {
3627 /* `-- act_log->nr_elements <-- sync_conf.al_extents */ 3641 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3628 u32 bm_offset; /* offset to the bitmap, from here */ 3642 u32 bm_offset; /* offset to the bitmap, from here */
3629 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ 3643 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3630 u32 reserved_u32[4]; 3644 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3645 u32 reserved_u32[3];
3631 3646
3632} __packed; 3647} __packed;
3633 3648
@@ -3668,6 +3683,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
3668 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); 3683 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3669 3684
3670 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); 3685 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3686 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
3671 3687
3672 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); 3688 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3673 sector = mdev->ldev->md.md_offset; 3689 sector = mdev->ldev->md.md_offset;
@@ -3751,6 +3767,15 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3751 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents); 3767 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3752 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); 3768 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3753 3769
3770 spin_lock_irq(&mdev->req_lock);
3771 if (mdev->state.conn < C_CONNECTED) {
3772 int peer;
3773 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3774 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3775 mdev->peer_max_bio_size = peer;
3776 }
3777 spin_unlock_irq(&mdev->req_lock);
3778
3754 if (mdev->sync_conf.al_extents < 7) 3779 if (mdev->sync_conf.al_extents < 7)
3755 mdev->sync_conf.al_extents = 127; 3780 mdev->sync_conf.al_extents = 127;
3756 3781
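
drbd_md_sync()/drbd_md_read() now persist the last peer limit in one of the previously reserved metadata words, so a node that comes up without its peer can keep honouring the old limit until a new one is negotiated. A small sketch of the restore path under those assumptions (field name and helper are illustrative, not drbd's metadata code):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)	/* 4 KiB */

/* Restore the peer's last advertised limit from on-disk metadata, but only
 * while disconnected, and never below the safe floor. */
static unsigned int restore_peer_limit(uint32_t la_peer_max_bio_size_be,
				       int connected,
				       unsigned int current_value)
{
	unsigned int peer;

	if (connected)				/* live value wins over metadata */
		return current_value;
	peer = be32toh(la_peer_max_bio_size_be);/* on-disk fields are big endian */
	if (peer < DRBD_MAX_BIO_SIZE_SAFE)
		peer = DRBD_MAX_BIO_SIZE_SAFE;
	return peer;
}

int main(void)
{
	printf("restored: %u (stale zero clamps to %u)\n",
	       restore_peer_limit(htobe32(32768), 0, 4096),
	       restore_peer_limit(htobe32(0), 0, 4096));
	return 0;
}
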
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 03b29f78a37d..515bcd948a43 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -272,9 +272,28 @@ static int _try_outdate_peer_async(void *data)
272{ 272{
273 struct drbd_conf *mdev = (struct drbd_conf *)data; 273 struct drbd_conf *mdev = (struct drbd_conf *)data;
274 enum drbd_disk_state nps; 274 enum drbd_disk_state nps;
275 union drbd_state ns;
275 276
276 nps = drbd_try_outdate_peer(mdev); 277 nps = drbd_try_outdate_peer(mdev);
277 drbd_request_state(mdev, NS(pdsk, nps)); 278
279 /* Not using
280 drbd_request_state(mdev, NS(pdsk, nps));
 281 here, because we might have been able to re-establish the connection
 282 in the meantime. This can only partially be solved in the state
 283 engine's is_valid_state() and is_valid_state_transition()
284 functions.
285
286 nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
287 pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
288 therefore we have to have the pre state change check here.
289 */
290 spin_lock_irq(&mdev->req_lock);
291 ns = mdev->state;
292 if (ns.conn < C_WF_REPORT_PARAMS) {
293 ns.pdsk = nps;
294 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
295 }
296 spin_unlock_irq(&mdev->req_lock);
278 297
279 return 0; 298 return 0;
280} 299}
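
_try_outdate_peer_async() no longer pushes the result through drbd_request_state(); because the fencing handler may take long enough for the connection to come back, the new peer-disk state is applied only after re-checking the connection state under req_lock. A compact userspace model of that check-under-lock pattern (a pthread mutex stands in for the spinlock; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

enum conn { C_STANDALONE, C_WF_REPORT_PARAMS, C_CONNECTED };
enum pdsk { D_UNKNOWN, D_INCONSISTENT, D_OUTDATED };

struct state {
	pthread_mutex_t lock;
	enum conn conn;
	enum pdsk pdsk;
};

/* Apply the slowly computed new peer-disk state only if we did not manage
 * to reconnect in the meantime; a sketch, not drbd's state engine. */
static void apply_outdate_result(struct state *s, enum pdsk nps)
{
	pthread_mutex_lock(&s->lock);
	if (s->conn < C_WF_REPORT_PARAMS)
		s->pdsk = nps;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct state s = { PTHREAD_MUTEX_INITIALIZER, C_STANDALONE, D_UNKNOWN };

	apply_outdate_result(&s, D_OUTDATED);
	printf("pdsk=%d\n", s.pdsk);
	return 0;
}
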
@@ -577,7 +596,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
577 * Returns 0 on success, negative return values indicate errors. 596 * Returns 0 on success, negative return values indicate errors.
578 * You should call drbd_md_sync() after calling this function. 597 * You should call drbd_md_sync() after calling this function.
579 */ 598 */
580enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 599enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
581{ 600{
582 sector_t prev_first_sect, prev_size; /* previous meta location */ 601 sector_t prev_first_sect, prev_size; /* previous meta location */
583 sector_t la_size; 602 sector_t la_size;
@@ -773,30 +792,78 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
773 return 0; 792 return 0;
774} 793}
775 794
776void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) 795static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
777{ 796{
778 struct request_queue * const q = mdev->rq_queue; 797 struct request_queue * const q = mdev->rq_queue;
779 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 798 int max_hw_sectors = max_bio_size >> 9;
780 int max_segments = mdev->ldev->dc.max_bio_bvecs; 799 int max_segments = 0;
781 int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); 800
801 if (get_ldev_if_state(mdev, D_ATTACHING)) {
802 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
803
804 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
805 max_segments = mdev->ldev->dc.max_bio_bvecs;
806 put_ldev(mdev);
807 }
782 808
783 blk_queue_logical_block_size(q, 512); 809 blk_queue_logical_block_size(q, 512);
784 blk_queue_max_hw_sectors(q, max_hw_sectors); 810 blk_queue_max_hw_sectors(q, max_hw_sectors);
785 /* This is the workaround for "bio would need to, but cannot, be split" */ 811 /* This is the workaround for "bio would need to, but cannot, be split" */
786 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 812 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
787 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 813 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
788 blk_queue_stack_limits(q, b);
789 814
790 dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); 815 if (get_ldev_if_state(mdev, D_ATTACHING)) {
816 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
817
818 blk_queue_stack_limits(q, b);
791 819
792 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 820 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
793 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 821 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
794 q->backing_dev_info.ra_pages, 822 q->backing_dev_info.ra_pages,
795 b->backing_dev_info.ra_pages); 823 b->backing_dev_info.ra_pages);
796 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 824 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
825 }
826 put_ldev(mdev);
797 } 827 }
798} 828}
799 829
830void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
831{
832 int now, new, local, peer;
833
834 now = queue_max_hw_sectors(mdev->rq_queue) << 9;
 835 local = mdev->local_max_bio_size; /* possibly last known value, from volatile memory */
 836 peer = mdev->peer_max_bio_size; /* possibly last known value, from meta data */
837
838 if (get_ldev_if_state(mdev, D_ATTACHING)) {
839 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
840 mdev->local_max_bio_size = local;
841 put_ldev(mdev);
842 }
843
844 /* We may ignore peer limits if the peer is modern enough.
 845 From 8.3.8 onwards the peer can use multiple
846 BIOs for a single peer_request */
847 if (mdev->state.conn >= C_CONNECTED) {
848 if (mdev->agreed_pro_version < 94)
849 peer = mdev->peer_max_bio_size;
850 else if (mdev->agreed_pro_version == 94)
851 peer = DRBD_MAX_SIZE_H80_PACKET;
852 else /* drbd 8.3.8 onwards */
853 peer = DRBD_MAX_BIO_SIZE;
854 }
855
856 new = min_t(int, local, peer);
857
858 if (mdev->state.role == R_PRIMARY && new < now)
859 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
860
861 if (new != now)
862 dev_info(DEV, "max BIO size = %u\n", new);
863
864 drbd_setup_queue_param(mdev, new);
865}
866
800/* serialize deconfig (worker exiting, doing cleanup) 867/* serialize deconfig (worker exiting, doing cleanup)
801 * and reconfig (drbdsetup disk, drbdsetup net) 868 * and reconfig (drbdsetup disk, drbdsetup net)
802 * 869 *
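
drbd_reconsider_max_bio_size() is the new central place where the exposed limit is computed: the local backing device's limit (or its last known value) is combined with what the peer can take, where the peer side depends on the agreed protocol version. A standalone sketch of that decision, with the constants copied from the header and the function signature invented for illustration:

#include <stdio.h>

#define DRBD_MAX_BIO_SIZE		(1U << 17)	/* 128 KiB */
#define DRBD_MAX_SIZE_H80_PACKET	(1U << 15)	/* 32 KiB  */
#define DRBD_MAX_BIO_SIZE_SAFE		(1U << 12)	/* 4 KiB   */

/* Combine the local backing limit with the peer's capability. */
static unsigned int reconsider_max_bio_size(unsigned int local,
					    unsigned int peer_last_known,
					    int connected, int proto)
{
	unsigned int peer = peer_last_known;

	if (connected) {
		if (proto < 94)
			peer = peer_last_known;		/* only what it told us */
		else if (proto == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else					/* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}
	return local < peer ? local : peer;
}

int main(void)
{
	printf("new limit: %u\n",
	       reconsider_max_bio_size(DRBD_MAX_BIO_SIZE,
				       DRBD_MAX_BIO_SIZE_SAFE, 1, 95));
	return 0;
}
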
@@ -865,7 +932,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
865 struct block_device *bdev; 932 struct block_device *bdev;
866 struct lru_cache *resync_lru = NULL; 933 struct lru_cache *resync_lru = NULL;
867 union drbd_state ns, os; 934 union drbd_state ns, os;
868 unsigned int max_bio_size;
869 enum drbd_state_rv rv; 935 enum drbd_state_rv rv;
870 int cp_discovered = 0; 936 int cp_discovered = 0;
871 int logical_block_size; 937 int logical_block_size;
@@ -1117,20 +1183,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1117 mdev->read_cnt = 0; 1183 mdev->read_cnt = 0;
1118 mdev->writ_cnt = 0; 1184 mdev->writ_cnt = 0;
1119 1185
1120 max_bio_size = DRBD_MAX_BIO_SIZE; 1186 drbd_reconsider_max_bio_size(mdev);
1121 if (mdev->state.conn == C_CONNECTED) {
1122 /* We are Primary, Connected, and now attach a new local
1123 * backing store. We must not increase the user visible maximum
1124 * bio size on this device to something the peer may not be
1125 * able to handle. */
1126 if (mdev->agreed_pro_version < 94)
1127 max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
1128 else if (mdev->agreed_pro_version == 94)
1129 max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
1130 /* else: drbd 8.3.9 and later, stay with default */
1131 }
1132
1133 drbd_setup_queue_param(mdev, max_bio_size);
1134 1187
1135 /* If I am currently not R_PRIMARY, 1188 /* If I am currently not R_PRIMARY,
1136 * but meta data primary indicator is set, 1189 * but meta data primary indicator is set,
@@ -1152,7 +1205,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1152 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1205 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1153 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1206 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1154 1207
1155 dd = drbd_determin_dev_size(mdev, 0); 1208 dd = drbd_determine_dev_size(mdev, 0);
1156 if (dd == dev_size_error) { 1209 if (dd == dev_size_error) {
1157 retcode = ERR_NOMEM_BITMAP; 1210 retcode = ERR_NOMEM_BITMAP;
1158 goto force_diskless_dec; 1211 goto force_diskless_dec;
@@ -1281,11 +1334,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1281static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1334static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1282 struct drbd_nl_cfg_reply *reply) 1335 struct drbd_nl_cfg_reply *reply)
1283{ 1336{
1337 enum drbd_ret_code retcode;
1338 int ret;
1284 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */ 1339 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1285 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1340 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1286 if (mdev->state.disk == D_DISKLESS) 1341 /* D_FAILED will transition to DISKLESS. */
1287 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); 1342 ret = wait_event_interruptible(mdev->misc_wait,
1343 mdev->state.disk != D_FAILED);
1288 drbd_resume_io(mdev); 1344 drbd_resume_io(mdev);
1345 if ((int)retcode == (int)SS_IS_DISKLESS)
1346 retcode = SS_NOTHING_TO_DO;
1347 if (ret)
1348 retcode = ERR_INTR;
1349 reply->ret_code = retcode;
1289 return 0; 1350 return 0;
1290} 1351}
1291 1352
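
drbd_nl_detach() now requests D_FAILED (which the state machine completes to D_DISKLESS) and waits interruptibly instead of spinning on local_cnt; an already-diskless device is reported as "nothing to do" and a signal during the wait as ERR_INTR. A sketch of that return-code mapping (the numeric values are stand-ins, not drbd's enums):

#include <stdio.h>

enum { SS_SUCCESS = 1, SS_IS_DISKLESS = -4, SS_NOTHING_TO_DO = 2, ERR_INTR = 129 };

/* Map the raw state-change result plus the wait outcome onto the value
 * reported back to userspace (illustrative only). */
static int detach_retcode(int state_rv, int wait_interrupted)
{
	int retcode = state_rv;

	if (retcode == SS_IS_DISKLESS)	/* already detached: not an error */
		retcode = SS_NOTHING_TO_DO;
	if (wait_interrupted)		/* signal while waiting for D_FAILED to clear */
		retcode = ERR_INTR;
	return retcode;
}

int main(void)
{
	printf("%d %d\n", detach_retcode(SS_IS_DISKLESS, 0),
	       detach_retcode(SS_SUCCESS, 1));
	return 0;
}
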
@@ -1658,7 +1719,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1658 1719
1659 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1720 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
1660 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 1721 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
1661 dd = drbd_determin_dev_size(mdev, ddsf); 1722 dd = drbd_determine_dev_size(mdev, ddsf);
1662 drbd_md_sync(mdev); 1723 drbd_md_sync(mdev);
1663 put_ldev(mdev); 1724 put_ldev(mdev);
1664 if (dd == dev_size_error) { 1725 if (dd == dev_size_error) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index fd26666c0b08..25d32c5aa50a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -333,7 +333,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
333 if (!page) 333 if (!page)
334 goto fail; 334 goto fail;
335 335
336 INIT_HLIST_NODE(&e->colision); 336 INIT_HLIST_NODE(&e->collision);
337 e->epoch = NULL; 337 e->epoch = NULL;
338 e->mdev = mdev; 338 e->mdev = mdev;
339 e->pages = page; 339 e->pages = page;
@@ -356,7 +356,7 @@ void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int i
356 kfree(e->digest); 356 kfree(e->digest);
357 drbd_pp_free(mdev, e->pages, is_net); 357 drbd_pp_free(mdev, e->pages, is_net);
358 D_ASSERT(atomic_read(&e->pending_bios) == 0); 358 D_ASSERT(atomic_read(&e->pending_bios) == 0);
359 D_ASSERT(hlist_unhashed(&e->colision)); 359 D_ASSERT(hlist_unhashed(&e->collision));
360 mempool_free(e, drbd_ee_mempool); 360 mempool_free(e, drbd_ee_mempool);
361} 361}
362 362
@@ -787,7 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev)
787 } 787 }
788 788
789 if (sock && msock) { 789 if (sock && msock) {
790 schedule_timeout_interruptible(HZ / 10); 790 schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
791 ok = drbd_socket_okay(mdev, &sock); 791 ok = drbd_socket_okay(mdev, &sock);
792 ok = drbd_socket_okay(mdev, &msock) && ok; 792 ok = drbd_socket_okay(mdev, &msock) && ok;
793 if (ok) 793 if (ok)
@@ -899,11 +899,6 @@ retry:
899 899
900 drbd_thread_start(&mdev->asender); 900 drbd_thread_start(&mdev->asender);
901 901
902 if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
903 drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
904 put_ldev(mdev);
905 }
906
907 if (drbd_send_protocol(mdev) == -1) 902 if (drbd_send_protocol(mdev) == -1)
908 return -1; 903 return -1;
909 drbd_send_sync_param(mdev, &mdev->sync_conf); 904 drbd_send_sync_param(mdev, &mdev->sync_conf);
@@ -1418,7 +1413,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
1418 sector_t sector = e->sector; 1413 sector_t sector = e->sector;
1419 int ok; 1414 int ok;
1420 1415
1421 D_ASSERT(hlist_unhashed(&e->colision)); 1416 D_ASSERT(hlist_unhashed(&e->collision));
1422 1417
1423 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1418 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1424 drbd_set_in_sync(mdev, sector, e->size); 1419 drbd_set_in_sync(mdev, sector, e->size);
@@ -1487,7 +1482,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
1487 return false; 1482 return false;
1488 } 1483 }
1489 1484
1490 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid 1485 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
1491 * special casing it there for the various failure cases. 1486 * special casing it there for the various failure cases.
1492 * still no race with drbd_fail_pending_reads */ 1487 * still no race with drbd_fail_pending_reads */
1493 ok = recv_dless_read(mdev, req, sector, data_size); 1488 ok = recv_dless_read(mdev, req, sector, data_size);
@@ -1558,11 +1553,11 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1558 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ 1553 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1559 if (mdev->net_conf->two_primaries) { 1554 if (mdev->net_conf->two_primaries) {
1560 spin_lock_irq(&mdev->req_lock); 1555 spin_lock_irq(&mdev->req_lock);
1561 D_ASSERT(!hlist_unhashed(&e->colision)); 1556 D_ASSERT(!hlist_unhashed(&e->collision));
1562 hlist_del_init(&e->colision); 1557 hlist_del_init(&e->collision);
1563 spin_unlock_irq(&mdev->req_lock); 1558 spin_unlock_irq(&mdev->req_lock);
1564 } else { 1559 } else {
1565 D_ASSERT(hlist_unhashed(&e->colision)); 1560 D_ASSERT(hlist_unhashed(&e->collision));
1566 } 1561 }
1567 1562
1568 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 1563 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
@@ -1579,8 +1574,8 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
1579 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e); 1574 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1580 1575
1581 spin_lock_irq(&mdev->req_lock); 1576 spin_lock_irq(&mdev->req_lock);
1582 D_ASSERT(!hlist_unhashed(&e->colision)); 1577 D_ASSERT(!hlist_unhashed(&e->collision));
1583 hlist_del_init(&e->colision); 1578 hlist_del_init(&e->collision);
1584 spin_unlock_irq(&mdev->req_lock); 1579 spin_unlock_irq(&mdev->req_lock);
1585 1580
1586 dec_unacked(mdev); 1581 dec_unacked(mdev);
@@ -1755,7 +1750,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1755 1750
1756 spin_lock_irq(&mdev->req_lock); 1751 spin_lock_irq(&mdev->req_lock);
1757 1752
1758 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector)); 1753 hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
1759 1754
1760#define OVERLAPS overlaps(i->sector, i->size, sector, size) 1755#define OVERLAPS overlaps(i->sector, i->size, sector, size)
1761 slot = tl_hash_slot(mdev, sector); 1756 slot = tl_hash_slot(mdev, sector);
@@ -1765,7 +1760,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1765 int have_conflict = 0; 1760 int have_conflict = 0;
1766 prepare_to_wait(&mdev->misc_wait, &wait, 1761 prepare_to_wait(&mdev->misc_wait, &wait,
1767 TASK_INTERRUPTIBLE); 1762 TASK_INTERRUPTIBLE);
1768 hlist_for_each_entry(i, n, slot, colision) { 1763 hlist_for_each_entry(i, n, slot, collision) {
1769 if (OVERLAPS) { 1764 if (OVERLAPS) {
1770 /* only ALERT on first iteration, 1765 /* only ALERT on first iteration,
1771 * we may be woken up early... */ 1766 * we may be woken up early... */
@@ -1804,7 +1799,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1804 } 1799 }
1805 1800
1806 if (signal_pending(current)) { 1801 if (signal_pending(current)) {
1807 hlist_del_init(&e->colision); 1802 hlist_del_init(&e->collision);
1808 1803
1809 spin_unlock_irq(&mdev->req_lock); 1804 spin_unlock_irq(&mdev->req_lock);
1810 1805
@@ -1862,7 +1857,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1862 dev_err(DEV, "submit failed, triggering re-connect\n"); 1857 dev_err(DEV, "submit failed, triggering re-connect\n");
1863 spin_lock_irq(&mdev->req_lock); 1858 spin_lock_irq(&mdev->req_lock);
1864 list_del(&e->w.list); 1859 list_del(&e->w.list);
1865 hlist_del_init(&e->colision); 1860 hlist_del_init(&e->collision);
1866 spin_unlock_irq(&mdev->req_lock); 1861 spin_unlock_irq(&mdev->req_lock);
1867 if (e->flags & EE_CALL_AL_COMPLETE_IO) 1862 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1868 drbd_al_complete_io(mdev, e->sector); 1863 drbd_al_complete_io(mdev, e->sector);
@@ -2916,12 +2911,6 @@ disconnect:
2916 return false; 2911 return false;
2917} 2912}
2918 2913
2919static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2920{
2921 /* sorry, we currently have no working implementation
2922 * of distributed TCQ */
2923}
2924
2925/* warn if the arguments differ by more than 12.5% */ 2914/* warn if the arguments differ by more than 12.5% */
2926static void warn_if_differ_considerably(struct drbd_conf *mdev, 2915static void warn_if_differ_considerably(struct drbd_conf *mdev,
2927 const char *s, sector_t a, sector_t b) 2916 const char *s, sector_t a, sector_t b)
@@ -2939,7 +2928,6 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
2939{ 2928{
2940 struct p_sizes *p = &mdev->data.rbuf.sizes; 2929 struct p_sizes *p = &mdev->data.rbuf.sizes;
2941 enum determine_dev_size dd = unchanged; 2930 enum determine_dev_size dd = unchanged;
2942 unsigned int max_bio_size;
2943 sector_t p_size, p_usize, my_usize; 2931 sector_t p_size, p_usize, my_usize;
2944 int ldsc = 0; /* local disk size changed */ 2932 int ldsc = 0; /* local disk size changed */
2945 enum dds_flags ddsf; 2933 enum dds_flags ddsf;
@@ -2994,7 +2982,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
2994 2982
2995 ddsf = be16_to_cpu(p->dds_flags); 2983 ddsf = be16_to_cpu(p->dds_flags);
2996 if (get_ldev(mdev)) { 2984 if (get_ldev(mdev)) {
2997 dd = drbd_determin_dev_size(mdev, ddsf); 2985 dd = drbd_determine_dev_size(mdev, ddsf);
2998 put_ldev(mdev); 2986 put_ldev(mdev);
2999 if (dd == dev_size_error) 2987 if (dd == dev_size_error)
3000 return false; 2988 return false;
@@ -3004,23 +2992,15 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
3004 drbd_set_my_capacity(mdev, p_size); 2992 drbd_set_my_capacity(mdev, p_size);
3005 } 2993 }
3006 2994
2995 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
2996 drbd_reconsider_max_bio_size(mdev);
2997
3007 if (get_ldev(mdev)) { 2998 if (get_ldev(mdev)) {
3008 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { 2999 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3009 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 3000 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3010 ldsc = 1; 3001 ldsc = 1;
3011 } 3002 }
3012 3003
3013 if (mdev->agreed_pro_version < 94)
3014 max_bio_size = be32_to_cpu(p->max_bio_size);
3015 else if (mdev->agreed_pro_version == 94)
3016 max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
3017 else /* drbd 8.3.8 onwards */
3018 max_bio_size = DRBD_MAX_BIO_SIZE;
3019
3020 if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
3021 drbd_setup_queue_param(mdev, max_bio_size);
3022
3023 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3024 put_ldev(mdev); 3004 put_ldev(mdev);
3025 } 3005 }
3026 3006
@@ -4275,7 +4255,7 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4275 struct hlist_node *n; 4255 struct hlist_node *n;
4276 struct drbd_request *req; 4256 struct drbd_request *req;
4277 4257
4278 hlist_for_each_entry(req, n, slot, colision) { 4258 hlist_for_each_entry(req, n, slot, collision) {
4279 if ((unsigned long)req == (unsigned long)id) { 4259 if ((unsigned long)req == (unsigned long)id) {
4280 if (req->sector != sector) { 4260 if (req->sector != sector) {
4281 dev_err(DEV, "_ack_id_to_req: found req %p but it has " 4261 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
@@ -4554,6 +4534,7 @@ int drbd_asender(struct drbd_thread *thi)
4554 int received = 0; 4534 int received = 0;
4555 int expect = sizeof(struct p_header80); 4535 int expect = sizeof(struct p_header80);
4556 int empty; 4536 int empty;
4537 int ping_timeout_active = 0;
4557 4538
4558 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev)); 4539 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4559 4540
@@ -4566,6 +4547,7 @@ int drbd_asender(struct drbd_thread *thi)
4566 ERR_IF(!drbd_send_ping(mdev)) goto reconnect; 4547 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4567 mdev->meta.socket->sk->sk_rcvtimeo = 4548 mdev->meta.socket->sk->sk_rcvtimeo =
4568 mdev->net_conf->ping_timeo*HZ/10; 4549 mdev->net_conf->ping_timeo*HZ/10;
4550 ping_timeout_active = 1;
4569 } 4551 }
4570 4552
4571 /* conditionally cork; 4553 /* conditionally cork;
@@ -4620,8 +4602,7 @@ int drbd_asender(struct drbd_thread *thi)
4620 dev_err(DEV, "meta connection shut down by peer.\n"); 4602 dev_err(DEV, "meta connection shut down by peer.\n");
4621 goto reconnect; 4603 goto reconnect;
4622 } else if (rv == -EAGAIN) { 4604 } else if (rv == -EAGAIN) {
4623 if (mdev->meta.socket->sk->sk_rcvtimeo == 4605 if (ping_timeout_active) {
4624 mdev->net_conf->ping_timeo*HZ/10) {
4625 dev_err(DEV, "PingAck did not arrive in time.\n"); 4606 dev_err(DEV, "PingAck did not arrive in time.\n");
4626 goto reconnect; 4607 goto reconnect;
4627 } 4608 }
@@ -4660,6 +4641,11 @@ int drbd_asender(struct drbd_thread *thi)
4660 if (!cmd->process(mdev, h)) 4641 if (!cmd->process(mdev, h))
4661 goto reconnect; 4642 goto reconnect;
4662 4643
4644 /* the idle_timeout (ping-int)
4645 * has been restored in got_PingAck() */
4646 if (cmd == get_asender_cmd(P_PING_ACK))
4647 ping_timeout_active = 0;
4648
4663 buf = h; 4649 buf = h;
4664 received = 0; 4650 received = 0;
4665 expect = sizeof(struct p_header80); 4651 expect = sizeof(struct p_header80);
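
In drbd_asender(), "a ping is outstanding" used to be inferred from whether sk_rcvtimeo still equals the ping timeout; the patch replaces that with an explicit ping_timeout_active flag, set when the ping is sent and cleared once the P_PING_ACK has been processed, so only then does a receive timeout mean the ack is late. A minimal sketch of the flag logic, detached from real sockets (event names are invented for the example):

#include <stdio.h>

enum rx { RX_DATA, RX_TIMEOUT, RX_PING_ACK };

/* One asender-style iteration: returns 0 to keep going, -1 to reconnect.
 * Sketch of the explicit ping_timeout_active flag, not the real thread. */
static int handle_rx(enum rx ev, int *ping_timeout_active)
{
	switch (ev) {
	case RX_TIMEOUT:
		if (*ping_timeout_active) {	/* PingAck did not arrive in time */
			fprintf(stderr, "PingAck did not arrive in time.\n");
			return -1;
		}
		return 0;			/* idle timeout: send a new ping */
	case RX_PING_ACK:
		*ping_timeout_active = 0;	/* ping answered, back to idle timeout */
		return 0;
	case RX_DATA:
	default:
		return 0;
	}
}

int main(void)
{
	int active = 1;

	handle_rx(RX_PING_ACK, &active);
	printf("after ack, active=%d, timeout now benign=%d\n",
	       active, handle_rx(RX_TIMEOUT, &active) == 0);
	return 0;
}
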
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5c0c8be1bb0a..3424d675b769 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -163,7 +163,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
163 * they must have been failed on the spot */ 163 * they must have been failed on the spot */
164#define OVERLAPS overlaps(sector, size, i->sector, i->size) 164#define OVERLAPS overlaps(sector, size, i->sector, i->size)
165 slot = tl_hash_slot(mdev, sector); 165 slot = tl_hash_slot(mdev, sector);
166 hlist_for_each_entry(i, n, slot, colision) { 166 hlist_for_each_entry(i, n, slot, collision) {
167 if (OVERLAPS) { 167 if (OVERLAPS) {
168 dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; " 168 dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
169 "other: %p %llus +%u\n", 169 "other: %p %llus +%u\n",
@@ -187,7 +187,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
187#undef OVERLAPS 187#undef OVERLAPS
188#define OVERLAPS overlaps(sector, size, e->sector, e->size) 188#define OVERLAPS overlaps(sector, size, e->sector, e->size)
189 slot = ee_hash_slot(mdev, req->sector); 189 slot = ee_hash_slot(mdev, req->sector);
190 hlist_for_each_entry(e, n, slot, colision) { 190 hlist_for_each_entry(e, n, slot, collision) {
191 if (OVERLAPS) { 191 if (OVERLAPS) {
192 wake_up(&mdev->misc_wait); 192 wake_up(&mdev->misc_wait);
193 break; 193 break;
@@ -260,8 +260,8 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
260 260
261 /* remove the request from the conflict detection 261 /* remove the request from the conflict detection
262 * respective block_id verification hash */ 262 * respective block_id verification hash */
263 if (!hlist_unhashed(&req->colision)) 263 if (!hlist_unhashed(&req->collision))
264 hlist_del(&req->colision); 264 hlist_del(&req->collision);
265 else 265 else
266 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); 266 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
267 267
@@ -329,7 +329,7 @@ static int _req_conflicts(struct drbd_request *req)
329 struct hlist_node *n; 329 struct hlist_node *n;
330 struct hlist_head *slot; 330 struct hlist_head *slot;
331 331
332 D_ASSERT(hlist_unhashed(&req->colision)); 332 D_ASSERT(hlist_unhashed(&req->collision));
333 333
334 if (!get_net_conf(mdev)) 334 if (!get_net_conf(mdev))
335 return 0; 335 return 0;
@@ -341,7 +341,7 @@ static int _req_conflicts(struct drbd_request *req)
341 341
342#define OVERLAPS overlaps(i->sector, i->size, sector, size) 342#define OVERLAPS overlaps(i->sector, i->size, sector, size)
343 slot = tl_hash_slot(mdev, sector); 343 slot = tl_hash_slot(mdev, sector);
344 hlist_for_each_entry(i, n, slot, colision) { 344 hlist_for_each_entry(i, n, slot, collision) {
345 if (OVERLAPS) { 345 if (OVERLAPS) {
346 dev_alert(DEV, "%s[%u] Concurrent local write detected! " 346 dev_alert(DEV, "%s[%u] Concurrent local write detected! "
347 "[DISCARD L] new: %llus +%u; " 347 "[DISCARD L] new: %llus +%u; "
@@ -359,7 +359,7 @@ static int _req_conflicts(struct drbd_request *req)
359#undef OVERLAPS 359#undef OVERLAPS
360#define OVERLAPS overlaps(e->sector, e->size, sector, size) 360#define OVERLAPS overlaps(e->sector, e->size, sector, size)
361 slot = ee_hash_slot(mdev, sector); 361 slot = ee_hash_slot(mdev, sector);
362 hlist_for_each_entry(e, n, slot, colision) { 362 hlist_for_each_entry(e, n, slot, collision) {
363 if (OVERLAPS) { 363 if (OVERLAPS) {
364 dev_alert(DEV, "%s[%u] Concurrent remote write detected!" 364 dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
365 " [DISCARD L] new: %llus +%u; " 365 " [DISCARD L] new: %llus +%u; "
@@ -491,7 +491,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
491 491
492 /* so we can verify the handle in the answer packet 492 /* so we can verify the handle in the answer packet
493 * corresponding hlist_del is in _req_may_be_done() */ 493 * corresponding hlist_del is in _req_may_be_done() */
494 hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector)); 494 hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
495 495
496 set_bit(UNPLUG_REMOTE, &mdev->flags); 496 set_bit(UNPLUG_REMOTE, &mdev->flags);
497 497
@@ -507,7 +507,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
507 /* assert something? */ 507 /* assert something? */
508 /* from drbd_make_request_common only */ 508 /* from drbd_make_request_common only */
509 509
510 hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector)); 510 hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
511 /* corresponding hlist_del is in _req_may_be_done() */ 511 /* corresponding hlist_del is in _req_may_be_done() */
512 512
513 /* NOTE 513 /* NOTE
@@ -1033,7 +1033,7 @@ fail_conflicting:
1033 err = 0; 1033 err = 0;
1034 1034
1035fail_free_complete: 1035fail_free_complete:
1036 if (rw == WRITE && local) 1036 if (req->rq_state & RQ_IN_ACT_LOG)
1037 drbd_al_complete_io(mdev, sector); 1037 drbd_al_complete_io(mdev, sector);
1038fail_and_free_req: 1038fail_and_free_req:
1039 if (local) { 1039 if (local) {
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 32e2c3e6a813..68a234a5fdc5 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -256,7 +256,7 @@ static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
256 struct hlist_node *n; 256 struct hlist_node *n;
257 struct drbd_request *req; 257 struct drbd_request *req;
258 258
259 hlist_for_each_entry(req, n, slot, colision) { 259 hlist_for_each_entry(req, n, slot, collision) {
260 if ((unsigned long)req == (unsigned long)id) { 260 if ((unsigned long)req == (unsigned long)id) {
261 D_ASSERT(req->sector == sector); 261 D_ASSERT(req->sector == sector);
262 return req; 262 return req;
@@ -291,7 +291,7 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
291 req->epoch = 0; 291 req->epoch = 0;
292 req->sector = bio_src->bi_sector; 292 req->sector = bio_src->bi_sector;
293 req->size = bio_src->bi_size; 293 req->size = bio_src->bi_size;
294 INIT_HLIST_NODE(&req->colision); 294 INIT_HLIST_NODE(&req->collision);
295 INIT_LIST_HEAD(&req->tl_requests); 295 INIT_LIST_HEAD(&req->tl_requests);
296 INIT_LIST_HEAD(&req->w.list); 296 INIT_LIST_HEAD(&req->w.list);
297 } 297 }
@@ -323,6 +323,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
323extern void complete_master_bio(struct drbd_conf *mdev, 323extern void complete_master_bio(struct drbd_conf *mdev,
324 struct bio_and_error *m); 324 struct bio_and_error *m);
325extern void request_timer_fn(unsigned long data); 325extern void request_timer_fn(unsigned long data);
326extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
326 327
327/* use this if you don't want to deal with calling complete_master_bio() 328/* use this if you don't want to deal with calling complete_master_bio()
328 * outside the spinlock, e.g. when walking some list on cleanup. */ 329 * outside the spinlock, e.g. when walking some list on cleanup. */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index f7e6c92f8d03..4d76b06b6b20 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -126,7 +126,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
126 list_del(&e->w.list); /* has been on active_ee or sync_ee */ 126 list_del(&e->w.list); /* has been on active_ee or sync_ee */
127 list_add_tail(&e->w.list, &mdev->done_ee); 127 list_add_tail(&e->w.list, &mdev->done_ee);
128 128
129 /* No hlist_del_init(&e->colision) here, we did not send the Ack yet, 129 /* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
130 * neither did we wake possibly waiting conflicting requests. 130 * neither did we wake possibly waiting conflicting requests.
131 * done from "drbd_process_done_ee" within the appropriate w.cb 131 * done from "drbd_process_done_ee" within the appropriate w.cb
132 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */ 132 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
@@ -297,42 +297,48 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
297 crypto_hash_final(&desc, digest); 297 crypto_hash_final(&desc, digest);
298} 298}
299 299
300static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 300/* TODO merge common code with w_e_end_ov_req */
301int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
301{ 302{
302 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); 303 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
303 int digest_size; 304 int digest_size;
304 void *digest; 305 void *digest;
305 int ok; 306 int ok = 1;
306 307
307 D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef); 308 D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
308 309
309 if (unlikely(cancel)) { 310 if (unlikely(cancel))
310 drbd_free_ee(mdev, e); 311 goto out;
311 return 1;
312 }
313 312
314 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 313 if (likely((e->flags & EE_WAS_ERROR) != 0))
315 digest_size = crypto_hash_digestsize(mdev->csums_tfm); 314 goto out;
316 digest = kmalloc(digest_size, GFP_NOIO);
317 if (digest) {
318 drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
319 315
320 inc_rs_pending(mdev); 316 digest_size = crypto_hash_digestsize(mdev->csums_tfm);
321 ok = drbd_send_drequest_csum(mdev, 317 digest = kmalloc(digest_size, GFP_NOIO);
322 e->sector, 318 if (digest) {
323 e->size, 319 sector_t sector = e->sector;
324 digest, 320 unsigned int size = e->size;
325 digest_size, 321 drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
326 P_CSUM_RS_REQUEST); 322 /* Free e and pages before send.
327 kfree(digest); 323 * In case we block on congestion, we could otherwise run into
328 } else { 324 * some distributed deadlock, if the other side blocks on
329 dev_err(DEV, "kmalloc() of digest failed.\n"); 325 * congestion as well, because our receiver blocks in
330 ok = 0; 326 * drbd_pp_alloc due to pp_in_use > max_buffers. */
331 } 327 drbd_free_ee(mdev, e);
332 } else 328 e = NULL;
333 ok = 1; 329 inc_rs_pending(mdev);
330 ok = drbd_send_drequest_csum(mdev, sector, size,
331 digest, digest_size,
332 P_CSUM_RS_REQUEST);
333 kfree(digest);
334 } else {
335 dev_err(DEV, "kmalloc() of digest failed.\n");
336 ok = 0;
337 }
334 338
335 drbd_free_ee(mdev, e); 339out:
340 if (e)
341 drbd_free_ee(mdev, e);
336 342
337 if (unlikely(!ok)) 343 if (unlikely(!ok))
338 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n"); 344 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
@@ -834,7 +840,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
834 const int ratio = 840 const int ratio =
835 (t == 0) ? 0 : 841 (t == 0) ? 0 :
836 (t < 100000) ? ((s*100)/t) : (s/(t/100)); 842 (t < 100000) ? ((s*100)/t) : (s/(t/100));
837 dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; " 843 dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
838 "transferred %luK total %luK\n", 844 "transferred %luK total %luK\n",
839 ratio, 845 ratio,
840 Bit2KB(mdev->rs_same_csum), 846 Bit2KB(mdev->rs_same_csum),
@@ -1071,9 +1077,12 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1071 return ok; 1077 return ok;
1072} 1078}
1073 1079
1080/* TODO merge common code with w_e_send_csum */
1074int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1081int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1075{ 1082{
1076 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); 1083 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1084 sector_t sector = e->sector;
1085 unsigned int size = e->size;
1077 int digest_size; 1086 int digest_size;
1078 void *digest; 1087 void *digest;
1079 int ok = 1; 1088 int ok = 1;
@@ -1093,17 +1102,25 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1093 else 1102 else
1094 memset(digest, 0, digest_size); 1103 memset(digest, 0, digest_size);
1095 1104
1105 /* Free e and pages before send.
1106 * In case we block on congestion, we could otherwise run into
1107 * some distributed deadlock, if the other side blocks on
1108 * congestion as well, because our receiver blocks in
1109 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1110 drbd_free_ee(mdev, e);
1111 e = NULL;
1096 inc_rs_pending(mdev); 1112 inc_rs_pending(mdev);
1097 ok = drbd_send_drequest_csum(mdev, e->sector, e->size, 1113 ok = drbd_send_drequest_csum(mdev, sector, size,
1098 digest, digest_size, P_OV_REPLY); 1114 digest, digest_size,
1115 P_OV_REPLY);
1099 if (!ok) 1116 if (!ok)
1100 dec_rs_pending(mdev); 1117 dec_rs_pending(mdev);
1101 kfree(digest); 1118 kfree(digest);
1102 1119
1103out: 1120out:
1104 drbd_free_ee(mdev, e); 1121 if (e)
1122 drbd_free_ee(mdev, e);
1105 dec_unacked(mdev); 1123 dec_unacked(mdev);
1106
1107 return ok; 1124 return ok;
1108} 1125}
1109 1126
@@ -1122,8 +1139,10 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1122{ 1139{
1123 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); 1140 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1124 struct digest_info *di; 1141 struct digest_info *di;
1125 int digest_size;
1126 void *digest; 1142 void *digest;
1143 sector_t sector = e->sector;
1144 unsigned int size = e->size;
1145 int digest_size;
1127 int ok, eq = 0; 1146 int ok, eq = 0;
1128 1147
1129 if (unlikely(cancel)) { 1148 if (unlikely(cancel)) {
@@ -1153,16 +1172,21 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1153 } 1172 }
1154 } 1173 }
1155 1174
1156 dec_unacked(mdev); 1175 /* Free e and pages before send.
1176 * In case we block on congestion, we could otherwise run into
1177 * some distributed deadlock, if the other side blocks on
1178 * congestion as well, because our receiver blocks in
1179 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1180 drbd_free_ee(mdev, e);
1157 if (!eq) 1181 if (!eq)
1158 drbd_ov_oos_found(mdev, e->sector, e->size); 1182 drbd_ov_oos_found(mdev, sector, size);
1159 else 1183 else
1160 ov_oos_print(mdev); 1184 ov_oos_print(mdev);
1161 1185
1162 ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size, 1186 ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
1163 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC); 1187 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1164 1188
1165 drbd_free_ee(mdev, e); 1189 dec_unacked(mdev);
1166 1190
1167 --mdev->ov_left; 1191 --mdev->ov_left;
1168 1192
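
The drbd hunks above all apply the same ordering change: copy the fields the reply still needs (sector, size) into locals, free the epoch entry and its pages, and only then perform the send that may block on congestion. A minimal userspace sketch of that ordering, with hypothetical names rather than the drbd API:

/* Copy-then-free-then-send ordering, modelled in plain C. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long sector;
	unsigned int size;
	void *pages;	/* resources that must not be held across a blocking send */
};

/* stand-in for a network send that may block on congestion */
static int send_reply(unsigned long sector, unsigned int size)
{
	printf("reply for sector %lu, %u bytes\n", sector, size);
	return 1;
}

static int end_request(struct entry *e)
{
	/* 1. copy what the reply needs */
	unsigned long sector = e->sector;
	unsigned int size = e->size;

	/* 2. release the buffers first, so the peer is never starved of
	 *    pages while we sit in a blocking send */
	free(e->pages);
	free(e);

	/* 3. only now is it safe to block */
	return send_reply(sector, size);
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));
	e->sector = 2048;
	e->size = 4096;
	e->pages = malloc(4096);
	return end_request(e) ? 0 : 1;
}
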
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a076a14ca72d..c59a672a3de0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1658,7 +1658,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1658 struct kobject *kobj; 1658 struct kobject *kobj;
1659 1659
1660 mutex_lock(&loop_devices_mutex); 1660 mutex_lock(&loop_devices_mutex);
1661 lo = loop_init_one(dev & MINORMASK); 1661 lo = loop_init_one(MINOR(dev) >> part_shift);
1662 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); 1662 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
1663 mutex_unlock(&loop_devices_mutex); 1663 mutex_unlock(&loop_devices_mutex);
1664 1664
@@ -1691,15 +1691,18 @@ static int __init loop_init(void)
1691 if (max_part > 0) 1691 if (max_part > 0)
1692 part_shift = fls(max_part); 1692 part_shift = fls(max_part);
1693 1693
1694 if ((1UL << part_shift) > DISK_MAX_PARTS)
1695 return -EINVAL;
1696
1694 if (max_loop > 1UL << (MINORBITS - part_shift)) 1697 if (max_loop > 1UL << (MINORBITS - part_shift))
1695 return -EINVAL; 1698 return -EINVAL;
1696 1699
1697 if (max_loop) { 1700 if (max_loop) {
1698 nr = max_loop; 1701 nr = max_loop;
1699 range = max_loop; 1702 range = max_loop << part_shift;
1700 } else { 1703 } else {
1701 nr = 8; 1704 nr = 8;
1702 range = 1UL << (MINORBITS - part_shift); 1705 range = 1UL << MINORBITS;
1703 } 1706 }
1704 1707
1705 if (register_blkdev(LOOP_MAJOR, "loop")) 1708 if (register_blkdev(LOOP_MAJOR, "loop"))
@@ -1738,7 +1741,7 @@ static void __exit loop_exit(void)
1738 unsigned long range; 1741 unsigned long range;
1739 struct loop_device *lo, *next; 1742 struct loop_device *lo, *next;
1740 1743
1741 range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift); 1744 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1742 1745
1743 list_for_each_entry_safe(lo, next, &loop_devices, lo_list) 1746 list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
1744 loop_del_one(lo); 1747 loop_del_one(lo);
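
With this change the low part_shift bits of a loop minor number select the partition and the remaining bits select the device, which is why loop_probe() now recovers the device index with MINOR(dev) >> part_shift and the reserved minor range scales to max_loop << part_shift. A small userspace sketch of that arithmetic (fls() reimplemented here; values are illustrative):

/* Loop minor-number layout after this patch, modelled in plain C. */
#include <stdio.h>

/* simplified fls(): position of the highest set bit, fls(0) == 0 */
static int fls_sketch(unsigned int x)
{
	int r = 0;
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int max_part = 15;		/* module parameter */
	int part_shift = fls_sketch(max_part);	/* 4: minors 0..15 per device */
	unsigned int minor = 37;		/* e.g. MINOR(dev) */

	printf("device index %u, partition %u, %u minors reserved per device\n",
	       minor >> part_shift,		 /* which loop device */
	       minor & ((1u << part_shift) - 1), /* which partition on it */
	       1u << part_shift);
	return 0;
}
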
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 8690e31d9932..a0aabd904a51 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -320,6 +320,8 @@ static void pcd_init_units(void)
320 disk->first_minor = unit; 320 disk->first_minor = unit;
321 strcpy(disk->disk_name, cd->name); /* umm... */ 321 strcpy(disk->disk_name, cd->name); /* umm... */
322 disk->fops = &pcd_bdops; 322 disk->fops = &pcd_bdops;
323 disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
324 disk->events = DISK_EVENT_MEDIA_CHANGE;
323 } 325 }
324} 326}
325 327
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9712fad82bc6..1278098624e6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1191,14 +1191,19 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1191static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 1191static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1192{ 1192{
1193 struct rbd_device *dev = (struct rbd_device *)data; 1193 struct rbd_device *dev = (struct rbd_device *)data;
1194 int rc;
1195
1194 if (!dev) 1196 if (!dev)
1195 return; 1197 return;
1196 1198
1197 dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, 1199 dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
1198 notify_id, (int)opcode); 1200 notify_id, (int)opcode);
1199 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1201 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1200 __rbd_update_snaps(dev); 1202 rc = __rbd_update_snaps(dev);
1201 mutex_unlock(&ctl_mutex); 1203 mutex_unlock(&ctl_mutex);
1204 if (rc)
1205 pr_warning(DRV_NAME "%d got notification but failed to update"
1206 " snaps: %d\n", dev->major, rc);
1202 1207
1203 rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name); 1208 rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
1204} 1209}
@@ -1597,7 +1602,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1597 int name_len = strlen(snap_name); 1602 int name_len = strlen(snap_name);
1598 u64 new_snapid; 1603 u64 new_snapid;
1599 int ret; 1604 int ret;
1600 void *data, *data_start, *data_end; 1605 void *data, *p, *e;
1601 u64 ver; 1606 u64 ver;
1602 1607
1603 /* we should create a snapshot only if we're pointing at the head */ 1608 /* we should create a snapshot only if we're pointing at the head */
@@ -1614,16 +1619,16 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1614 if (!data) 1619 if (!data)
1615 return -ENOMEM; 1620 return -ENOMEM;
1616 1621
1617 data_start = data; 1622 p = data;
1618 data_end = data + name_len + 16; 1623 e = data + name_len + 16;
1619 1624
1620 ceph_encode_string_safe(&data, data_end, snap_name, name_len, bad); 1625 ceph_encode_string_safe(&p, e, snap_name, name_len, bad);
1621 ceph_encode_64_safe(&data, data_end, new_snapid, bad); 1626 ceph_encode_64_safe(&p, e, new_snapid, bad);
1622 1627
1623 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", 1628 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
1624 data_start, data - data_start, &ver); 1629 data, p - data, &ver);
1625 1630
1626 kfree(data_start); 1631 kfree(data);
1627 1632
1628 if (ret < 0) 1633 if (ret < 0)
1629 return ret; 1634 return ret;
@@ -1659,6 +1664,9 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1659 if (ret < 0) 1664 if (ret < 0)
1660 return ret; 1665 return ret;
1661 1666
1667 /* resized? */
1668 set_capacity(rbd_dev->disk, h.image_size / 512ULL);
1669
1662 down_write(&rbd_dev->header.snap_rwsem); 1670 down_write(&rbd_dev->header.snap_rwsem);
1663 1671
1664 snap_seq = rbd_dev->header.snapc->seq; 1672 snap_seq = rbd_dev->header.snapc->seq;
@@ -1716,7 +1724,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
1716 if (!disk) 1724 if (!disk)
1717 goto out; 1725 goto out;
1718 1726
1719 sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id); 1727 snprintf(disk->disk_name, sizeof(disk->disk_name), DRV_NAME "%d",
1728 rbd_dev->id);
1720 disk->major = rbd_dev->major; 1729 disk->major = rbd_dev->major;
1721 disk->first_minor = 0; 1730 disk->first_minor = 0;
1722 disk->fops = &rbd_bd_ops; 1731 disk->fops = &rbd_bd_ops;
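
The snap_add hunk keeps data pointing at the start of the allocation and advances a separate cursor p bounded by e, so the original pointer can be handed to the exec call and freed, while p - data gives the number of bytes actually encoded. A stand-alone sketch of that bounded-cursor pattern; the encode helper below is a hypothetical stand-in, not the ceph_encode_*_safe macros:

/* Bounded-cursor encoding into a heap buffer, modelled in plain C. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical helper: length-prefixed string, fails if it will not fit */
static int encode_string(void **p, void *end, const char *s, uint32_t len)
{
	if ((char *)end - (char *)*p < (long)(sizeof(len) + len))
		return -1;
	memcpy(*p, &len, sizeof(len));
	memcpy((char *)*p + sizeof(len), s, len);
	*p = (char *)*p + sizeof(len) + len;
	return 0;
}

int main(void)
{
	const char *name = "snap1";
	uint32_t len = strlen(name);
	size_t bufsize = len + 16;

	void *data = malloc(bufsize);		/* start of buffer, kept for free() */
	void *p = data;				/* moving cursor */
	void *e = (char *)data + bufsize;	/* hard end of the buffer */

	if (!data || encode_string(&p, e, name, len))
		return 1;

	printf("encoded %ld bytes starting at the original pointer\n",
	       (long)((char *)p - (char *)data));
	free(data);				/* still points at the start */
	return 0;
}
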
diff --git a/drivers/block/xen-blkback/Makefile b/drivers/block/xen-blkback/Makefile
new file mode 100644
index 000000000000..e491c1b76878
--- /dev/null
+++ b/drivers/block/xen-blkback/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_XEN_BLKDEV_BACKEND) := xen-blkback.o
2
3xen-blkback-y := blkback.o xenbus.o
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
new file mode 100644
index 000000000000..c73910cc28c9
--- /dev/null
+++ b/drivers/block/xen-blkback/blkback.c
@@ -0,0 +1,824 @@
1/******************************************************************************
2 *
3 * Back-end of the driver for virtual block devices. This portion of the
4 * driver exports a 'unified' block-device interface that can be accessed
5 * by any operating system that implements a compatible front end. A
6 * reference front-end implementation can be found in:
7 * drivers/block/xen-blkfront.c
8 *
9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10 * Copyright (c) 2005, Christopher Clark
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
24 *
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 * IN THE SOFTWARE.
35 */
36
37#include <linux/spinlock.h>
38#include <linux/kthread.h>
39#include <linux/list.h>
40#include <linux/delay.h>
41#include <linux/freezer.h>
42
43#include <xen/events.h>
44#include <xen/page.h>
45#include <asm/xen/hypervisor.h>
46#include <asm/xen/hypercall.h>
47#include "common.h"
48
49/*
50 * These are rather arbitrary. They are fairly large because adjacent requests
51 * pulled from a communication ring are quite likely to end up being part of
52 * the same scatter/gather request at the disc.
53 *
54 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
55 *
56 * This will increase the chances of being able to write whole tracks.
57 * 64 should be enough to keep us competitive with Linux.
58 */
59static int xen_blkif_reqs = 64;
60module_param_named(reqs, xen_blkif_reqs, int, 0);
61MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
62
63/* Run-time switchable: /sys/module/blkback/parameters/ */
64static unsigned int log_stats;
65module_param(log_stats, int, 0644);
66
67/*
68 * Each outstanding request that we've passed to the lower device layers has a
 69 * 'pending_req' allocated to it. Each bio that completes decrements
70 * the pendcnt towards zero. When it hits zero, the specified domain has a
71 * response queued for it, with the saved 'id' passed back.
72 */
73struct pending_req {
74 struct xen_blkif *blkif;
75 u64 id;
76 int nr_pages;
77 atomic_t pendcnt;
78 unsigned short operation;
79 int status;
80 struct list_head free_list;
81};
82
83#define BLKBACK_INVALID_HANDLE (~0)
84
85struct xen_blkbk {
86 struct pending_req *pending_reqs;
87 /* List of all 'pending_req' available */
88 struct list_head pending_free;
89 /* And its spinlock. */
90 spinlock_t pending_free_lock;
91 wait_queue_head_t pending_free_wq;
92 /* The list of all pages that are available. */
93 struct page **pending_pages;
94 /* And the grant handles that are available. */
95 grant_handle_t *pending_grant_handles;
96};
97
98static struct xen_blkbk *blkbk;
99
100/*
101 * Little helpful macro to figure out the index and virtual address of the
 102 * pending_pages[..]. For each 'pending_req' we have up to
103 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
104 * 10 and would index in the pending_pages[..].
105 */
106static inline int vaddr_pagenr(struct pending_req *req, int seg)
107{
108 return (req - blkbk->pending_reqs) *
109 BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
110}
111
112#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
113
114static inline unsigned long vaddr(struct pending_req *req, int seg)
115{
116 unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
117 return (unsigned long)pfn_to_kaddr(pfn);
118}
119
120#define pending_handle(_req, _seg) \
121 (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
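
pending_pages[] is one flat array with BLKIF_MAX_SEGMENTS_PER_REQUEST consecutive slots per pending_req, so the helpers above reduce to request_number * segments_per_request + segment. A tiny illustration of that flattening (constant taken from the comment above):

/* Flat pending_pages[] indexing as done by vaddr_pagenr(). */
#include <stdio.h>

#define SEGS_PER_REQ 11	/* BLKIF_MAX_SEGMENTS_PER_REQUEST in this driver */

static int pagenr(int req_index, int seg)
{
	return req_index * SEGS_PER_REQ + seg;
}

int main(void)
{
	/* request #3, segment #7 lands in slot 3 * 11 + 7 = 40 */
	printf("pending_pages index: %d\n", pagenr(3, 7));
	return 0;
}
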
122
123
124static int do_block_io_op(struct xen_blkif *blkif);
125static int dispatch_rw_block_io(struct xen_blkif *blkif,
126 struct blkif_request *req,
127 struct pending_req *pending_req);
128static void make_response(struct xen_blkif *blkif, u64 id,
129 unsigned short op, int st);
130
131/*
132 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
133 */
134static struct pending_req *alloc_req(void)
135{
136 struct pending_req *req = NULL;
137 unsigned long flags;
138
139 spin_lock_irqsave(&blkbk->pending_free_lock, flags);
140 if (!list_empty(&blkbk->pending_free)) {
141 req = list_entry(blkbk->pending_free.next, struct pending_req,
142 free_list);
143 list_del(&req->free_list);
144 }
145 spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
146 return req;
147}
148
149/*
 150 * Return the 'pending_req' structure to the free pool. We also
 151 * wake up the thread if it was waiting for a free request.
152 */
153static void free_req(struct pending_req *req)
154{
155 unsigned long flags;
156 int was_empty;
157
158 spin_lock_irqsave(&blkbk->pending_free_lock, flags);
159 was_empty = list_empty(&blkbk->pending_free);
160 list_add(&req->free_list, &blkbk->pending_free);
161 spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
162 if (was_empty)
163 wake_up(&blkbk->pending_free_wq);
164}
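
alloc_req() and free_req() implement a fixed-size object pool: a spinlock-protected free list, plus a wait queue that the scheduler thread sleeps on when the pool is empty and that free_req() wakes when it refills a previously empty list. The sketch below models only the list handling, single-threaded, with the locking and wake-up reduced to comments:

/* Fixed-size request pool with a singly linked free list (model). */
#include <stdio.h>
#include <stddef.h>

struct pending_req {
	int id;
	struct pending_req *next;	/* stands in for the list_head */
};

static struct pending_req pool[4];
static struct pending_req *free_list;

static struct pending_req *alloc_req(void)
{
	/* spin_lock_irqsave(&pending_free_lock, flags) */
	struct pending_req *req = free_list;
	if (req)
		free_list = req->next;
	/* spin_unlock_irqrestore(&pending_free_lock, flags) */
	return req;	/* NULL means "out of requests, retry later" */
}

static void free_req(struct pending_req *req)
{
	/* spin_lock_irqsave(&pending_free_lock, flags) */
	int was_empty = (free_list == NULL);
	req->next = free_list;
	free_list = req;
	/* spin_unlock_irqrestore(&pending_free_lock, flags) */
	if (was_empty) {
		/* wake_up(&pending_free_wq) in the driver */
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		pool[i].id = i;
		free_req(&pool[i]);
	}
	struct pending_req *r = alloc_req();
	printf("got request %d, pool %s\n",
	       r->id, free_list ? "non-empty" : "empty");
	free_req(r);
	return 0;
}
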
165
166/*
167 * Routines for managing virtual block devices (vbds).
168 */
169static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
170 int operation)
171{
172 struct xen_vbd *vbd = &blkif->vbd;
173 int rc = -EACCES;
174
175 if ((operation != READ) && vbd->readonly)
176 goto out;
177
178 if (likely(req->nr_sects)) {
179 blkif_sector_t end = req->sector_number + req->nr_sects;
180
181 if (unlikely(end < req->sector_number))
182 goto out;
183 if (unlikely(end > vbd_sz(vbd)))
184 goto out;
185 }
186
187 req->dev = vbd->pdevice;
188 req->bdev = vbd->bdev;
189 rc = 0;
190
191 out:
192 return rc;
193}
194
195static void xen_vbd_resize(struct xen_blkif *blkif)
196{
197 struct xen_vbd *vbd = &blkif->vbd;
198 struct xenbus_transaction xbt;
199 int err;
200 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
201 unsigned long long new_size = vbd_sz(vbd);
202
203 pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
204 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
205 pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
206 vbd->size = new_size;
207again:
208 err = xenbus_transaction_start(&xbt);
209 if (err) {
210 pr_warn(DRV_PFX "Error starting transaction");
211 return;
212 }
213 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
214 (unsigned long long)vbd_sz(vbd));
215 if (err) {
216 pr_warn(DRV_PFX "Error writing new size");
217 goto abort;
218 }
219 /*
220 * Write the current state; we will use this to synchronize
221 * the front-end. If the current state is "connected" the
222 * front-end will get the new size information online.
223 */
224 err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
225 if (err) {
226 pr_warn(DRV_PFX "Error writing the state");
227 goto abort;
228 }
229
230 err = xenbus_transaction_end(xbt, 0);
231 if (err == -EAGAIN)
232 goto again;
233 if (err)
234 pr_warn(DRV_PFX "Error ending transaction");
235 return;
236abort:
237 xenbus_transaction_end(xbt, 1);
238}
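
xen_vbd_resize() above, like connect() in xenbus.c further down, follows the standard xenbus transaction shape: start, write the keys, end, and jump back to the start label when the end returns -EAGAIN because another writer committed first. A schematic of just that control flow, with the xenbus calls replaced by stubs:

/* Start/write/end/-EAGAIN retry loop, stub functions only. */
#include <stdio.h>
#include <errno.h>

static int attempts;

static int transaction_start(void)  { return 0; }
static int write_key(const char *k) { printf("write %s\n", k); return 0; }
/* pretend the first attempt collides with a concurrent writer */
static int transaction_end(int abort_it)
{
	if (abort_it)
		return 0;
	return ++attempts == 1 ? -EAGAIN : 0;
}

int main(void)
{
	int err;
again:
	err = transaction_start();
	if (err)
		return 1;
	err = write_key("sectors");
	if (err)
		goto abort;
	err = write_key("state");
	if (err)
		goto abort;
	err = transaction_end(0);
	if (err == -EAGAIN)
		goto again;		/* someone else committed first: redo */
	return err ? 1 : 0;
abort:
	transaction_end(1);		/* discard our partial writes */
	return 1;
}
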
239
240/*
241 * Notification from the guest OS.
242 */
243static void blkif_notify_work(struct xen_blkif *blkif)
244{
245 blkif->waiting_reqs = 1;
246 wake_up(&blkif->wq);
247}
248
249irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
250{
251 blkif_notify_work(dev_id);
252 return IRQ_HANDLED;
253}
254
255/*
256 * SCHEDULER FUNCTIONS
257 */
258
259static void print_stats(struct xen_blkif *blkif)
260{
261 pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n",
262 current->comm, blkif->st_oo_req,
263 blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
264 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
265 blkif->st_rd_req = 0;
266 blkif->st_wr_req = 0;
267 blkif->st_oo_req = 0;
268}
269
270int xen_blkif_schedule(void *arg)
271{
272 struct xen_blkif *blkif = arg;
273 struct xen_vbd *vbd = &blkif->vbd;
274
275 xen_blkif_get(blkif);
276
277 while (!kthread_should_stop()) {
278 if (try_to_freeze())
279 continue;
280 if (unlikely(vbd->size != vbd_sz(vbd)))
281 xen_vbd_resize(blkif);
282
283 wait_event_interruptible(
284 blkif->wq,
285 blkif->waiting_reqs || kthread_should_stop());
286 wait_event_interruptible(
287 blkbk->pending_free_wq,
288 !list_empty(&blkbk->pending_free) ||
289 kthread_should_stop());
290
291 blkif->waiting_reqs = 0;
292 smp_mb(); /* clear flag *before* checking for work */
293
294 if (do_block_io_op(blkif))
295 blkif->waiting_reqs = 1;
296
297 if (log_stats && time_after(jiffies, blkif->st_print))
298 print_stats(blkif);
299 }
300
301 if (log_stats)
302 print_stats(blkif);
303
304 blkif->xenblkd = NULL;
305 xen_blkif_put(blkif);
306
307 return 0;
308}
309
310struct seg_buf {
311 unsigned long buf;
312 unsigned int nsec;
313};
314/*
315 * Unmap the grant references, and also remove the M2P over-rides
316 * used in the 'pending_req'.
317 */
318static void xen_blkbk_unmap(struct pending_req *req)
319{
320 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
321 unsigned int i, invcount = 0;
322 grant_handle_t handle;
323 int ret;
324
325 for (i = 0; i < req->nr_pages; i++) {
326 handle = pending_handle(req, i);
327 if (handle == BLKBACK_INVALID_HANDLE)
328 continue;
329 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
330 GNTMAP_host_map, handle);
331 pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
332 invcount++;
333 }
334
335 ret = HYPERVISOR_grant_table_op(
336 GNTTABOP_unmap_grant_ref, unmap, invcount);
337 BUG_ON(ret);
338 /*
 339 * Note, we use invcount, not nr_pages, so we can't index
340 * using vaddr(req, i).
341 */
342 for (i = 0; i < invcount; i++) {
343 ret = m2p_remove_override(
344 virt_to_page(unmap[i].host_addr), false);
345 if (ret) {
346 pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
347 (unsigned long)unmap[i].host_addr);
348 continue;
349 }
350 }
351}
352
353static int xen_blkbk_map(struct blkif_request *req,
354 struct pending_req *pending_req,
355 struct seg_buf seg[])
356{
357 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
358 int i;
359 int nseg = req->nr_segments;
360 int ret = 0;
361
362 /*
 363 * Fill out preq.nr_sects with the proper number of sectors, and set up
 364 * map[..] with the PFN of the page in our domain with the
365 * corresponding grant reference for each page.
366 */
367 for (i = 0; i < nseg; i++) {
368 uint32_t flags;
369
370 flags = GNTMAP_host_map;
371 if (pending_req->operation != BLKIF_OP_READ)
372 flags |= GNTMAP_readonly;
373 gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
374 req->u.rw.seg[i].gref,
375 pending_req->blkif->domid);
376 }
377
378 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
379 BUG_ON(ret);
380
381 /*
382 * Now swizzle the MFN in our domain with the MFN from the other domain
383 * so that when we access vaddr(pending_req,i) it has the contents of
384 * the page from the other domain.
385 */
386 for (i = 0; i < nseg; i++) {
387 if (unlikely(map[i].status != 0)) {
388 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
389 map[i].handle = BLKBACK_INVALID_HANDLE;
390 ret |= 1;
391 }
392
393 pending_handle(pending_req, i) = map[i].handle;
394
395 if (ret)
396 continue;
397
398 ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
399 blkbk->pending_page(pending_req, i), false);
400 if (ret) {
401 pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
402 (unsigned long)map[i].dev_bus_addr, ret);
403 /* We could switch over to GNTTABOP_copy */
404 continue;
405 }
406
407 seg[i].buf = map[i].dev_bus_addr |
408 (req->u.rw.seg[i].first_sect << 9);
409 }
410 return ret;
411}
412
413/*
 414 * Completion callback on the bios. Called as bio->bi_end_io()
415 */
416
417static void __end_block_io_op(struct pending_req *pending_req, int error)
418{
419 /* An error fails the entire request. */
420 if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
421 (error == -EOPNOTSUPP)) {
422 pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
423 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
424 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
425 } else if (error) {
426 pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
427 " error=%d\n", error);
428 pending_req->status = BLKIF_RSP_ERROR;
429 }
430
431 /*
 432 * If all of the bios have completed it is time to unmap
433 * the grant references associated with 'request' and provide
434 * the proper response on the ring.
435 */
436 if (atomic_dec_and_test(&pending_req->pendcnt)) {
437 xen_blkbk_unmap(pending_req);
438 make_response(pending_req->blkif, pending_req->id,
439 pending_req->operation, pending_req->status);
440 xen_blkif_put(pending_req->blkif);
441 free_req(pending_req);
442 }
443}
444
445/*
446 * bio callback.
447 */
448static void end_block_io_op(struct bio *bio, int error)
449{
450 __end_block_io_op(bio->bi_private, error);
451 bio_put(bio);
452}
453
454
455
456/*
 457 * Function to copy from the ring buffer the 'struct blkif_request'
458 * (which has the sectors we want, number of them, grant references, etc),
459 * and transmute it to the block API to hand it over to the proper block disk.
460 */
461static int do_block_io_op(struct xen_blkif *blkif)
462{
463 union blkif_back_rings *blk_rings = &blkif->blk_rings;
464 struct blkif_request req;
465 struct pending_req *pending_req;
466 RING_IDX rc, rp;
467 int more_to_do = 0;
468
469 rc = blk_rings->common.req_cons;
470 rp = blk_rings->common.sring->req_prod;
471 rmb(); /* Ensure we see queued requests up to 'rp'. */
472
473 while (rc != rp) {
474
475 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
476 break;
477
478 if (kthread_should_stop()) {
479 more_to_do = 1;
480 break;
481 }
482
483 pending_req = alloc_req();
484 if (NULL == pending_req) {
485 blkif->st_oo_req++;
486 more_to_do = 1;
487 break;
488 }
489
490 switch (blkif->blk_protocol) {
491 case BLKIF_PROTOCOL_NATIVE:
492 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
493 break;
494 case BLKIF_PROTOCOL_X86_32:
495 blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
496 break;
497 case BLKIF_PROTOCOL_X86_64:
498 blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
499 break;
500 default:
501 BUG();
502 }
503 blk_rings->common.req_cons = ++rc; /* before make_response() */
504
505 /* Apply all sanity checks to /private copy/ of request. */
506 barrier();
507
508 if (dispatch_rw_block_io(blkif, &req, pending_req))
509 break;
510
511 /* Yield point for this unbounded loop. */
512 cond_resched();
513 }
514
515 return more_to_do;
516}
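
do_block_io_op() is a single-consumer ring walk: snapshot the producer index, issue a read barrier so request contents are read only after the index, then take private copies of the entries until the consumer cursor catches up, bailing out early on overflow, kthread stop or pool exhaustion. A simplified userspace model of the index handling; the barriers and the shared-memory layout are only noted in comments:

/* Request-ring consumer loop, reduced to the index arithmetic. */
#include <stdio.h>

#define RING_SIZE 8

struct request { int id; };

static struct request ring[RING_SIZE];
static unsigned int req_prod;	/* advanced by the frontend */
static unsigned int req_cons;	/* private to this consumer */

static void consume_all(void)
{
	unsigned int rc = req_cons;
	unsigned int rp = req_prod;	/* snapshot the producer index */
	/* rmb(): read the entries only after reading rp */

	while (rc != rp) {
		/* overflow check: never run more than RING_SIZE ahead */
		if (rp - rc > RING_SIZE)
			break;
		struct request req = ring[rc % RING_SIZE];	/* private copy */
		rc++;
		printf("dispatch request %d\n", req.id);
	}
	req_cons = rc;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		ring[req_prod % RING_SIZE].id = i;
		req_prod++;
	}
	consume_all();
	return 0;
}
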
517
518/*
 519 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 520 * call 'submit_bio' to pass it to the underlying storage.
521 */
522static int dispatch_rw_block_io(struct xen_blkif *blkif,
523 struct blkif_request *req,
524 struct pending_req *pending_req)
525{
526 struct phys_req preq;
527 struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
528 unsigned int nseg;
529 struct bio *bio = NULL;
530 struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
531 int i, nbio = 0;
532 int operation;
533 struct blk_plug plug;
534
535 switch (req->operation) {
536 case BLKIF_OP_READ:
537 blkif->st_rd_req++;
538 operation = READ;
539 break;
540 case BLKIF_OP_WRITE:
541 blkif->st_wr_req++;
542 operation = WRITE_ODIRECT;
543 break;
544 case BLKIF_OP_FLUSH_DISKCACHE:
545 blkif->st_f_req++;
546 operation = WRITE_FLUSH;
547 break;
548 case BLKIF_OP_WRITE_BARRIER:
549 default:
550 operation = 0; /* make gcc happy */
551 goto fail_response;
552 break;
553 }
554
555 /* Check that the number of segments is sane. */
556 nseg = req->nr_segments;
557 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
558 unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
559 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
560 nseg);
 561 /* Haven't submitted any bios yet. */
562 goto fail_response;
563 }
564
565 preq.dev = req->handle;
566 preq.sector_number = req->u.rw.sector_number;
567 preq.nr_sects = 0;
568
569 pending_req->blkif = blkif;
570 pending_req->id = req->id;
571 pending_req->operation = req->operation;
572 pending_req->status = BLKIF_RSP_OKAY;
573 pending_req->nr_pages = nseg;
574
575 for (i = 0; i < nseg; i++) {
576 seg[i].nsec = req->u.rw.seg[i].last_sect -
577 req->u.rw.seg[i].first_sect + 1;
578 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
579 (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
580 goto fail_response;
581 preq.nr_sects += seg[i].nsec;
582
583 }
584
585 if (xen_vbd_translate(&preq, blkif, operation) != 0) {
586 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
587 operation == READ ? "read" : "write",
588 preq.sector_number,
589 preq.sector_number + preq.nr_sects, preq.dev);
590 goto fail_response;
591 }
592
593 /*
594 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
595 * is set there.
596 */
597 for (i = 0; i < nseg; i++) {
598 if (((int)preq.sector_number|(int)seg[i].nsec) &
599 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
600 pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
601 blkif->domid);
602 goto fail_response;
603 }
604 }
605
606 /*
607 * If we have failed at this point, we need to undo the M2P override,
608 * set gnttab_set_unmap_op on all of the grant references and perform
609 * the hypercall to unmap the grants - that is all done in
610 * xen_blkbk_unmap.
611 */
612 if (xen_blkbk_map(req, pending_req, seg))
613 goto fail_flush;
614
 615 /* The corresponding xen_blkif_put is done in __end_block_io_op */
616 xen_blkif_get(blkif);
617
618 for (i = 0; i < nseg; i++) {
619 while ((bio == NULL) ||
620 (bio_add_page(bio,
621 blkbk->pending_page(pending_req, i),
622 seg[i].nsec << 9,
623 seg[i].buf & ~PAGE_MASK) == 0)) {
624
625 bio = bio_alloc(GFP_KERNEL, nseg-i);
626 if (unlikely(bio == NULL))
627 goto fail_put_bio;
628
629 biolist[nbio++] = bio;
630 bio->bi_bdev = preq.bdev;
631 bio->bi_private = pending_req;
632 bio->bi_end_io = end_block_io_op;
633 bio->bi_sector = preq.sector_number;
634 }
635
636 preq.sector_number += seg[i].nsec;
637 }
638
639 /* This will be hit if the operation was a flush. */
640 if (!bio) {
641 BUG_ON(operation != WRITE_FLUSH);
642
643 bio = bio_alloc(GFP_KERNEL, 0);
644 if (unlikely(bio == NULL))
645 goto fail_put_bio;
646
647 biolist[nbio++] = bio;
648 bio->bi_bdev = preq.bdev;
649 bio->bi_private = pending_req;
650 bio->bi_end_io = end_block_io_op;
651 }
652
653 /*
 654 * We set it to nbio so that the last submit_bio does not have to call
 655 * atomic_inc.
656 */
657 atomic_set(&pending_req->pendcnt, nbio);
658
659 /* Get a reference count for the disk queue and start sending I/O */
660 blk_start_plug(&plug);
661
662 for (i = 0; i < nbio; i++)
663 submit_bio(operation, biolist[i]);
664
665 /* Let the I/Os go.. */
666 blk_finish_plug(&plug);
667
668 if (operation == READ)
669 blkif->st_rd_sect += preq.nr_sects;
670 else if (operation == WRITE || operation == WRITE_FLUSH)
671 blkif->st_wr_sect += preq.nr_sects;
672
673 return 0;
674
675 fail_flush:
676 xen_blkbk_unmap(pending_req);
677 fail_response:
 678 /* Haven't submitted any bios yet. */
679 make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
680 free_req(pending_req);
681 msleep(1); /* back off a bit */
682 return -EIO;
683
684 fail_put_bio:
685 for (i = 0; i < nbio; i++)
686 bio_put(biolist[i]);
687 __end_block_io_op(pending_req, -EINVAL);
688 msleep(1); /* back off a bit */
689 return -EIO;
690}
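
The request lifecycle above hinges on pendcnt: it is set to the number of bios before any of them is submitted, each completion decrements it, and only the decrement that reaches zero unmaps the grants and queues the response. A minimal model of that last-one-out accounting, with a plain int standing in for atomic_t and the completions run single-threaded:

/* "Last completion sends the response" accounting, modelled in plain C. */
#include <stdio.h>

struct pending { int pendcnt; int status; };

static void end_one_bio(struct pending *p, int error)
{
	if (error)
		p->status = -1;
	/* atomic_dec_and_test() in the driver */
	if (--p->pendcnt == 0)
		printf("all bios done, respond with status %d\n", p->status);
}

int main(void)
{
	struct pending p = { .pendcnt = 3, .status = 0 };	/* 3 bios submitted */
	end_one_bio(&p, 0);
	end_one_bio(&p, 0);
	end_one_bio(&p, 0);	/* this one makes the response */
	return 0;
}
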
691
692
693
694/*
695 * Put a response on the ring on how the operation fared.
696 */
697static void make_response(struct xen_blkif *blkif, u64 id,
698 unsigned short op, int st)
699{
700 struct blkif_response resp;
701 unsigned long flags;
702 union blkif_back_rings *blk_rings = &blkif->blk_rings;
703 int more_to_do = 0;
704 int notify;
705
706 resp.id = id;
707 resp.operation = op;
708 resp.status = st;
709
710 spin_lock_irqsave(&blkif->blk_ring_lock, flags);
711 /* Place on the response ring for the relevant domain. */
712 switch (blkif->blk_protocol) {
713 case BLKIF_PROTOCOL_NATIVE:
714 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
715 &resp, sizeof(resp));
716 break;
717 case BLKIF_PROTOCOL_X86_32:
718 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
719 &resp, sizeof(resp));
720 break;
721 case BLKIF_PROTOCOL_X86_64:
722 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
723 &resp, sizeof(resp));
724 break;
725 default:
726 BUG();
727 }
728 blk_rings->common.rsp_prod_pvt++;
729 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
730 if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
731 /*
732 * Tail check for pending requests. Allows frontend to avoid
733 * notifications if requests are already in flight (lower
734 * overheads and promotes batching).
735 */
736 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
737
738 } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
739 more_to_do = 1;
740 }
741
742 spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
743
744 if (more_to_do)
745 blkif_notify_work(blkif);
746 if (notify)
747 notify_remote_via_irq(blkif->irq);
748}
749
750static int __init xen_blkif_init(void)
751{
752 int i, mmap_pages;
753 int rc = 0;
754
755 if (!xen_pv_domain())
756 return -ENODEV;
757
758 blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
759 if (!blkbk) {
760 pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
761 return -ENOMEM;
762 }
763
764 mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
765
766 blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) *
767 xen_blkif_reqs, GFP_KERNEL);
768 blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
769 mmap_pages, GFP_KERNEL);
770 blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
771 mmap_pages, GFP_KERNEL);
772
773 if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
774 !blkbk->pending_pages) {
775 rc = -ENOMEM;
776 goto out_of_memory;
777 }
778
779 for (i = 0; i < mmap_pages; i++) {
780 blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
781 blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
782 if (blkbk->pending_pages[i] == NULL) {
783 rc = -ENOMEM;
784 goto out_of_memory;
785 }
786 }
787 rc = xen_blkif_interface_init();
788 if (rc)
789 goto failed_init;
790
791 memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs));
792
793 INIT_LIST_HEAD(&blkbk->pending_free);
794 spin_lock_init(&blkbk->pending_free_lock);
795 init_waitqueue_head(&blkbk->pending_free_wq);
796
797 for (i = 0; i < xen_blkif_reqs; i++)
798 list_add_tail(&blkbk->pending_reqs[i].free_list,
799 &blkbk->pending_free);
800
801 rc = xen_blkif_xenbus_init();
802 if (rc)
803 goto failed_init;
804
805 return 0;
806
807 out_of_memory:
808 pr_alert(DRV_PFX "%s: out of memory\n", __func__);
809 failed_init:
810 kfree(blkbk->pending_reqs);
811 kfree(blkbk->pending_grant_handles);
812 for (i = 0; i < mmap_pages; i++) {
813 if (blkbk->pending_pages[i])
814 __free_page(blkbk->pending_pages[i]);
815 }
816 kfree(blkbk->pending_pages);
817 kfree(blkbk);
818 blkbk = NULL;
819 return rc;
820}
821
822module_init(xen_blkif_init);
823
824MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
new file mode 100644
index 000000000000..9e40b283a468
--- /dev/null
+++ b/drivers/block/xen-blkback/common.h
@@ -0,0 +1,233 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License version 2
4 * as published by the Free Software Foundation; or, when distributed
5 * separately from the Linux kernel or incorporated into other
6 * software packages, subject to the following license:
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this source file (the "Software"), to deal in the Software without
10 * restriction, including without limitation the rights to use, copy, modify,
11 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * IN THE SOFTWARE.
25 */
26
27#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
28#define __XEN_BLKIF__BACKEND__COMMON_H__
29
30#include <linux/version.h>
31#include <linux/module.h>
32#include <linux/interrupt.h>
33#include <linux/slab.h>
34#include <linux/blkdev.h>
35#include <linux/vmalloc.h>
36#include <linux/wait.h>
37#include <linux/io.h>
38#include <asm/setup.h>
39#include <asm/pgalloc.h>
40#include <asm/hypervisor.h>
41#include <xen/grant_table.h>
42#include <xen/xenbus.h>
43#include <xen/interface/io/ring.h>
44#include <xen/interface/io/blkif.h>
45#include <xen/interface/io/protocols.h>
46
47#define DRV_PFX "xen-blkback:"
48#define DPRINTK(fmt, args...) \
49 pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
50 __func__, __LINE__, ##args)
51
52
53/* Not a real protocol. Used to generate ring structs which contain
54 * the elements common to all protocols only. This way we get a
55 * compiler-checkable way to use common struct elements, so we can
56 * avoid using switch(protocol) in a number of places. */
57struct blkif_common_request {
58 char dummy;
59};
60struct blkif_common_response {
61 char dummy;
62};
63
64/* i386 protocol version */
65#pragma pack(push, 4)
66struct blkif_x86_32_request {
67 uint8_t operation; /* BLKIF_OP_??? */
68 uint8_t nr_segments; /* number of segments */
69 blkif_vdev_t handle; /* only for read/write requests */
70 uint64_t id; /* private guest value, echoed in resp */
71 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
72 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
73};
74struct blkif_x86_32_response {
75 uint64_t id; /* copied from request */
76 uint8_t operation; /* copied from request */
77 int16_t status; /* BLKIF_RSP_??? */
78};
79#pragma pack(pop)
80
81/* x86_64 protocol version */
82struct blkif_x86_64_request {
83 uint8_t operation; /* BLKIF_OP_??? */
84 uint8_t nr_segments; /* number of segments */
85 blkif_vdev_t handle; /* only for read/write requests */
86 uint64_t __attribute__((__aligned__(8))) id;
87 blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
88 struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
89};
90struct blkif_x86_64_response {
91 uint64_t __attribute__((__aligned__(8))) id;
92 uint8_t operation; /* copied from request */
93 int16_t status; /* BLKIF_RSP_??? */
94};
95
96DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
97 struct blkif_common_response);
98DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
99 struct blkif_x86_32_response);
100DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
101 struct blkif_x86_64_response);
102
103union blkif_back_rings {
104 struct blkif_back_ring native;
105 struct blkif_common_back_ring common;
106 struct blkif_x86_32_back_ring x86_32;
107 struct blkif_x86_64_back_ring x86_64;
108};
109
110enum blkif_protocol {
111 BLKIF_PROTOCOL_NATIVE = 1,
112 BLKIF_PROTOCOL_X86_32 = 2,
113 BLKIF_PROTOCOL_X86_64 = 3,
114};
115
116struct xen_vbd {
117 /* What the domain refers to this vbd as. */
118 blkif_vdev_t handle;
119 /* Non-zero -> read-only */
120 unsigned char readonly;
121 /* VDISK_xxx */
122 unsigned char type;
123 /* phys device that this vbd maps to. */
124 u32 pdevice;
125 struct block_device *bdev;
126 /* Cached size parameter. */
127 sector_t size;
128 bool flush_support;
129};
130
131struct backend_info;
132
133struct xen_blkif {
134 /* Unique identifier for this interface. */
135 domid_t domid;
136 unsigned int handle;
137 /* Physical parameters of the comms window. */
138 unsigned int irq;
139 /* Comms information. */
140 enum blkif_protocol blk_protocol;
141 union blkif_back_rings blk_rings;
142 struct vm_struct *blk_ring_area;
143 /* The VBD attached to this interface. */
144 struct xen_vbd vbd;
145 /* Back pointer to the backend_info. */
146 struct backend_info *be;
147 /* Private fields. */
148 spinlock_t blk_ring_lock;
149 atomic_t refcnt;
150
151 wait_queue_head_t wq;
152 /* One thread per one blkif. */
153 struct task_struct *xenblkd;
154 unsigned int waiting_reqs;
155
156 /* statistics */
157 unsigned long st_print;
158 int st_rd_req;
159 int st_wr_req;
160 int st_oo_req;
161 int st_f_req;
162 int st_rd_sect;
163 int st_wr_sect;
164
165 wait_queue_head_t waiting_to_free;
166
167 grant_handle_t shmem_handle;
168 grant_ref_t shmem_ref;
169};
170
171
172#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
173 (_v)->bdev->bd_part->nr_sects : \
174 get_capacity((_v)->bdev->bd_disk))
175
176#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
177#define xen_blkif_put(_b) \
178 do { \
179 if (atomic_dec_and_test(&(_b)->refcnt)) \
180 wake_up(&(_b)->waiting_to_free);\
181 } while (0)
182
183struct phys_req {
184 unsigned short dev;
185 unsigned short nr_sects;
186 struct block_device *bdev;
187 blkif_sector_t sector_number;
188};
189int xen_blkif_interface_init(void);
190
191int xen_blkif_xenbus_init(void);
192
193irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
194int xen_blkif_schedule(void *arg);
195
196int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
197 struct backend_info *be, int state);
198
199struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
200
201static inline void blkif_get_x86_32_req(struct blkif_request *dst,
202 struct blkif_x86_32_request *src)
203{
204 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
205 dst->operation = src->operation;
206 dst->nr_segments = src->nr_segments;
207 dst->handle = src->handle;
208 dst->id = src->id;
209 dst->u.rw.sector_number = src->sector_number;
210 barrier();
211 if (n > dst->nr_segments)
212 n = dst->nr_segments;
213 for (i = 0; i < n; i++)
214 dst->u.rw.seg[i] = src->seg[i];
215}
216
217static inline void blkif_get_x86_64_req(struct blkif_request *dst,
218 struct blkif_x86_64_request *src)
219{
220 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
221 dst->operation = src->operation;
222 dst->nr_segments = src->nr_segments;
223 dst->handle = src->handle;
224 dst->id = src->id;
225 dst->u.rw.sector_number = src->sector_number;
226 barrier();
227 if (n > dst->nr_segments)
228 n = dst->nr_segments;
229 for (i = 0; i < n; i++)
230 dst->u.rw.seg[i] = src->seg[i];
231}
232
233#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
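
The two blkif_get_x86_*_req() helpers above copy the guest-visible request into a private, native-layout copy and, after a barrier, clamp nr_segments to BLKIF_MAX_SEGMENTS_PER_REQUEST before copying the segment array, so a guest rewriting the field mid-copy cannot push the backend past the end of the array. A compact illustration of the clamp, using hypothetical struct layouts:

/* Copy-then-clamp as done by the protocol translation helpers. */
#include <stdio.h>
#include <string.h>

#define MAX_SEGS 11

struct guest_req  { unsigned char nr_segments; int seg[MAX_SEGS]; };
struct native_req { unsigned char nr_segments; int seg[MAX_SEGS]; };

static void get_req(struct native_req *dst, const struct guest_req *src)
{
	dst->nr_segments = src->nr_segments;	/* value chosen by the guest */
	/* barrier(): from here on, use the copy, not whatever src says now */
	int n = MAX_SEGS;
	if (n > dst->nr_segments)
		n = dst->nr_segments;		/* never copy more than MAX_SEGS */
	for (int i = 0; i < n; i++)
		dst->seg[i] = src->seg[i];
}

int main(void)
{
	struct guest_req g = { .nr_segments = 200 };	/* bogus count */
	struct native_req n;
	memset(&n, 0, sizeof(n));
	get_req(&n, &g);
	printf("copied at most %d segments\n", MAX_SEGS);
	return 0;
}
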
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
new file mode 100644
index 000000000000..34570823355b
--- /dev/null
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -0,0 +1,768 @@
1/* Xenbus code for blkif backend
2 Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
3 Copyright (C) 2005 XenSource Ltd
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15*/
16
17#include <stdarg.h>
18#include <linux/module.h>
19#include <linux/kthread.h>
20#include <xen/events.h>
21#include <xen/grant_table.h>
22#include "common.h"
23
24struct backend_info {
25 struct xenbus_device *dev;
26 struct xen_blkif *blkif;
27 struct xenbus_watch backend_watch;
28 unsigned major;
29 unsigned minor;
30 char *mode;
31};
32
33static struct kmem_cache *xen_blkif_cachep;
34static void connect(struct backend_info *);
35static int connect_ring(struct backend_info *);
36static void backend_changed(struct xenbus_watch *, const char **,
37 unsigned int);
38
39struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
40{
41 return be->dev;
42}
43
44static int blkback_name(struct xen_blkif *blkif, char *buf)
45{
46 char *devpath, *devname;
47 struct xenbus_device *dev = blkif->be->dev;
48
49 devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
50 if (IS_ERR(devpath))
51 return PTR_ERR(devpath);
52
53 devname = strstr(devpath, "/dev/");
54 if (devname != NULL)
55 devname += strlen("/dev/");
56 else
57 devname = devpath;
58
59 snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
60 kfree(devpath);
61
62 return 0;
63}
64
65static void xen_update_blkif_status(struct xen_blkif *blkif)
66{
67 int err;
68 char name[TASK_COMM_LEN];
69
70 /* Not ready to connect? */
71 if (!blkif->irq || !blkif->vbd.bdev)
72 return;
73
74 /* Already connected? */
75 if (blkif->be->dev->state == XenbusStateConnected)
76 return;
77
78 /* Attempt to connect: exit if we fail to. */
79 connect(blkif->be);
80 if (blkif->be->dev->state != XenbusStateConnected)
81 return;
82
83 err = blkback_name(blkif, name);
84 if (err) {
85 xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
86 return;
87 }
88
89 err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
90 if (err) {
91 xenbus_dev_error(blkif->be->dev, err, "block flush");
92 return;
93 }
94 invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
95
96 blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
97 if (IS_ERR(blkif->xenblkd)) {
98 err = PTR_ERR(blkif->xenblkd);
99 blkif->xenblkd = NULL;
100 xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
101 }
102}
103
104static struct xen_blkif *xen_blkif_alloc(domid_t domid)
105{
106 struct xen_blkif *blkif;
107
108 blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
109 if (!blkif)
110 return ERR_PTR(-ENOMEM);
111
112 memset(blkif, 0, sizeof(*blkif));
113 blkif->domid = domid;
114 spin_lock_init(&blkif->blk_ring_lock);
115 atomic_set(&blkif->refcnt, 1);
116 init_waitqueue_head(&blkif->wq);
117 blkif->st_print = jiffies;
118 init_waitqueue_head(&blkif->waiting_to_free);
119
120 return blkif;
121}
122
123static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
124{
125 struct gnttab_map_grant_ref op;
126
127 gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
128 GNTMAP_host_map, shared_page, blkif->domid);
129
130 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
131 BUG();
132
133 if (op.status) {
134 DPRINTK("Grant table operation failure !\n");
135 return op.status;
136 }
137
138 blkif->shmem_ref = shared_page;
139 blkif->shmem_handle = op.handle;
140
141 return 0;
142}
143
144static void unmap_frontend_page(struct xen_blkif *blkif)
145{
146 struct gnttab_unmap_grant_ref op;
147
148 gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
149 GNTMAP_host_map, blkif->shmem_handle);
150
151 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
152 BUG();
153}
154
155static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
156 unsigned int evtchn)
157{
158 int err;
159
160 /* Already connected through? */
161 if (blkif->irq)
162 return 0;
163
164 blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
165 if (!blkif->blk_ring_area)
166 return -ENOMEM;
167
168 err = map_frontend_page(blkif, shared_page);
169 if (err) {
170 free_vm_area(blkif->blk_ring_area);
171 return err;
172 }
173
174 switch (blkif->blk_protocol) {
175 case BLKIF_PROTOCOL_NATIVE:
176 {
177 struct blkif_sring *sring;
178 sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
179 BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
180 break;
181 }
182 case BLKIF_PROTOCOL_X86_32:
183 {
184 struct blkif_x86_32_sring *sring_x86_32;
185 sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
186 BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
187 break;
188 }
189 case BLKIF_PROTOCOL_X86_64:
190 {
191 struct blkif_x86_64_sring *sring_x86_64;
192 sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
193 BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
194 break;
195 }
196 default:
197 BUG();
198 }
199
200 err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
201 xen_blkif_be_int, 0,
202 "blkif-backend", blkif);
203 if (err < 0) {
204 unmap_frontend_page(blkif);
205 free_vm_area(blkif->blk_ring_area);
206 blkif->blk_rings.common.sring = NULL;
207 return err;
208 }
209 blkif->irq = err;
210
211 return 0;
212}
213
214static void xen_blkif_disconnect(struct xen_blkif *blkif)
215{
216 if (blkif->xenblkd) {
217 kthread_stop(blkif->xenblkd);
218 blkif->xenblkd = NULL;
219 }
220
221 atomic_dec(&blkif->refcnt);
222 wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
223 atomic_inc(&blkif->refcnt);
224
225 if (blkif->irq) {
226 unbind_from_irqhandler(blkif->irq, blkif);
227 blkif->irq = 0;
228 }
229
230 if (blkif->blk_rings.common.sring) {
231 unmap_frontend_page(blkif);
232 free_vm_area(blkif->blk_ring_area);
233 blkif->blk_rings.common.sring = NULL;
234 }
235}
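
xen_blkif_disconnect() drops its own reference, sleeps until every in-flight request has dropped its reference too (the last put wakes waiting_to_free), and then takes the base reference back before unbinding the irq and unmapping the ring, so the interface cannot go away while I/O is still completing. The fragment below restates that drop/wait/retake sequence with the synchronization primitives reduced to comments and the in-flight completions run inline:

/* Drain-before-teardown refcount sequence, single-threaded model. */
#include <stdio.h>

static int refcnt = 3;	/* 1 base reference + 2 requests still in flight */

static void complete_inflight_request(void) { refcnt--; }

int main(void)
{
	refcnt--;		/* atomic_dec(&blkif->refcnt): drop our reference */

	/* wait_event(waiting_to_free, refcnt == 0) in the driver;
	 * here the outstanding requests simply finish inline */
	while (refcnt != 0)
		complete_inflight_request();

	refcnt++;		/* atomic_inc(): take the base reference back */
	printf("safe to unbind the irq and unmap the ring, refcnt=%d\n", refcnt);
	return 0;
}
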
236
237void xen_blkif_free(struct xen_blkif *blkif)
238{
239 if (!atomic_dec_and_test(&blkif->refcnt))
240 BUG();
241 kmem_cache_free(xen_blkif_cachep, blkif);
242}
243
244int __init xen_blkif_interface_init(void)
245{
246 xen_blkif_cachep = kmem_cache_create("blkif_cache",
247 sizeof(struct xen_blkif),
248 0, 0, NULL);
249 if (!xen_blkif_cachep)
250 return -ENOMEM;
251
252 return 0;
253}
254
255/*
256 * sysfs interface for VBD I/O requests
257 */
258
259#define VBD_SHOW(name, format, args...) \
260 static ssize_t show_##name(struct device *_dev, \
261 struct device_attribute *attr, \
262 char *buf) \
263 { \
264 struct xenbus_device *dev = to_xenbus_device(_dev); \
265 struct backend_info *be = dev_get_drvdata(&dev->dev); \
266 \
267 return sprintf(buf, format, ##args); \
268 } \
269 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
270
271VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
272VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
273VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
274VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req);
275VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
276VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
277
278static struct attribute *xen_vbdstat_attrs[] = {
279 &dev_attr_oo_req.attr,
280 &dev_attr_rd_req.attr,
281 &dev_attr_wr_req.attr,
282 &dev_attr_f_req.attr,
283 &dev_attr_rd_sect.attr,
284 &dev_attr_wr_sect.attr,
285 NULL
286};
287
288static struct attribute_group xen_vbdstat_group = {
289 .name = "statistics",
290 .attrs = xen_vbdstat_attrs,
291};
292
293VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
294VBD_SHOW(mode, "%s\n", be->mode);
295
296int xenvbd_sysfs_addif(struct xenbus_device *dev)
297{
298 int error;
299
300 error = device_create_file(&dev->dev, &dev_attr_physical_device);
301 if (error)
302 goto fail1;
303
304 error = device_create_file(&dev->dev, &dev_attr_mode);
305 if (error)
306 goto fail2;
307
308 error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
309 if (error)
310 goto fail3;
311
312 return 0;
313
314fail3: sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
315fail2: device_remove_file(&dev->dev, &dev_attr_mode);
316fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
317 return error;
318}
319
320void xenvbd_sysfs_delif(struct xenbus_device *dev)
321{
322 sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
323 device_remove_file(&dev->dev, &dev_attr_mode);
324 device_remove_file(&dev->dev, &dev_attr_physical_device);
325}
326
327
328static void xen_vbd_free(struct xen_vbd *vbd)
329{
330 if (vbd->bdev)
331 blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
332 vbd->bdev = NULL;
333}
334
335static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
336 unsigned major, unsigned minor, int readonly,
337 int cdrom)
338{
339 struct xen_vbd *vbd;
340 struct block_device *bdev;
341 struct request_queue *q;
342
343 vbd = &blkif->vbd;
344 vbd->handle = handle;
345 vbd->readonly = readonly;
346 vbd->type = 0;
347
348 vbd->pdevice = MKDEV(major, minor);
349
350 bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
351 FMODE_READ : FMODE_WRITE, NULL);
352
353 if (IS_ERR(bdev)) {
354 DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
355 vbd->pdevice);
356 return -ENOENT;
357 }
358
359 vbd->bdev = bdev;
360 vbd->size = vbd_sz(vbd);
361
362 if (vbd->bdev->bd_disk == NULL) {
363 DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
364 vbd->pdevice);
365 xen_vbd_free(vbd);
366 return -ENOENT;
367 }
368
369 if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
370 vbd->type |= VDISK_CDROM;
371 if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
372 vbd->type |= VDISK_REMOVABLE;
373
374 q = bdev_get_queue(bdev);
375 if (q && q->flush_flags)
376 vbd->flush_support = true;
377
378 DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
379 handle, blkif->domid);
380 return 0;
381}
382static int xen_blkbk_remove(struct xenbus_device *dev)
383{
384 struct backend_info *be = dev_get_drvdata(&dev->dev);
385
386 DPRINTK("");
387
388 if (be->major || be->minor)
389 xenvbd_sysfs_delif(dev);
390
391 if (be->backend_watch.node) {
392 unregister_xenbus_watch(&be->backend_watch);
393 kfree(be->backend_watch.node);
394 be->backend_watch.node = NULL;
395 }
396
397 if (be->blkif) {
398 xen_blkif_disconnect(be->blkif);
399 xen_vbd_free(&be->blkif->vbd);
400 xen_blkif_free(be->blkif);
401 be->blkif = NULL;
402 }
403
404 kfree(be);
405 dev_set_drvdata(&dev->dev, NULL);
406 return 0;
407}
408
409int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
410 struct backend_info *be, int state)
411{
412 struct xenbus_device *dev = be->dev;
413 int err;
414
415 err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
416 "%d", state);
417 if (err)
418 xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
419
420 return err;
421}
422
423/*
424 * Entry point to this code when a new device is created. Allocate the basic
425 * structures, and watch the store waiting for the hotplug scripts to tell us
426 * the device's physical major and minor numbers. Switch to InitWait.
427 */
428static int xen_blkbk_probe(struct xenbus_device *dev,
429 const struct xenbus_device_id *id)
430{
431 int err;
432 struct backend_info *be = kzalloc(sizeof(struct backend_info),
433 GFP_KERNEL);
434 if (!be) {
435 xenbus_dev_fatal(dev, -ENOMEM,
436 "allocating backend structure");
437 return -ENOMEM;
438 }
439 be->dev = dev;
440 dev_set_drvdata(&dev->dev, be);
441
442 be->blkif = xen_blkif_alloc(dev->otherend_id);
443 if (IS_ERR(be->blkif)) {
444 err = PTR_ERR(be->blkif);
445 be->blkif = NULL;
446 xenbus_dev_fatal(dev, err, "creating block interface");
447 goto fail;
448 }
449
450 /* setup back pointer */
451 be->blkif->be = be;
452
453 err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
454 "%s/%s", dev->nodename, "physical-device");
455 if (err)
456 goto fail;
457
458 err = xenbus_switch_state(dev, XenbusStateInitWait);
459 if (err)
460 goto fail;
461
462 return 0;
463
464fail:
465 DPRINTK("failed");
466 xen_blkbk_remove(dev);
467 return err;
468}
469
470
471/*
472 * Callback received when the hotplug scripts have placed the physical-device
473 * node. Read it and the mode node, and create a vbd. If the frontend is
474 * ready, connect.
475 */
476static void backend_changed(struct xenbus_watch *watch,
477 const char **vec, unsigned int len)
478{
479 int err;
480 unsigned major;
481 unsigned minor;
482 struct backend_info *be
483 = container_of(watch, struct backend_info, backend_watch);
484 struct xenbus_device *dev = be->dev;
485 int cdrom = 0;
486 char *device_type;
487
488 DPRINTK("");
489
490 err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
491 &major, &minor);
492 if (XENBUS_EXIST_ERR(err)) {
493 /*
494 * Since this watch will fire once immediately after it is
495 * registered, we expect this. Ignore it, and wait for the
496 * hotplug scripts.
497 */
498 return;
499 }
500 if (err != 2) {
501 xenbus_dev_fatal(dev, err, "reading physical-device");
502 return;
503 }
504
505 if ((be->major || be->minor) &&
506 ((be->major != major) || (be->minor != minor))) {
507 pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
508 be->major, be->minor, major, minor);
509 return;
510 }
511
512 be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
513 if (IS_ERR(be->mode)) {
514 err = PTR_ERR(be->mode);
515 be->mode = NULL;
516 xenbus_dev_fatal(dev, err, "reading mode");
517 return;
518 }
519
520 device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
521 if (!IS_ERR(device_type)) {
522 cdrom = strcmp(device_type, "cdrom") == 0;
523 kfree(device_type);
524 }
525
526 if (be->major == 0 && be->minor == 0) {
527 /* Front end dir is a number, which is used as the handle. */
528
529 char *p = strrchr(dev->otherend, '/') + 1;
530 long handle;
531 err = strict_strtoul(p, 0, &handle);
532 if (err)
533 return;
534
535 be->major = major;
536 be->minor = minor;
537
538 err = xen_vbd_create(be->blkif, handle, major, minor,
539 (NULL == strchr(be->mode, 'w')), cdrom);
540 if (err) {
541 be->major = 0;
542 be->minor = 0;
543 xenbus_dev_fatal(dev, err, "creating vbd structure");
544 return;
545 }
546
547 err = xenvbd_sysfs_addif(dev);
548 if (err) {
549 xen_vbd_free(&be->blkif->vbd);
550 be->major = 0;
551 be->minor = 0;
552 xenbus_dev_fatal(dev, err, "creating sysfs entries");
553 return;
554 }
555
556 /* We're potentially connected now */
557 xen_update_blkif_status(be->blkif);
558 }
559}
560
561
562/*
563 * Callback received when the frontend's state changes.
564 */
565static void frontend_changed(struct xenbus_device *dev,
566 enum xenbus_state frontend_state)
567{
568 struct backend_info *be = dev_get_drvdata(&dev->dev);
569 int err;
570
571 DPRINTK("%s", xenbus_strstate(frontend_state));
572
573 switch (frontend_state) {
574 case XenbusStateInitialising:
575 if (dev->state == XenbusStateClosed) {
576 pr_info(DRV_PFX "%s: prepare for reconnect\n",
577 dev->nodename);
578 xenbus_switch_state(dev, XenbusStateInitWait);
579 }
580 break;
581
582 case XenbusStateInitialised:
583 case XenbusStateConnected:
584 /*
585 * Ensure we connect even when two watches fire in
 586 * close succession and we miss the intermediate value
587 * of frontend_state.
588 */
589 if (dev->state == XenbusStateConnected)
590 break;
591
592 /*
593 * Enforce precondition before potential leak point.
594 * blkif_disconnect() is idempotent.
595 */
596 xen_blkif_disconnect(be->blkif);
597
598 err = connect_ring(be);
599 if (err)
600 break;
601 xen_update_blkif_status(be->blkif);
602 break;
603
604 case XenbusStateClosing:
605 xen_blkif_disconnect(be->blkif);
606 xenbus_switch_state(dev, XenbusStateClosing);
607 break;
608
609 case XenbusStateClosed:
610 xenbus_switch_state(dev, XenbusStateClosed);
611 if (xenbus_dev_is_online(dev))
612 break;
613 /* fall through if not online */
614 case XenbusStateUnknown:
615 /* implies blkif_disconnect() via blkback_remove() */
616 device_unregister(&dev->dev);
617 break;
618
619 default:
620 xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
621 frontend_state);
622 break;
623 }
624}
625
626
627/* ** Connection ** */
628
629
630/*
631 * Write the physical details regarding the block device to the store, and
632 * switch to Connected state.
633 */
634static void connect(struct backend_info *be)
635{
636 struct xenbus_transaction xbt;
637 int err;
638 struct xenbus_device *dev = be->dev;
639
640 DPRINTK("%s", dev->otherend);
641
642 /* Supply the information about the device the frontend needs */
643again:
644 err = xenbus_transaction_start(&xbt);
645 if (err) {
646 xenbus_dev_fatal(dev, err, "starting transaction");
647 return;
648 }
649
650 err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
651 if (err)
652 goto abort;
653
654 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
655 (unsigned long long)vbd_sz(&be->blkif->vbd));
656 if (err) {
657 xenbus_dev_fatal(dev, err, "writing %s/sectors",
658 dev->nodename);
659 goto abort;
660 }
661
662 /* FIXME: use a typename instead */
663 err = xenbus_printf(xbt, dev->nodename, "info", "%u",
664 be->blkif->vbd.type |
665 (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
666 if (err) {
667 xenbus_dev_fatal(dev, err, "writing %s/info",
668 dev->nodename);
669 goto abort;
670 }
671 err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
672 (unsigned long)
673 bdev_logical_block_size(be->blkif->vbd.bdev));
674 if (err) {
675 xenbus_dev_fatal(dev, err, "writing %s/sector-size",
676 dev->nodename);
677 goto abort;
678 }
679
680 err = xenbus_transaction_end(xbt, 0);
681 if (err == -EAGAIN)
682 goto again;
683 if (err)
684 xenbus_dev_fatal(dev, err, "ending transaction");
685
686 err = xenbus_switch_state(dev, XenbusStateConnected);
687 if (err)
688 xenbus_dev_fatal(dev, err, "switching to Connected state",
689 dev->nodename);
690
691 return;
692 abort:
693 xenbus_transaction_end(xbt, 1);
694}
695
696
697static int connect_ring(struct backend_info *be)
698{
699 struct xenbus_device *dev = be->dev;
700 unsigned long ring_ref;
701 unsigned int evtchn;
702 char protocol[64] = "";
703 int err;
704
705 DPRINTK("%s", dev->otherend);
706
707 err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
708 &ring_ref, "event-channel", "%u", &evtchn, NULL);
709 if (err) {
710 xenbus_dev_fatal(dev, err,
711 "reading %s/ring-ref and event-channel",
712 dev->otherend);
713 return err;
714 }
715
716 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
717 err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
718 "%63s", protocol, NULL);
719 if (err)
720 strcpy(protocol, "unspecified, assuming native");
721 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
722 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
723 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
724 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
725 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
726 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
727 else {
728 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
729 return -1;
730 }
731 pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
732 ring_ref, evtchn, be->blkif->blk_protocol, protocol);
733
734 /* Map the shared frame, irq etc. */
735 err = xen_blkif_map(be->blkif, ring_ref, evtchn);
736 if (err) {
737 xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
738 ring_ref, evtchn);
739 return err;
740 }
741
742 return 0;
743}
744
745
746/* ** Driver Registration ** */
747
748
749static const struct xenbus_device_id xen_blkbk_ids[] = {
750 { "vbd" },
751 { "" }
752};
753
754
755static struct xenbus_driver xen_blkbk = {
756 .name = "vbd",
757 .owner = THIS_MODULE,
758 .ids = xen_blkbk_ids,
759 .probe = xen_blkbk_probe,
760 .remove = xen_blkbk_remove,
761 .otherend_changed = frontend_changed
762};
763
764
765int xen_blkif_xenbus_init(void)
766{
767 return xenbus_register_backend(&xen_blkbk);
768}
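
The protocol negotiation in connect_ring() above boils down to mapping the frontend's advertised ABI string onto one of three ring layouts, defaulting to the native ABI when the xenstore key is absent. Below is a minimal userspace sketch of that mapping, not part of the patch; the enum names and the "x86_32-abi"/"x86_64-abi" strings mirror the kernel's XEN_IO_PROTO_ABI_* macros but are restated here as assumptions so the example compiles on its own, and the backend's native-ABI-first check is simplified away.

/*
 * Illustrative sketch only.  Map a frontend protocol string to a ring
 * protocol, defaulting to native when the key was not written, as
 * connect_ring() does.  (The real backend compares against its own
 * native ABI string first, so a matching frontend selects NATIVE.)
 */
#include <stdio.h>
#include <string.h>

enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE,          /* same layout as the backend */
        BLKIF_PROTOCOL_X86_32,          /* 32-bit frontend layout */
        BLKIF_PROTOCOL_X86_64,          /* 64-bit frontend layout */
};

/* str == NULL stands in for "xenbus_gather() failed: key not written". */
static int parse_protocol(const char *str, enum blkif_protocol *out)
{
        *out = BLKIF_PROTOCOL_NATIVE;
        if (str == NULL)
                return 0;                       /* unspecified, assume native */
        if (strcmp(str, "x86_32-abi") == 0)     /* assumed ABI string */
                *out = BLKIF_PROTOCOL_X86_32;
        else if (strcmp(str, "x86_64-abi") == 0)
                *out = BLKIF_PROTOCOL_X86_64;
        else
                return -1;                      /* unknown frontend protocol */
        return 0;
}

int main(void)
{
        enum blkif_protocol proto;

        if (parse_protocol("x86_32-abi", &proto) == 0)
                printf("negotiated protocol %d\n", proto);
        return 0;
}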
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9cb8668ff5f4..b536a9cef917 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -97,6 +97,7 @@ struct blkfront_info
97 struct blk_shadow shadow[BLK_RING_SIZE]; 97 struct blk_shadow shadow[BLK_RING_SIZE];
98 unsigned long shadow_free; 98 unsigned long shadow_free;
99 unsigned int feature_flush; 99 unsigned int feature_flush;
100 unsigned int flush_op;
100 int is_ready; 101 int is_ready;
101}; 102};
102 103
@@ -250,8 +251,7 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
250 251
251/* 252/*
252 * Generate a Xen blkfront IO request from a blk layer request. Reads 253 * Generate a Xen blkfront IO request from a blk layer request. Reads
253 * and writes are handled as expected. Since we lack a loose flush 254 * and writes are handled as expected.
254 * request, we map flushes into a full ordered barrier.
255 * 255 *
256 * @req: a request struct 256 * @req: a request struct
257 */ 257 */
@@ -293,14 +293,13 @@ static int blkif_queue_request(struct request *req)
293 293
294 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { 294 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
295 /* 295 /*
296 * Ideally we could just do an unordered 296 * Ideally we can do an unordered flush-to-disk. In case the
 297 * flush-to-disk, but all we have is a full write 297 * backend only supports barriers, use that. A barrier request is
298 * barrier at the moment. However, a barrier write is
299 * a superset of FUA, so we can implement it the same 298 * a superset of FUA, so we can implement it the same
300 * way. (It's also a FLUSH+FUA, since it is 299 * way. (It's also a FLUSH+FUA, since it is
301 * guaranteed ordered WRT previous writes.) 300 * guaranteed ordered WRT previous writes.)
302 */ 301 */
303 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 302 ring_req->operation = info->flush_op;
304 } 303 }
305 304
306 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); 305 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -433,8 +432,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
433static void xlvbd_flush(struct blkfront_info *info) 432static void xlvbd_flush(struct blkfront_info *info)
434{ 433{
435 blk_queue_flush(info->rq, info->feature_flush); 434 blk_queue_flush(info->rq, info->feature_flush);
436 printk(KERN_INFO "blkfront: %s: barriers %s\n", 435 printk(KERN_INFO "blkfront: %s: %s: %s\n",
437 info->gd->disk_name, 436 info->gd->disk_name,
437 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
438 "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
439 "flush diskcache" : "barrier or flush"),
438 info->feature_flush ? "enabled" : "disabled"); 440 info->feature_flush ? "enabled" : "disabled");
439} 441}
440 442
@@ -720,15 +722,20 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
720 722
721 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; 723 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
722 switch (bret->operation) { 724 switch (bret->operation) {
725 case BLKIF_OP_FLUSH_DISKCACHE:
723 case BLKIF_OP_WRITE_BARRIER: 726 case BLKIF_OP_WRITE_BARRIER:
724 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 727 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
725 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", 728 printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
729 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
730 "barrier" : "flush disk cache",
726 info->gd->disk_name); 731 info->gd->disk_name);
727 error = -EOPNOTSUPP; 732 error = -EOPNOTSUPP;
728 } 733 }
729 if (unlikely(bret->status == BLKIF_RSP_ERROR && 734 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
730 info->shadow[id].req.nr_segments == 0)) { 735 info->shadow[id].req.nr_segments == 0)) {
731 printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n", 736 printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
737 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
738 "barrier" : "flush disk cache",
732 info->gd->disk_name); 739 info->gd->disk_name);
733 error = -EOPNOTSUPP; 740 error = -EOPNOTSUPP;
734 } 741 }
@@ -736,6 +743,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
736 if (error == -EOPNOTSUPP) 743 if (error == -EOPNOTSUPP)
737 error = 0; 744 error = 0;
738 info->feature_flush = 0; 745 info->feature_flush = 0;
746 info->flush_op = 0;
739 xlvbd_flush(info); 747 xlvbd_flush(info);
740 } 748 }
741 /* fall through */ 749 /* fall through */
@@ -1100,7 +1108,7 @@ static void blkfront_connect(struct blkfront_info *info)
1100 unsigned long sector_size; 1108 unsigned long sector_size;
1101 unsigned int binfo; 1109 unsigned int binfo;
1102 int err; 1110 int err;
1103 int barrier; 1111 int barrier, flush;
1104 1112
1105 switch (info->connected) { 1113 switch (info->connected) {
1106 case BLKIF_STATE_CONNECTED: 1114 case BLKIF_STATE_CONNECTED:
@@ -1140,8 +1148,11 @@ static void blkfront_connect(struct blkfront_info *info)
1140 return; 1148 return;
1141 } 1149 }
1142 1150
1151 info->feature_flush = 0;
1152 info->flush_op = 0;
1153
1143 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1154 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1144 "feature-barrier", "%lu", &barrier, 1155 "feature-barrier", "%d", &barrier,
1145 NULL); 1156 NULL);
1146 1157
1147 /* 1158 /*
@@ -1151,11 +1162,23 @@ static void blkfront_connect(struct blkfront_info *info)
1151 * 1162 *
1152 * If there are barriers, then we use flush. 1163 * If there are barriers, then we use flush.
1153 */ 1164 */
1154 info->feature_flush = 0; 1165 if (!err && barrier) {
1155
1156 if (!err && barrier)
1157 info->feature_flush = REQ_FLUSH | REQ_FUA; 1166 info->feature_flush = REQ_FLUSH | REQ_FUA;
1167 info->flush_op = BLKIF_OP_WRITE_BARRIER;
1168 }
1169 /*
1170 * And if there is "feature-flush-cache" use that above
1171 * barriers.
1172 */
1173 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1174 "feature-flush-cache", "%d", &flush,
1175 NULL);
1158 1176
1177 if (!err && flush) {
1178 info->feature_flush = REQ_FLUSH;
1179 info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
1180 }
1181
1159 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 1182 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
1160 if (err) { 1183 if (err) {
1161 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", 1184 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
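
The blkfront change above amounts to a small precedence rule when the backend advertises features: "feature-barrier" gives flush support emulated with a write barrier, while "feature-flush-cache", when present, wins and switches to a real BLKIF_OP_FLUSH_DISKCACHE. Here is a standalone sketch of that selection; the flag and opcode values are stand-ins defined locally, not the kernel's.

/*
 * Standalone sketch of the feature selection added to blkfront_connect().
 * Constants below are placeholders with arbitrary values.
 */
#include <stdio.h>

#define REQ_FLUSH                       (1 << 0)        /* stand-in flag */
#define REQ_FUA                         (1 << 1)        /* stand-in flag */
#define BLKIF_OP_WRITE_BARRIER          2               /* stand-in opcode */
#define BLKIF_OP_FLUSH_DISKCACHE        3               /* stand-in opcode */

struct features {
        unsigned int feature_flush;
        unsigned int flush_op;
};

static void pick_flush_op(struct features *f, int has_barrier, int has_flush)
{
        f->feature_flush = 0;
        f->flush_op = 0;

        if (has_barrier) {
                f->feature_flush = REQ_FLUSH | REQ_FUA;
                f->flush_op = BLKIF_OP_WRITE_BARRIER;
        }
        /* A real cache flush is preferred over emulating one with a barrier. */
        if (has_flush) {
                f->feature_flush = REQ_FLUSH;
                f->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
        }
}

int main(void)
{
        struct features f;

        pick_flush_op(&f, 1, 1);
        printf("flush_op=%u feature_flush=%#x\n", f.flush_op, f.feature_flush);
        return 0;
}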
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index e427fbe45999..ae15a4ddaa9b 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -625,7 +625,9 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
625 blk_queue_max_hw_sectors(q, 4096 / 512); 625 blk_queue_max_hw_sectors(q, 4096 / 512);
626 gendisk->queue = q; 626 gendisk->queue = q;
627 gendisk->fops = &viocd_fops; 627 gendisk->fops = &viocd_fops;
628 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; 628 gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
629 GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
630 gendisk->events = DISK_EVENT_MEDIA_CHANGE;
629 set_capacity(gendisk, 0); 631 set_capacity(gendisk, 0);
630 gendisk->private_data = d; 632 gendisk->private_data = d;
631 d->viocd_disk = gendisk; 633 d->viocd_disk = gendisk;
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index d72433f2d310..6e40072fbf67 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -5,6 +5,9 @@
5 * 5 *
6 * Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org> 6 * Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org>
7 * 7 *
8 * Hwmon integration:
9 * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
10 *
8 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any 13 * Free Software Foundation; either version 2, or (at your option) any
@@ -24,6 +27,8 @@
24#include <linux/dmi.h> 27#include <linux/dmi.h>
25#include <linux/capability.h> 28#include <linux/capability.h>
26#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h>
27#include <asm/uaccess.h> 32#include <asm/uaccess.h>
28#include <asm/io.h> 33#include <asm/io.h>
29 34
@@ -58,6 +63,7 @@
58 63
59static DEFINE_MUTEX(i8k_mutex); 64static DEFINE_MUTEX(i8k_mutex);
60static char bios_version[4]; 65static char bios_version[4];
66static struct device *i8k_hwmon_dev;
61 67
62MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)"); 68MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
63MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops"); 69MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
@@ -139,8 +145,8 @@ static int i8k_smm(struct smm_regs *regs)
139 "movl %%edi,20(%%rax)\n\t" 145 "movl %%edi,20(%%rax)\n\t"
140 "popq %%rdx\n\t" 146 "popq %%rdx\n\t"
141 "movl %%edx,0(%%rax)\n\t" 147 "movl %%edx,0(%%rax)\n\t"
142 "lahf\n\t" 148 "pushfq\n\t"
143 "shrl $8,%%eax\n\t" 149 "popq %%rax\n\t"
144 "andl $1,%%eax\n" 150 "andl $1,%%eax\n"
145 :"=a"(rc) 151 :"=a"(rc)
146 : "a"(regs) 152 : "a"(regs)
@@ -455,6 +461,152 @@ static int i8k_open_fs(struct inode *inode, struct file *file)
455 return single_open(file, i8k_proc_show, NULL); 461 return single_open(file, i8k_proc_show, NULL);
456} 462}
457 463
464
465/*
466 * Hwmon interface
467 */
468
469static ssize_t i8k_hwmon_show_temp(struct device *dev,
470 struct device_attribute *devattr,
471 char *buf)
472{
473 int cpu_temp;
474
475 cpu_temp = i8k_get_temp(0);
476 if (cpu_temp < 0)
477 return cpu_temp;
478 return sprintf(buf, "%d\n", cpu_temp * 1000);
479}
480
481static ssize_t i8k_hwmon_show_fan(struct device *dev,
482 struct device_attribute *devattr,
483 char *buf)
484{
485 int index = to_sensor_dev_attr(devattr)->index;
486 int fan_speed;
487
488 fan_speed = i8k_get_fan_speed(index);
489 if (fan_speed < 0)
490 return fan_speed;
491 return sprintf(buf, "%d\n", fan_speed);
492}
493
494static ssize_t i8k_hwmon_show_label(struct device *dev,
495 struct device_attribute *devattr,
496 char *buf)
497{
498 static const char *labels[4] = {
499 "i8k",
500 "CPU",
501 "Left Fan",
502 "Right Fan",
503 };
504 int index = to_sensor_dev_attr(devattr)->index;
505
506 return sprintf(buf, "%s\n", labels[index]);
507}
508
509static DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL);
510static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
511 I8K_FAN_LEFT);
512static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
513 I8K_FAN_RIGHT);
514static SENSOR_DEVICE_ATTR(name, S_IRUGO, i8k_hwmon_show_label, NULL, 0);
515static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1);
516static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2);
517static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 3);
518
519static void i8k_hwmon_remove_files(struct device *dev)
520{
521 device_remove_file(dev, &dev_attr_temp1_input);
522 device_remove_file(dev, &sensor_dev_attr_fan1_input.dev_attr);
523 device_remove_file(dev, &sensor_dev_attr_fan2_input.dev_attr);
524 device_remove_file(dev, &sensor_dev_attr_temp1_label.dev_attr);
525 device_remove_file(dev, &sensor_dev_attr_fan1_label.dev_attr);
526 device_remove_file(dev, &sensor_dev_attr_fan2_label.dev_attr);
527 device_remove_file(dev, &sensor_dev_attr_name.dev_attr);
528}
529
530static int __init i8k_init_hwmon(void)
531{
532 int err;
533
534 i8k_hwmon_dev = hwmon_device_register(NULL);
535 if (IS_ERR(i8k_hwmon_dev)) {
536 err = PTR_ERR(i8k_hwmon_dev);
537 i8k_hwmon_dev = NULL;
538 printk(KERN_ERR "i8k: hwmon registration failed (%d)\n", err);
539 return err;
540 }
541
542 /* Required name attribute */
543 err = device_create_file(i8k_hwmon_dev,
544 &sensor_dev_attr_name.dev_attr);
545 if (err)
546 goto exit_unregister;
547
548 /* CPU temperature attributes, if temperature reading is OK */
549 err = i8k_get_temp(0);
550 if (err < 0) {
551 dev_dbg(i8k_hwmon_dev,
552 "Not creating temperature attributes (%d)\n", err);
553 } else {
554 err = device_create_file(i8k_hwmon_dev, &dev_attr_temp1_input);
555 if (err)
556 goto exit_remove_files;
557 err = device_create_file(i8k_hwmon_dev,
558 &sensor_dev_attr_temp1_label.dev_attr);
559 if (err)
560 goto exit_remove_files;
561 }
562
563 /* Left fan attributes, if left fan is present */
564 err = i8k_get_fan_status(I8K_FAN_LEFT);
565 if (err < 0) {
566 dev_dbg(i8k_hwmon_dev,
567 "Not creating %s fan attributes (%d)\n", "left", err);
568 } else {
569 err = device_create_file(i8k_hwmon_dev,
570 &sensor_dev_attr_fan1_input.dev_attr);
571 if (err)
572 goto exit_remove_files;
573 err = device_create_file(i8k_hwmon_dev,
574 &sensor_dev_attr_fan1_label.dev_attr);
575 if (err)
576 goto exit_remove_files;
577 }
578
579 /* Right fan attributes, if right fan is present */
580 err = i8k_get_fan_status(I8K_FAN_RIGHT);
581 if (err < 0) {
582 dev_dbg(i8k_hwmon_dev,
583 "Not creating %s fan attributes (%d)\n", "right", err);
584 } else {
585 err = device_create_file(i8k_hwmon_dev,
586 &sensor_dev_attr_fan2_input.dev_attr);
587 if (err)
588 goto exit_remove_files;
589 err = device_create_file(i8k_hwmon_dev,
590 &sensor_dev_attr_fan2_label.dev_attr);
591 if (err)
592 goto exit_remove_files;
593 }
594
595 return 0;
596
597 exit_remove_files:
598 i8k_hwmon_remove_files(i8k_hwmon_dev);
599 exit_unregister:
600 hwmon_device_unregister(i8k_hwmon_dev);
601 return err;
602}
603
604static void __exit i8k_exit_hwmon(void)
605{
606 i8k_hwmon_remove_files(i8k_hwmon_dev);
607 hwmon_device_unregister(i8k_hwmon_dev);
608}
609
458static struct dmi_system_id __initdata i8k_dmi_table[] = { 610static struct dmi_system_id __initdata i8k_dmi_table[] = {
459 { 611 {
460 .ident = "Dell Inspiron", 612 .ident = "Dell Inspiron",
@@ -580,6 +732,7 @@ static int __init i8k_probe(void)
580static int __init i8k_init(void) 732static int __init i8k_init(void)
581{ 733{
582 struct proc_dir_entry *proc_i8k; 734 struct proc_dir_entry *proc_i8k;
735 int err;
583 736
 584 /* Are we running on a supported laptop? */ 737
585 if (i8k_probe()) 738 if (i8k_probe())
@@ -590,15 +743,24 @@ static int __init i8k_init(void)
590 if (!proc_i8k) 743 if (!proc_i8k)
591 return -ENOENT; 744 return -ENOENT;
592 745
746 err = i8k_init_hwmon();
747 if (err)
748 goto exit_remove_proc;
749
593 printk(KERN_INFO 750 printk(KERN_INFO
594 "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", 751 "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
595 I8K_VERSION); 752 I8K_VERSION);
596 753
597 return 0; 754 return 0;
755
756 exit_remove_proc:
757 remove_proc_entry("i8k", NULL);
758 return err;
598} 759}
599 760
600static void __exit i8k_exit(void) 761static void __exit i8k_exit(void)
601{ 762{
763 i8k_exit_hwmon();
602 remove_proc_entry("i8k", NULL); 764 remove_proc_entry("i8k", NULL);
603} 765}
604 766
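
For reference, the hwmon plumbing added to i8k follows the usual register-then-populate pattern: hwmon_device_register() creates the class device, a mandatory "name" attribute is created, and everything is unwound on failure. A heavily trimmed, module-style sketch of just that skeleton follows; the device name and attribute value are invented, and it uses only the calls already visible in the patch.

/*
 * Hedged sketch of the hwmon registration skeleton; illustrative only.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/hwmon.h>

static struct device *demo_hwmon_dev;

static ssize_t demo_show_name(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "demo\n");
}
static DEVICE_ATTR(name, S_IRUGO, demo_show_name, NULL);

static int __init demo_init(void)
{
        int err;

        demo_hwmon_dev = hwmon_device_register(NULL);
        if (IS_ERR(demo_hwmon_dev))
                return PTR_ERR(demo_hwmon_dev);

        /* "name" is required so userspace (libsensors) can identify the chip. */
        err = device_create_file(demo_hwmon_dev, &dev_attr_name);
        if (err)
                hwmon_device_unregister(demo_hwmon_dev);
        return err;
}

static void __exit demo_exit(void)
{
        device_remove_file(demo_hwmon_dev, &dev_attr_name);
        hwmon_device_unregister(demo_hwmon_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");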
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 38223e93aa98..58c0e6387cf7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -36,6 +36,7 @@
36#include <asm/system.h> 36#include <asm/system.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/seq_file.h>
39#include <linux/spinlock.h> 40#include <linux/spinlock.h>
40#include <linux/mutex.h> 41#include <linux/mutex.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
@@ -1896,102 +1897,128 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1896EXPORT_SYMBOL(ipmi_request_supply_msgs); 1897EXPORT_SYMBOL(ipmi_request_supply_msgs);
1897 1898
1898#ifdef CONFIG_PROC_FS 1899#ifdef CONFIG_PROC_FS
1899static int ipmb_file_read_proc(char *page, char **start, off_t off, 1900static int smi_ipmb_proc_show(struct seq_file *m, void *v)
1900 int count, int *eof, void *data)
1901{ 1901{
1902 char *out = (char *) page; 1902 ipmi_smi_t intf = m->private;
1903 ipmi_smi_t intf = data;
1904 int i; 1903 int i;
1905 int rv = 0;
1906 1904
1907 for (i = 0; i < IPMI_MAX_CHANNELS; i++) 1905 seq_printf(m, "%x", intf->channels[0].address);
1908 rv += sprintf(out+rv, "%x ", intf->channels[i].address); 1906 for (i = 1; i < IPMI_MAX_CHANNELS; i++)
1909 out[rv-1] = '\n'; /* Replace the final space with a newline */ 1907 seq_printf(m, " %x", intf->channels[i].address);
1910 out[rv] = '\0'; 1908 return seq_putc(m, '\n');
1911 rv++;
1912 return rv;
1913} 1909}
1914 1910
1915static int version_file_read_proc(char *page, char **start, off_t off, 1911static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
1916 int count, int *eof, void *data)
1917{ 1912{
1918 char *out = (char *) page; 1913 return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
1919 ipmi_smi_t intf = data; 1914}
1920 1915
1921 return sprintf(out, "%u.%u\n", 1916static const struct file_operations smi_ipmb_proc_ops = {
1917 .open = smi_ipmb_proc_open,
1918 .read = seq_read,
1919 .llseek = seq_lseek,
1920 .release = single_release,
1921};
1922
1923static int smi_version_proc_show(struct seq_file *m, void *v)
1924{
1925 ipmi_smi_t intf = m->private;
1926
1927 return seq_printf(m, "%u.%u\n",
1922 ipmi_version_major(&intf->bmc->id), 1928 ipmi_version_major(&intf->bmc->id),
1923 ipmi_version_minor(&intf->bmc->id)); 1929 ipmi_version_minor(&intf->bmc->id));
1924} 1930}
1925 1931
1926static int stat_file_read_proc(char *page, char **start, off_t off, 1932static int smi_version_proc_open(struct inode *inode, struct file *file)
1927 int count, int *eof, void *data)
1928{ 1933{
1929 char *out = (char *) page; 1934 return single_open(file, smi_version_proc_show, PDE(inode)->data);
1930 ipmi_smi_t intf = data; 1935}
1936
1937static const struct file_operations smi_version_proc_ops = {
1938 .open = smi_version_proc_open,
1939 .read = seq_read,
1940 .llseek = seq_lseek,
1941 .release = single_release,
1942};
1931 1943
1932 out += sprintf(out, "sent_invalid_commands: %u\n", 1944static int smi_stats_proc_show(struct seq_file *m, void *v)
1945{
1946 ipmi_smi_t intf = m->private;
1947
1948 seq_printf(m, "sent_invalid_commands: %u\n",
1933 ipmi_get_stat(intf, sent_invalid_commands)); 1949 ipmi_get_stat(intf, sent_invalid_commands));
1934 out += sprintf(out, "sent_local_commands: %u\n", 1950 seq_printf(m, "sent_local_commands: %u\n",
1935 ipmi_get_stat(intf, sent_local_commands)); 1951 ipmi_get_stat(intf, sent_local_commands));
1936 out += sprintf(out, "handled_local_responses: %u\n", 1952 seq_printf(m, "handled_local_responses: %u\n",
1937 ipmi_get_stat(intf, handled_local_responses)); 1953 ipmi_get_stat(intf, handled_local_responses));
1938 out += sprintf(out, "unhandled_local_responses: %u\n", 1954 seq_printf(m, "unhandled_local_responses: %u\n",
1939 ipmi_get_stat(intf, unhandled_local_responses)); 1955 ipmi_get_stat(intf, unhandled_local_responses));
1940 out += sprintf(out, "sent_ipmb_commands: %u\n", 1956 seq_printf(m, "sent_ipmb_commands: %u\n",
1941 ipmi_get_stat(intf, sent_ipmb_commands)); 1957 ipmi_get_stat(intf, sent_ipmb_commands));
1942 out += sprintf(out, "sent_ipmb_command_errs: %u\n", 1958 seq_printf(m, "sent_ipmb_command_errs: %u\n",
1943 ipmi_get_stat(intf, sent_ipmb_command_errs)); 1959 ipmi_get_stat(intf, sent_ipmb_command_errs));
1944 out += sprintf(out, "retransmitted_ipmb_commands: %u\n", 1960 seq_printf(m, "retransmitted_ipmb_commands: %u\n",
1945 ipmi_get_stat(intf, retransmitted_ipmb_commands)); 1961 ipmi_get_stat(intf, retransmitted_ipmb_commands));
1946 out += sprintf(out, "timed_out_ipmb_commands: %u\n", 1962 seq_printf(m, "timed_out_ipmb_commands: %u\n",
1947 ipmi_get_stat(intf, timed_out_ipmb_commands)); 1963 ipmi_get_stat(intf, timed_out_ipmb_commands));
1948 out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n", 1964 seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
1949 ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); 1965 ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
1950 out += sprintf(out, "sent_ipmb_responses: %u\n", 1966 seq_printf(m, "sent_ipmb_responses: %u\n",
1951 ipmi_get_stat(intf, sent_ipmb_responses)); 1967 ipmi_get_stat(intf, sent_ipmb_responses));
1952 out += sprintf(out, "handled_ipmb_responses: %u\n", 1968 seq_printf(m, "handled_ipmb_responses: %u\n",
1953 ipmi_get_stat(intf, handled_ipmb_responses)); 1969 ipmi_get_stat(intf, handled_ipmb_responses));
1954 out += sprintf(out, "invalid_ipmb_responses: %u\n", 1970 seq_printf(m, "invalid_ipmb_responses: %u\n",
1955 ipmi_get_stat(intf, invalid_ipmb_responses)); 1971 ipmi_get_stat(intf, invalid_ipmb_responses));
1956 out += sprintf(out, "unhandled_ipmb_responses: %u\n", 1972 seq_printf(m, "unhandled_ipmb_responses: %u\n",
1957 ipmi_get_stat(intf, unhandled_ipmb_responses)); 1973 ipmi_get_stat(intf, unhandled_ipmb_responses));
1958 out += sprintf(out, "sent_lan_commands: %u\n", 1974 seq_printf(m, "sent_lan_commands: %u\n",
1959 ipmi_get_stat(intf, sent_lan_commands)); 1975 ipmi_get_stat(intf, sent_lan_commands));
1960 out += sprintf(out, "sent_lan_command_errs: %u\n", 1976 seq_printf(m, "sent_lan_command_errs: %u\n",
1961 ipmi_get_stat(intf, sent_lan_command_errs)); 1977 ipmi_get_stat(intf, sent_lan_command_errs));
1962 out += sprintf(out, "retransmitted_lan_commands: %u\n", 1978 seq_printf(m, "retransmitted_lan_commands: %u\n",
1963 ipmi_get_stat(intf, retransmitted_lan_commands)); 1979 ipmi_get_stat(intf, retransmitted_lan_commands));
1964 out += sprintf(out, "timed_out_lan_commands: %u\n", 1980 seq_printf(m, "timed_out_lan_commands: %u\n",
1965 ipmi_get_stat(intf, timed_out_lan_commands)); 1981 ipmi_get_stat(intf, timed_out_lan_commands));
1966 out += sprintf(out, "sent_lan_responses: %u\n", 1982 seq_printf(m, "sent_lan_responses: %u\n",
1967 ipmi_get_stat(intf, sent_lan_responses)); 1983 ipmi_get_stat(intf, sent_lan_responses));
1968 out += sprintf(out, "handled_lan_responses: %u\n", 1984 seq_printf(m, "handled_lan_responses: %u\n",
1969 ipmi_get_stat(intf, handled_lan_responses)); 1985 ipmi_get_stat(intf, handled_lan_responses));
1970 out += sprintf(out, "invalid_lan_responses: %u\n", 1986 seq_printf(m, "invalid_lan_responses: %u\n",
1971 ipmi_get_stat(intf, invalid_lan_responses)); 1987 ipmi_get_stat(intf, invalid_lan_responses));
1972 out += sprintf(out, "unhandled_lan_responses: %u\n", 1988 seq_printf(m, "unhandled_lan_responses: %u\n",
1973 ipmi_get_stat(intf, unhandled_lan_responses)); 1989 ipmi_get_stat(intf, unhandled_lan_responses));
1974 out += sprintf(out, "handled_commands: %u\n", 1990 seq_printf(m, "handled_commands: %u\n",
1975 ipmi_get_stat(intf, handled_commands)); 1991 ipmi_get_stat(intf, handled_commands));
1976 out += sprintf(out, "invalid_commands: %u\n", 1992 seq_printf(m, "invalid_commands: %u\n",
1977 ipmi_get_stat(intf, invalid_commands)); 1993 ipmi_get_stat(intf, invalid_commands));
1978 out += sprintf(out, "unhandled_commands: %u\n", 1994 seq_printf(m, "unhandled_commands: %u\n",
1979 ipmi_get_stat(intf, unhandled_commands)); 1995 ipmi_get_stat(intf, unhandled_commands));
1980 out += sprintf(out, "invalid_events: %u\n", 1996 seq_printf(m, "invalid_events: %u\n",
1981 ipmi_get_stat(intf, invalid_events)); 1997 ipmi_get_stat(intf, invalid_events));
1982 out += sprintf(out, "events: %u\n", 1998 seq_printf(m, "events: %u\n",
1983 ipmi_get_stat(intf, events)); 1999 ipmi_get_stat(intf, events));
1984 out += sprintf(out, "failed rexmit LAN msgs: %u\n", 2000 seq_printf(m, "failed rexmit LAN msgs: %u\n",
1985 ipmi_get_stat(intf, dropped_rexmit_lan_commands)); 2001 ipmi_get_stat(intf, dropped_rexmit_lan_commands));
1986 out += sprintf(out, "failed rexmit IPMB msgs: %u\n", 2002 seq_printf(m, "failed rexmit IPMB msgs: %u\n",
1987 ipmi_get_stat(intf, dropped_rexmit_ipmb_commands)); 2003 ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
2004 return 0;
2005}
1988 2006
1989 return (out - ((char *) page)); 2007static int smi_stats_proc_open(struct inode *inode, struct file *file)
2008{
2009 return single_open(file, smi_stats_proc_show, PDE(inode)->data);
1990} 2010}
2011
2012static const struct file_operations smi_stats_proc_ops = {
2013 .open = smi_stats_proc_open,
2014 .read = seq_read,
2015 .llseek = seq_lseek,
2016 .release = single_release,
2017};
1991#endif /* CONFIG_PROC_FS */ 2018#endif /* CONFIG_PROC_FS */
1992 2019
1993int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 2020int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1994 read_proc_t *read_proc, 2021 const struct file_operations *proc_ops,
1995 void *data) 2022 void *data)
1996{ 2023{
1997 int rv = 0; 2024 int rv = 0;
@@ -2010,15 +2037,12 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
2010 } 2037 }
2011 strcpy(entry->name, name); 2038 strcpy(entry->name, name);
2012 2039
2013 file = create_proc_entry(name, 0, smi->proc_dir); 2040 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
2014 if (!file) { 2041 if (!file) {
2015 kfree(entry->name); 2042 kfree(entry->name);
2016 kfree(entry); 2043 kfree(entry);
2017 rv = -ENOMEM; 2044 rv = -ENOMEM;
2018 } else { 2045 } else {
2019 file->data = data;
2020 file->read_proc = read_proc;
2021
2022 mutex_lock(&smi->proc_entry_lock); 2046 mutex_lock(&smi->proc_entry_lock);
2023 /* Stick it on the list. */ 2047 /* Stick it on the list. */
2024 entry->next = smi->proc_entries; 2048 entry->next = smi->proc_entries;
@@ -2043,17 +2067,17 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
2043 2067
2044 if (rv == 0) 2068 if (rv == 0)
2045 rv = ipmi_smi_add_proc_entry(smi, "stats", 2069 rv = ipmi_smi_add_proc_entry(smi, "stats",
2046 stat_file_read_proc, 2070 &smi_stats_proc_ops,
2047 smi); 2071 smi);
2048 2072
2049 if (rv == 0) 2073 if (rv == 0)
2050 rv = ipmi_smi_add_proc_entry(smi, "ipmb", 2074 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
2051 ipmb_file_read_proc, 2075 &smi_ipmb_proc_ops,
2052 smi); 2076 smi);
2053 2077
2054 if (rv == 0) 2078 if (rv == 0)
2055 rv = ipmi_smi_add_proc_entry(smi, "version", 2079 rv = ipmi_smi_add_proc_entry(smi, "version",
2056 version_file_read_proc, 2080 &smi_version_proc_ops,
2057 smi); 2081 smi);
2058#endif /* CONFIG_PROC_FS */ 2082#endif /* CONFIG_PROC_FS */
2059 2083
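
The ipmi conversion above is the standard read_proc to seq_file migration: a show routine driven by single_open(), a const file_operations built from seq_read/seq_lseek/single_release, and proc_create_data() to attach the private pointer later recovered through PDE(inode)->data. A bare-bones sketch of the same skeleton, with placeholder "demo_" names and a hypothetical int being exported:

/*
 * Skeleton of the seq_file pattern used by the conversion; names are
 * placeholders, not part of the patch.
 */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
        /* m->private is whatever was passed to proc_create_data(). */
        seq_printf(m, "value: %d\n", *(int *)m->private);
        return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_proc_show, PDE(inode)->data);
}

static const struct file_operations demo_proc_ops = {
        .open           = demo_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* Registration, e.g. from a driver init path: */
/*      proc_create_data("demo", 0, NULL, &demo_proc_ops, &some_int);  */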
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 64c6b8530615..9397ab49b72e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -43,6 +43,7 @@
43#include <linux/moduleparam.h> 43#include <linux/moduleparam.h>
44#include <asm/system.h> 44#include <asm/system.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <linux/seq_file.h>
46#include <linux/timer.h> 47#include <linux/timer.h>
47#include <linux/errno.h> 48#include <linux/errno.h>
48#include <linux/spinlock.h> 49#include <linux/spinlock.h>
@@ -2805,54 +2806,73 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2805 return rv; 2806 return rv;
2806} 2807}
2807 2808
2808static int type_file_read_proc(char *page, char **start, off_t off, 2809static int smi_type_proc_show(struct seq_file *m, void *v)
2809 int count, int *eof, void *data)
2810{ 2810{
2811 struct smi_info *smi = data; 2811 struct smi_info *smi = m->private;
2812 2812
2813 return sprintf(page, "%s\n", si_to_str[smi->si_type]); 2813 return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
2814} 2814}
2815 2815
2816static int stat_file_read_proc(char *page, char **start, off_t off, 2816static int smi_type_proc_open(struct inode *inode, struct file *file)
2817 int count, int *eof, void *data)
2818{ 2817{
2819 char *out = (char *) page; 2818 return single_open(file, smi_type_proc_show, PDE(inode)->data);
2820 struct smi_info *smi = data; 2819}
2820
2821static const struct file_operations smi_type_proc_ops = {
2822 .open = smi_type_proc_open,
2823 .read = seq_read,
2824 .llseek = seq_lseek,
2825 .release = single_release,
2826};
2827
2828static int smi_si_stats_proc_show(struct seq_file *m, void *v)
2829{
2830 struct smi_info *smi = m->private;
2821 2831
2822 out += sprintf(out, "interrupts_enabled: %d\n", 2832 seq_printf(m, "interrupts_enabled: %d\n",
2823 smi->irq && !smi->interrupt_disabled); 2833 smi->irq && !smi->interrupt_disabled);
2824 out += sprintf(out, "short_timeouts: %u\n", 2834 seq_printf(m, "short_timeouts: %u\n",
2825 smi_get_stat(smi, short_timeouts)); 2835 smi_get_stat(smi, short_timeouts));
2826 out += sprintf(out, "long_timeouts: %u\n", 2836 seq_printf(m, "long_timeouts: %u\n",
2827 smi_get_stat(smi, long_timeouts)); 2837 smi_get_stat(smi, long_timeouts));
2828 out += sprintf(out, "idles: %u\n", 2838 seq_printf(m, "idles: %u\n",
2829 smi_get_stat(smi, idles)); 2839 smi_get_stat(smi, idles));
2830 out += sprintf(out, "interrupts: %u\n", 2840 seq_printf(m, "interrupts: %u\n",
2831 smi_get_stat(smi, interrupts)); 2841 smi_get_stat(smi, interrupts));
2832 out += sprintf(out, "attentions: %u\n", 2842 seq_printf(m, "attentions: %u\n",
2833 smi_get_stat(smi, attentions)); 2843 smi_get_stat(smi, attentions));
2834 out += sprintf(out, "flag_fetches: %u\n", 2844 seq_printf(m, "flag_fetches: %u\n",
2835 smi_get_stat(smi, flag_fetches)); 2845 smi_get_stat(smi, flag_fetches));
2836 out += sprintf(out, "hosed_count: %u\n", 2846 seq_printf(m, "hosed_count: %u\n",
2837 smi_get_stat(smi, hosed_count)); 2847 smi_get_stat(smi, hosed_count));
2838 out += sprintf(out, "complete_transactions: %u\n", 2848 seq_printf(m, "complete_transactions: %u\n",
2839 smi_get_stat(smi, complete_transactions)); 2849 smi_get_stat(smi, complete_transactions));
2840 out += sprintf(out, "events: %u\n", 2850 seq_printf(m, "events: %u\n",
2841 smi_get_stat(smi, events)); 2851 smi_get_stat(smi, events));
2842 out += sprintf(out, "watchdog_pretimeouts: %u\n", 2852 seq_printf(m, "watchdog_pretimeouts: %u\n",
2843 smi_get_stat(smi, watchdog_pretimeouts)); 2853 smi_get_stat(smi, watchdog_pretimeouts));
2844 out += sprintf(out, "incoming_messages: %u\n", 2854 seq_printf(m, "incoming_messages: %u\n",
2845 smi_get_stat(smi, incoming_messages)); 2855 smi_get_stat(smi, incoming_messages));
2856 return 0;
2857}
2846 2858
2847 return out - page; 2859static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
2860{
2861 return single_open(file, smi_si_stats_proc_show, PDE(inode)->data);
2848} 2862}
2849 2863
2850static int param_read_proc(char *page, char **start, off_t off, 2864static const struct file_operations smi_si_stats_proc_ops = {
2851 int count, int *eof, void *data) 2865 .open = smi_si_stats_proc_open,
2866 .read = seq_read,
2867 .llseek = seq_lseek,
2868 .release = single_release,
2869};
2870
2871static int smi_params_proc_show(struct seq_file *m, void *v)
2852{ 2872{
2853 struct smi_info *smi = data; 2873 struct smi_info *smi = m->private;
2854 2874
2855 return sprintf(page, 2875 return seq_printf(m,
2856 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 2876 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2857 si_to_str[smi->si_type], 2877 si_to_str[smi->si_type],
2858 addr_space_to_str[smi->io.addr_type], 2878 addr_space_to_str[smi->io.addr_type],
@@ -2864,6 +2884,18 @@ static int param_read_proc(char *page, char **start, off_t off,
2864 smi->slave_addr); 2884 smi->slave_addr);
2865} 2885}
2866 2886
2887static int smi_params_proc_open(struct inode *inode, struct file *file)
2888{
2889 return single_open(file, smi_params_proc_show, PDE(inode)->data);
2890}
2891
2892static const struct file_operations smi_params_proc_ops = {
2893 .open = smi_params_proc_open,
2894 .read = seq_read,
2895 .llseek = seq_lseek,
2896 .release = single_release,
2897};
2898
2867/* 2899/*
2868 * oem_data_avail_to_receive_msg_avail 2900 * oem_data_avail_to_receive_msg_avail
2869 * @info - smi_info structure with msg_flags set 2901 * @info - smi_info structure with msg_flags set
@@ -3257,7 +3289,7 @@ static int try_smi_init(struct smi_info *new_smi)
3257 } 3289 }
3258 3290
3259 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", 3291 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
3260 type_file_read_proc, 3292 &smi_type_proc_ops,
3261 new_smi); 3293 new_smi);
3262 if (rv) { 3294 if (rv) {
3263 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3295 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
@@ -3265,7 +3297,7 @@ static int try_smi_init(struct smi_info *new_smi)
3265 } 3297 }
3266 3298
3267 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", 3299 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
3268 stat_file_read_proc, 3300 &smi_si_stats_proc_ops,
3269 new_smi); 3301 new_smi);
3270 if (rv) { 3302 if (rv) {
3271 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3303 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
@@ -3273,7 +3305,7 @@ static int try_smi_init(struct smi_info *new_smi)
3273 } 3305 }
3274 3306
3275 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", 3307 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
3276 param_read_proc, 3308 &smi_params_proc_ops,
3277 new_smi); 3309 new_smi);
3278 if (rv) { 3310 if (rv) {
3279 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3311 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 966a95bc974b..25d139c9dbed 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -271,14 +271,13 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
271 pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 271 pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
272 vdata_size = sizeof(struct vma_data) + pages * sizeof(long); 272 vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
273 if (vdata_size <= PAGE_SIZE) 273 if (vdata_size <= PAGE_SIZE)
274 vdata = kmalloc(vdata_size, GFP_KERNEL); 274 vdata = kzalloc(vdata_size, GFP_KERNEL);
275 else { 275 else {
276 vdata = vmalloc(vdata_size); 276 vdata = vzalloc(vdata_size);
277 flags = VMD_VMALLOCED; 277 flags = VMD_VMALLOCED;
278 } 278 }
279 if (!vdata) 279 if (!vdata)
280 return -ENOMEM; 280 return -ENOMEM;
281 memset(vdata, 0, vdata_size);
282 281
283 vdata->vm_start = vma->vm_start; 282 vdata->vm_start = vma->vm_start;
284 vdata->vm_end = vma->vm_end; 283 vdata->vm_end = vma->vm_end;
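
The mspec hunk is the usual allocate-and-zero cleanup: kmalloc/vmalloc followed by memset() becomes kzalloc()/vzalloc(). A minimal sketch of the pattern, assuming the same PAGE_SIZE threshold; note that mspec itself records a VMD_VMALLOCED flag rather than re-testing the size when freeing.

/*
 * Illustrative helpers only: zeroing allocation with a size-based choice
 * of allocator, and the matching free.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_zeroed(size_t size)
{
        /* Small allocations come from the slab, large ones from vmalloc. */
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
        return vzalloc(size);
}

static void free_zeroed(void *p, size_t size)
{
        /* Must mirror the allocator choice made above. */
        if (size <= PAGE_SIZE)
                kfree(p);
        else
                vfree(p);
}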
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index f176dbaeb15a..3fcf80ff12f2 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -457,6 +457,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
457 return -ENODEV; 457 return -ENODEV;
458 458
459 modes = port->modes; 459 modes = port->modes;
460 parport_put_port(port);
460 if (copy_to_user (argp, &modes, sizeof (modes))) { 461 if (copy_to_user (argp, &modes, sizeof (modes))) {
461 return -EFAULT; 462 return -EFAULT;
462 } 463 }
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index c7f1a6f16b6e..e2fc2d21fa61 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -39,3 +39,5 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
39 39
40##################################################################################d 40##################################################################################d
41 41
42# ARM SoC drivers
43obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
new file mode 100644
index 000000000000..d90456a809f9
--- /dev/null
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
7 * Author: Martin Persson <martin.persson@stericsson.com>
8 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/cpufreq.h>
13#include <linux/delay.h>
14#include <linux/slab.h>
15#include <linux/mfd/db8500-prcmu.h>
16#include <mach/id.h>
17
18static struct cpufreq_frequency_table freq_table[] = {
19 [0] = {
20 .index = 0,
21 .frequency = 300000,
22 },
23 [1] = {
24 .index = 1,
25 .frequency = 600000,
26 },
27 [2] = {
28 /* Used for MAX_OPP, if available */
29 .index = 2,
30 .frequency = CPUFREQ_TABLE_END,
31 },
32 [3] = {
33 .index = 3,
34 .frequency = CPUFREQ_TABLE_END,
35 },
36};
37
38static enum arm_opp idx2opp[] = {
39 ARM_50_OPP,
40 ARM_100_OPP,
41 ARM_MAX_OPP
42};
43
44static struct freq_attr *db8500_cpufreq_attr[] = {
45 &cpufreq_freq_attr_scaling_available_freqs,
46 NULL,
47};
48
49static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
50{
51 return cpufreq_frequency_table_verify(policy, freq_table);
52}
53
54static int db8500_cpufreq_target(struct cpufreq_policy *policy,
55 unsigned int target_freq,
56 unsigned int relation)
57{
58 struct cpufreq_freqs freqs;
59 unsigned int idx;
60
 61 /* clamp the target frequency to the supported range */
62 if (target_freq < policy->cpuinfo.min_freq)
63 target_freq = policy->cpuinfo.min_freq;
64 if (target_freq > policy->cpuinfo.max_freq)
65 target_freq = policy->cpuinfo.max_freq;
66
67 /* Lookup the next frequency */
68 if (cpufreq_frequency_table_target
69 (policy, freq_table, target_freq, relation, &idx)) {
70 return -EINVAL;
71 }
72
73 freqs.old = policy->cur;
74 freqs.new = freq_table[idx].frequency;
75 freqs.cpu = policy->cpu;
76
77 if (freqs.old == freqs.new)
78 return 0;
79
80 /* pre-change notification */
81 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
82
83 /* request the PRCM unit for opp change */
84 if (prcmu_set_arm_opp(idx2opp[idx])) {
85 pr_err("db8500-cpufreq: Failed to set OPP level\n");
86 return -EINVAL;
87 }
88
89 /* post change notification */
90 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
91
92 return 0;
93}
94
95static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
96{
97 int i;
98 /* request the prcm to get the current ARM opp */
99 for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++)
100 ;
101 return freq_table[i].frequency;
102}
103
104static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
105{
106 int res;
107 int i;
108
109 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
110
111 if (cpu_is_u8500v2() && !prcmu_is_u8400()) {
112 freq_table[0].frequency = 400000;
113 freq_table[1].frequency = 800000;
114 if (prcmu_has_arm_maxopp())
115 freq_table[2].frequency = 1000000;
116 }
117
118 /* get policy fields based on the table */
119 res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
120 if (!res)
121 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
122 else {
123 pr_err("db8500-cpufreq : Failed to read policy table\n");
124 return res;
125 }
126
127 policy->min = policy->cpuinfo.min_freq;
128 policy->max = policy->cpuinfo.max_freq;
129 policy->cur = db8500_cpufreq_getspeed(policy->cpu);
130
131 for (i = 0; freq_table[i].frequency != policy->cur; i++)
132 ;
133
134 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
135
136 /*
137 * FIXME : Need to take time measurement across the target()
138 * function with no/some/all drivers in the notification
139 * list.
140 */
141 policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
142
143 /* policy sharing between dual CPUs */
144 cpumask_copy(policy->cpus, &cpu_present_map);
145
146 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
147
148 return 0;
149}
150
151static struct cpufreq_driver db8500_cpufreq_driver = {
152 .flags = CPUFREQ_STICKY,
153 .verify = db8500_cpufreq_verify_speed,
154 .target = db8500_cpufreq_target,
155 .get = db8500_cpufreq_getspeed,
156 .init = db8500_cpufreq_init,
157 .name = "DB8500",
158 .attr = db8500_cpufreq_attr,
159};
160
161static int __init db8500_cpufreq_register(void)
162{
163 if (!cpu_is_u8500v20_or_later())
164 return -ENODEV;
165
166 pr_info("cpufreq for DB8500 started\n");
167 return cpufreq_register_driver(&db8500_cpufreq_driver);
168}
169device_initcall(db8500_cpufreq_register);
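
In the new db8500 driver each cpufreq table index is tied one-to-one to a PRCMU operating point through idx2opp[], and db8500_cpufreq_getspeed() walks the table until the OPP reported by hardware matches. The standalone sketch below shows that mapping with made-up enum values and frequencies; unlike the driver's loop, it bounds the walk instead of assuming a match always exists.

/*
 * Illustrative sketch of the index <-> OPP mapping; values are stand-ins,
 * the real ones live in freq_table[] and mfd/db8500-prcmu.h.
 */
#include <stdio.h>

enum demo_opp { DEMO_50_OPP, DEMO_100_OPP, DEMO_MAX_OPP };

static const unsigned int freq_khz[] = { 300000, 600000, 1000000 };
static const enum demo_opp idx2opp[] = { DEMO_50_OPP, DEMO_100_OPP, DEMO_MAX_OPP };

static unsigned int opp_to_freq(enum demo_opp cur)
{
        unsigned int i;

        for (i = 0; i < sizeof(idx2opp) / sizeof(idx2opp[0]); i++)
                if (idx2opp[i] == cur)
                        return freq_khz[i];
        return 0;       /* no match: bail out rather than loop forever */
}

int main(void)
{
        printf("%u kHz\n", opp_to_freq(DEMO_100_OPP));
        return 0;
}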
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index dcc1b2139fff..636e40925b16 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -213,12 +213,17 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
213 struct sh_dmae_device, common); 213 struct sh_dmae_device, common);
214 struct sh_dmae_pdata *pdata = shdev->pdata; 214 struct sh_dmae_pdata *pdata = shdev->pdata;
215 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 215 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
216 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); 216 u16 __iomem *addr = shdev->dmars;
217 int shift = chan_pdata->dmars_bit; 217 int shift = chan_pdata->dmars_bit;
218 218
219 if (dmae_is_busy(sh_chan)) 219 if (dmae_is_busy(sh_chan))
220 return -EBUSY; 220 return -EBUSY;
221 221
222 /* in the case of a missing DMARS resource use first memory window */
223 if (!addr)
224 addr = (u16 __iomem *)shdev->chan_reg;
225 addr += chan_pdata->dmars / sizeof(u16);
226
222 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), 227 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
223 addr); 228 addr);
224 229
@@ -1078,7 +1083,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1078 unsigned long irqflags = IRQF_DISABLED, 1083 unsigned long irqflags = IRQF_DISABLED,
1079 chan_flag[SH_DMAC_MAX_CHANNELS] = {}; 1084 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1080 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; 1085 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
1081 int err, i, irq_cnt = 0, irqres = 0; 1086 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
1082 struct sh_dmae_device *shdev; 1087 struct sh_dmae_device *shdev;
1083 struct resource *chan, *dmars, *errirq_res, *chanirq_res; 1088 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
1084 1089
@@ -1087,7 +1092,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1087 return -ENODEV; 1092 return -ENODEV;
1088 1093
1089 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1094 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1090 /* DMARS area is optional, if absent, this controller cannot do slave DMA */ 1095 /* DMARS area is optional */
1091 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1096 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1092 /* 1097 /*
1093 * IRQ resources: 1098 * IRQ resources:
@@ -1154,7 +1159,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1154 INIT_LIST_HEAD(&shdev->common.channels); 1159 INIT_LIST_HEAD(&shdev->common.channels);
1155 1160
1156 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); 1161 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
1157 if (dmars) 1162 if (pdata->slave && pdata->slave_num)
1158 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 1163 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
1159 1164
1160 shdev->common.device_alloc_chan_resources 1165 shdev->common.device_alloc_chan_resources
@@ -1203,8 +1208,13 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1203 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { 1208 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1204 /* Special case - all multiplexed */ 1209 /* Special case - all multiplexed */
1205 for (; irq_cnt < pdata->channel_num; irq_cnt++) { 1210 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
1206 chan_irq[irq_cnt] = chanirq_res->start; 1211 if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
1207 chan_flag[irq_cnt] = IRQF_SHARED; 1212 chan_irq[irq_cnt] = chanirq_res->start;
1213 chan_flag[irq_cnt] = IRQF_SHARED;
1214 } else {
1215 irq_cap = 1;
1216 break;
1217 }
1208 } 1218 }
1209 } else { 1219 } else {
1210 do { 1220 do {
@@ -1218,22 +1228,32 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1218 "Found IRQ %d for channel %d\n", 1228 "Found IRQ %d for channel %d\n",
1219 i, irq_cnt); 1229 i, irq_cnt);
1220 chan_irq[irq_cnt++] = i; 1230 chan_irq[irq_cnt++] = i;
1231
1232 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1233 break;
1234 }
1235
1236 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1237 irq_cap = 1;
1238 break;
1221 } 1239 }
1222 chanirq_res = platform_get_resource(pdev, 1240 chanirq_res = platform_get_resource(pdev,
1223 IORESOURCE_IRQ, ++irqres); 1241 IORESOURCE_IRQ, ++irqres);
1224 } while (irq_cnt < pdata->channel_num && chanirq_res); 1242 } while (irq_cnt < pdata->channel_num && chanirq_res);
1225 } 1243 }
1226 1244
1227 if (irq_cnt < pdata->channel_num)
1228 goto eirqres;
1229
1230 /* Create DMA Channel */ 1245 /* Create DMA Channel */
1231 for (i = 0; i < pdata->channel_num; i++) { 1246 for (i = 0; i < irq_cnt; i++) {
1232 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); 1247 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
1233 if (err) 1248 if (err)
1234 goto chan_probe_err; 1249 goto chan_probe_err;
1235 } 1250 }
1236 1251
1252 if (irq_cap)
1253 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1254 "channels when a maximum of %d are supported.\n",
1255 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1256
1237 pm_runtime_put(&pdev->dev); 1257 pm_runtime_put(&pdev->dev);
1238 1258
1239 platform_set_drvdata(pdev, shdev); 1259 platform_set_drvdata(pdev, shdev);
@@ -1243,7 +1263,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1243 1263
1244chan_probe_err: 1264chan_probe_err:
1245 sh_dmae_chan_remove(shdev); 1265 sh_dmae_chan_remove(shdev);
1246eirqres: 1266
1247#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 1267#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1248 free_irq(errirq, shdev); 1268 free_irq(errirq, shdev);
1249eirq_err: 1269eirq_err:
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 3f9d3cd06584..5ae9fc512180 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,7 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/list.h> 18#include <linux/list.h>
19 19
20#define SH_DMAC_MAX_CHANNELS 6 20#define SH_DMAC_MAX_CHANNELS 20
21#define SH_DMA_SLAVE_NUMBER 256 21#define SH_DMA_SLAVE_NUMBER 256
22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
23 23
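
The shdma changes raise SH_DMAC_MAX_CHANNELS to 20 and, instead of bailing out when a platform declares more channels than fit, cap the number of registered channels and print a notice. Here is the capping logic reduced to a standalone sketch; the IRQ numbers and counts are invented for the example.

/*
 * Illustrative sketch: collect at most SH_DMAC_MAX_CHANNELS interrupts,
 * remember whether anything was dropped, register only what fits.
 */
#include <stdio.h>

#define SH_DMAC_MAX_CHANNELS 20

static int collect_irqs(const int *avail, int navail, int *chan_irq)
{
        int irq_cnt = 0, irq_cap = 0, i;

        for (i = 0; i < navail; i++) {
                if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
                        irq_cap = 1;    /* platform asked for more than we support */
                        break;
                }
                chan_irq[irq_cnt++] = avail[i];
        }

        if (irq_cap)
                printf("capping %d requested channels at %d\n",
                       navail, SH_DMAC_MAX_CHANNELS);
        return irq_cnt; /* number of channels that will actually be probed */
}

int main(void)
{
        int avail[25], chan_irq[SH_DMAC_MAX_CHANNELS], i, n;

        for (i = 0; i < 25; i++)
                avail[i] = 64 + i;
        n = collect_irqs(avail, 25, chan_irq);
        printf("probing %d channels, first irq %d\n", n, chan_irq[0]);
        return 0;
}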
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index d2c75feff7df..f69f90a61873 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -27,7 +27,6 @@
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/mfd/core.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32 31
33#include <linux/timb_dma.h> 32#include <linux/timb_dma.h>
@@ -685,7 +684,7 @@ static irqreturn_t td_irq(int irq, void *devid)
685 684
686static int __devinit td_probe(struct platform_device *pdev) 685static int __devinit td_probe(struct platform_device *pdev)
687{ 686{
688 struct timb_dma_platform_data *pdata = mfd_get_data(pdev); 687 struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
689 struct timb_dma *td; 688 struct timb_dma *td;
690 struct resource *iomem; 689 struct resource *iomem;
691 int irq; 690 int irq;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index cace0a7b707a..e47e73bbbcc5 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -19,7 +19,7 @@
19#include <linux/edac.h> 19#include <linux/edac.h>
20#include "edac_core.h" 20#include "edac_core.h"
21 21
22#define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ 22#define AMD76X_REVISION " Ver: 2.0.2"
23#define EDAC_MOD_STR "amd76x_edac" 23#define EDAC_MOD_STR "amd76x_edac"
24 24
25#define amd76x_printk(level, fmt, arg...) \ 25#define amd76x_printk(level, fmt, arg...) \
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 35b78d04bbfa..ddd890052ce2 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -33,7 +33,7 @@
33#include "edac_module.h" 33#include "edac_module.h"
34#include "amd8111_edac.h" 34#include "amd8111_edac.h"
35 35
36#define AMD8111_EDAC_REVISION " Ver: 1.0.0 " __DATE__ 36#define AMD8111_EDAC_REVISION " Ver: 1.0.0"
37#define AMD8111_EDAC_MOD_STR "amd8111_edac" 37#define AMD8111_EDAC_MOD_STR "amd8111_edac"
38 38
39#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 39#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index b432d60c622a..a5c680561c73 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -33,7 +33,7 @@
33#include "edac_module.h" 33#include "edac_module.h"
34#include "amd8131_edac.h" 34#include "amd8131_edac.h"
35 35
36#define AMD8131_EDAC_REVISION " Ver: 1.0.0 " __DATE__ 36#define AMD8131_EDAC_REVISION " Ver: 1.0.0"
37#define AMD8131_EDAC_MOD_STR "amd8131_edac" 37#define AMD8131_EDAC_MOD_STR "amd8131_edac"
38 38
39/* Wrapper functions for accessing PCI configuration space */ 39/* Wrapper functions for accessing PCI configuration space */
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 837ad8f85b48..a687a0d16962 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -30,7 +30,7 @@
30#include "edac_core.h" 30#include "edac_core.h"
31#include "edac_module.h" 31#include "edac_module.h"
32 32
33#define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__ 33#define CPC925_EDAC_REVISION " Ver: 1.0.0"
34#define CPC925_EDAC_MOD_STR "cpc925_edac" 34#define CPC925_EDAC_MOD_STR "cpc925_edac"
35 35
36#define cpc925_printk(level, fmt, arg...) \ 36#define cpc925_printk(level, fmt, arg...) \
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index ec302d426589..1af531a11d21 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -24,7 +24,7 @@
24#include <linux/edac.h> 24#include <linux/edac.h>
25#include "edac_core.h" 25#include "edac_core.h"
26 26
27#define E752X_REVISION " Ver: 2.0.2 " __DATE__ 27#define E752X_REVISION " Ver: 2.0.2"
28#define EDAC_MOD_STR "e752x_edac" 28#define EDAC_MOD_STR "e752x_edac"
29 29
30static int report_non_memory_errors; 30static int report_non_memory_errors;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 1731d7245816..6ffb6d23281f 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -29,7 +29,7 @@
29#include <linux/edac.h> 29#include <linux/edac.h>
30#include "edac_core.h" 30#include "edac_core.h"
31 31
32#define E7XXX_REVISION " Ver: 2.0.2 " __DATE__ 32#define E7XXX_REVISION " Ver: 2.0.2"
33#define EDAC_MOD_STR "e7xxx_edac" 33#define EDAC_MOD_STR "e7xxx_edac"
34 34
35#define e7xxx_printk(level, fmt, arg...) \ 35#define e7xxx_printk(level, fmt, arg...) \
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index eefa3501916b..55b8278bb172 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -421,10 +421,6 @@ struct mem_ctl_info {
421 u32 ce_count; /* Total Correctable Errors for this MC */ 421 u32 ce_count; /* Total Correctable Errors for this MC */
422 unsigned long start_time; /* mci load start time (in jiffies) */ 422 unsigned long start_time; /* mci load start time (in jiffies) */
423 423
424 /* this stuff is for safe removal of mc devices from global list while
425 * NMI handlers may be traversing list
426 */
427 struct rcu_head rcu;
428 struct completion complete; 424 struct completion complete;
429 425
430 /* edac sysfs device control */ 426 /* edac sysfs device control */
@@ -620,10 +616,6 @@ struct edac_device_ctl_info {
620 616
621 unsigned long start_time; /* edac_device load start time (jiffies) */ 617 unsigned long start_time; /* edac_device load start time (jiffies) */
622 618
623 /* these are for safe removal of mc devices from global list while
624 * NMI handlers may be traversing list
625 */
626 struct rcu_head rcu;
627 struct completion removal_complete; 619 struct completion removal_complete;
628 620
629 /* sysfs top name under 'edac' directory 621 /* sysfs top name under 'edac' directory
@@ -722,10 +714,6 @@ struct edac_pci_ctl_info {
722 714
723 unsigned long start_time; /* edac_pci load start time (jiffies) */ 715 unsigned long start_time; /* edac_pci load start time (jiffies) */
724 716
725 /* these are for safe removal of devices from global list while
726 * NMI handlers may be traversing list
727 */
728 struct rcu_head rcu;
729 struct completion complete; 717 struct completion complete;
730 718
731 /* sysfs top name under 'edac' directory 719 /* sysfs top name under 'edac' directory
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a7408cf86f37..c3f67437afb6 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -346,30 +346,18 @@ fail1:
346} 346}
347 347
348/* 348/*
349 * complete_edac_device_list_del
350 *
351 * callback function when reference count is zero
352 */
353static void complete_edac_device_list_del(struct rcu_head *head)
354{
355 struct edac_device_ctl_info *edac_dev;
356
357 edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
358 INIT_LIST_HEAD(&edac_dev->link);
359}
360
361/*
362 * del_edac_device_from_global_list 349 * del_edac_device_from_global_list
363 *
364 * remove the RCU, setup for a callback call,
365 * then wait for the callback to occur
366 */ 350 */
367static void del_edac_device_from_global_list(struct edac_device_ctl_info 351static void del_edac_device_from_global_list(struct edac_device_ctl_info
368 *edac_device) 352 *edac_device)
369{ 353{
370 list_del_rcu(&edac_device->link); 354 list_del_rcu(&edac_device->link);
371 call_rcu(&edac_device->rcu, complete_edac_device_list_del); 355
372 rcu_barrier(); 356 /* these are for safe removal of devices from global list while
357 * NMI handlers may be traversing list
358 */
359 synchronize_rcu();
360 INIT_LIST_HEAD(&edac_device->link);
373} 361}
374 362
375/* 363/*
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 1d8056049072..d69144a09043 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -447,20 +447,16 @@ fail1:
447 return 1; 447 return 1;
448} 448}
449 449
450static void complete_mc_list_del(struct rcu_head *head)
451{
452 struct mem_ctl_info *mci;
453
454 mci = container_of(head, struct mem_ctl_info, rcu);
455 INIT_LIST_HEAD(&mci->link);
456}
457
458static void del_mc_from_global_list(struct mem_ctl_info *mci) 450static void del_mc_from_global_list(struct mem_ctl_info *mci)
459{ 451{
460 atomic_dec(&edac_handlers); 452 atomic_dec(&edac_handlers);
461 list_del_rcu(&mci->link); 453 list_del_rcu(&mci->link);
462 call_rcu(&mci->rcu, complete_mc_list_del); 454
463 rcu_barrier(); 455 /* these are for safe removal of devices from global list while
456 * NMI handlers may be traversing list
457 */
458 synchronize_rcu();
459 INIT_LIST_HEAD(&mci->link);
464} 460}
465 461
466/** 462/**
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index be4b075c3098..5ddaa86d6a6e 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -15,7 +15,7 @@
15#include "edac_core.h" 15#include "edac_core.h"
16#include "edac_module.h" 16#include "edac_module.h"
17 17
18#define EDAC_VERSION "Ver: 2.1.0 " __DATE__ 18#define EDAC_VERSION "Ver: 2.1.0"
19 19
20#ifdef CONFIG_EDAC_DEBUG 20#ifdef CONFIG_EDAC_DEBUG
21/* Values of 0 to 4 will generate output */ 21/* Values of 0 to 4 will generate output */
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index efb5d5650783..2b378207d571 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -164,19 +164,6 @@ fail1:
164} 164}
165 165
166/* 166/*
167 * complete_edac_pci_list_del
168 *
169 * RCU completion callback to indicate item is deleted
170 */
171static void complete_edac_pci_list_del(struct rcu_head *head)
172{
173 struct edac_pci_ctl_info *pci;
174
175 pci = container_of(head, struct edac_pci_ctl_info, rcu);
176 INIT_LIST_HEAD(&pci->link);
177}
178
179/*
180 * del_edac_pci_from_global_list 167 * del_edac_pci_from_global_list
181 * 168 *
182 * remove the PCI control struct from the global list 169 * remove the PCI control struct from the global list
@@ -184,8 +171,12 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
184static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) 171static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
185{ 172{
186 list_del_rcu(&pci->link); 173 list_del_rcu(&pci->link);
187 call_rcu(&pci->rcu, complete_edac_pci_list_del); 174
188 rcu_barrier(); 175 /* these are for safe removal of devices from global list while
176 * NMI handlers may be traversing list
177 */
178 synchronize_rcu();
179 INIT_LIST_HEAD(&pci->link);
189} 180}
190 181
191#if 0 182#if 0
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index d41f9002da45..aa08497a075a 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -101,6 +101,19 @@ struct i3200_priv {
101 101
102static int nr_channels; 102static int nr_channels;
103 103
104#ifndef readq
105static inline __u64 readq(const volatile void __iomem *addr)
106{
107 const volatile u32 __iomem *p = addr;
108 u32 low, high;
109
110 low = readl(p);
111 high = readl(p + 1);
112
113 return low + ((u64)high << 32);
114}
115#endif
116
104static int how_many_channels(struct pci_dev *pdev) 117static int how_many_channels(struct pci_dev *pdev)
105{ 118{
106 unsigned char capid0_8b; /* 8th byte of CAPID0 */ 119 unsigned char capid0_8b; /* 8th byte of CAPID0 */
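The readq() fallback added above assembles a 64-bit value from two 32-bit MMIO reads. The cast on high is what keeps the shift well defined, and the two halves are not read atomically, so the register must not change between the readl() calls. A hedged, user-space illustration of the same composition (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same low/high composition as the fallback above; the cast widens
 * 'high' to 64 bits before the 32-bit shift. */
static uint64_t compose64(uint32_t low, uint32_t high)
{
	return low + ((uint64_t)high << 32);
}

int main(void)
{
	/* Prints 0x1deadbeef */
	printf("0x%llx\n", (unsigned long long)compose64(0xdeadbeefu, 0x1u));
	return 0;
}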
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 87f427c2ce5c..4dc3ac25a422 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -27,7 +27,7 @@
27/* 27/*
28 * Alter this version for the I5000 module when modifications are made 28 * Alter this version for the I5000 module when modifications are made
29 */ 29 */
30#define I5000_REVISION " Ver: 2.0.12 " __DATE__ 30#define I5000_REVISION " Ver: 2.0.12"
31#define EDAC_MOD_STR "i5000_edac" 31#define EDAC_MOD_STR "i5000_edac"
32 32
33#define i5000_printk(level, fmt, arg...) \ 33#define i5000_printk(level, fmt, arg...) \
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 80a465efbae8..74d6ec342afb 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -33,7 +33,7 @@
33/* 33/*
34 * Alter this version for the I5400 module when modifications are made 34 * Alter this version for the I5400 module when modifications are made
35 */ 35 */
36#define I5400_REVISION " Ver: 1.0.0 " __DATE__ 36#define I5400_REVISION " Ver: 1.0.0"
37 37
38#define EDAC_MOD_STR "i5400_edac" 38#define EDAC_MOD_STR "i5400_edac"
39 39
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 363cc1602944..a76fe8366b68 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -31,7 +31,7 @@
31/* 31/*
32 * Alter this version for the I7300 module when modifications are made 32 * Alter this version for the I7300 module when modifications are made
33 */ 33 */
34#define I7300_REVISION " Ver: 1.0.0 " __DATE__ 34#define I7300_REVISION " Ver: 1.0.0"
35 35
36#define EDAC_MOD_STR "i7300_edac" 36#define EDAC_MOD_STR "i7300_edac"
37 37
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 465cbc25149f..04f1e7ce02b1 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
59/* 59/*
60 * Alter this version for the module when modifications are made 60 * Alter this version for the module when modifications are made
61 */ 61 */
62#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__ 62#define I7CORE_REVISION " Ver: 1.0.0"
63#define EDAC_MOD_STR "i7core_edac" 63#define EDAC_MOD_STR "i7core_edac"
64 64
65/* 65/*
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index b8a95cf50718..931a05775049 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -16,7 +16,7 @@
16#include <linux/edac.h> 16#include <linux/edac.h>
17#include "edac_core.h" 17#include "edac_core.h"
18 18
19#define I82860_REVISION " Ver: 2.0.2 " __DATE__ 19#define I82860_REVISION " Ver: 2.0.2"
20#define EDAC_MOD_STR "i82860_edac" 20#define EDAC_MOD_STR "i82860_edac"
21 21
22#define i82860_printk(level, fmt, arg...) \ 22#define i82860_printk(level, fmt, arg...) \
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index b2fd1e899142..33864c63c684 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -20,7 +20,7 @@
20#include <linux/edac.h> 20#include <linux/edac.h>
21#include "edac_core.h" 21#include "edac_core.h"
22 22
23#define I82875P_REVISION " Ver: 2.0.2 " __DATE__ 23#define I82875P_REVISION " Ver: 2.0.2"
24#define EDAC_MOD_STR "i82875p_edac" 24#define EDAC_MOD_STR "i82875p_edac"
25 25
26#define i82875p_printk(level, fmt, arg...) \ 26#define i82875p_printk(level, fmt, arg...) \
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 92e65e7038e9..a5da732fe5b2 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -16,7 +16,7 @@
16#include <linux/edac.h> 16#include <linux/edac.h>
17#include "edac_core.h" 17#include "edac_core.h"
18 18
19#define I82975X_REVISION " Ver: 1.0.0 " __DATE__ 19#define I82975X_REVISION " Ver: 1.0.0"
20#define EDAC_MOD_STR "i82975x_edac" 20#define EDAC_MOD_STR "i82975x_edac"
21 21
22#define i82975x_printk(level, fmt, arg...) \ 22#define i82975x_printk(level, fmt, arg...) \
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index cb24df839460..932016f2cf06 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -11,7 +11,7 @@
11#ifndef _MPC85XX_EDAC_H_ 11#ifndef _MPC85XX_EDAC_H_
12#define _MPC85XX_EDAC_H_ 12#define _MPC85XX_EDAC_H_
13 13
14#define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__ 14#define MPC85XX_REVISION " Ver: 2.0.0"
15#define EDAC_MOD_STR "MPC85xx_edac" 15#define EDAC_MOD_STR "MPC85xx_edac"
16 16
17#define mpc85xx_printk(level, fmt, arg...) \ 17#define mpc85xx_printk(level, fmt, arg...) \
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h
index e042e2daa8f4..c7f209c92a1a 100644
--- a/drivers/edac/mv64x60_edac.h
+++ b/drivers/edac/mv64x60_edac.h
@@ -12,7 +12,7 @@
12#ifndef _MV64X60_EDAC_H_ 12#ifndef _MV64X60_EDAC_H_
13#define _MV64X60_EDAC_H_ 13#define _MV64X60_EDAC_H_
14 14
15#define MV64x60_REVISION " Ver: 2.0.0 " __DATE__ 15#define MV64x60_REVISION " Ver: 2.0.0"
16#define EDAC_MOD_STR "MV64x60_edac" 16#define EDAC_MOD_STR "MV64x60_edac"
17 17
18#define mv64x60_printk(level, fmt, arg...) \ 18#define mv64x60_printk(level, fmt, arg...) \
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index af8e7b1aa290..0de7d8770891 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -113,7 +113,7 @@
113#define EDAC_OPSTATE_UNKNOWN_STR "unknown" 113#define EDAC_OPSTATE_UNKNOWN_STR "unknown"
114 114
115#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" 115#define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac"
116#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0 " __DATE__ 116#define PPC4XX_EDAC_MODULE_REVISION "v1.0.0"
117 117
118#define PPC4XX_EDAC_MESSAGE_SIZE 256 118#define PPC4XX_EDAC_MESSAGE_SIZE 256
119 119
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 678513738c33..b153674431f1 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -22,7 +22,7 @@
22#include <linux/edac.h> 22#include <linux/edac.h>
23#include "edac_core.h" 23#include "edac_core.h"
24 24
25#define R82600_REVISION " Ver: 2.0.2 " __DATE__ 25#define R82600_REVISION " Ver: 2.0.2"
26#define EDAC_MOD_STR "r82600_edac" 26#define EDAC_MOD_STR "r82600_edac"
27 27
28#define r82600_printk(level, fmt, arg...) \ 28#define r82600_printk(level, fmt, arg...) \
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d3b295305542..d21364603755 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1,5 +1,5 @@
1# 1#
2# platform-neutral GPIO infrastructure and expanders 2# GPIO infrastructure and drivers
3# 3#
4 4
5config ARCH_WANT_OPTIONAL_GPIOLIB 5config ARCH_WANT_OPTIONAL_GPIOLIB
@@ -31,7 +31,7 @@ menuconfig GPIOLIB
31 help 31 help
32 This enables GPIO support through the generic GPIO library. 32 This enables GPIO support through the generic GPIO library.
33 You only need to enable this, if you also want to enable 33 You only need to enable this, if you also want to enable
34 one or more of the GPIO expansion card drivers below. 34 one or more of the GPIO drivers below.
35 35
36 If unsure, say N. 36 If unsure, say N.
37 37
@@ -63,21 +63,26 @@ config GPIO_SYSFS
63 Kernel drivers may also request that a particular GPIO be 63 Kernel drivers may also request that a particular GPIO be
64 exported to userspace; this can be useful when debugging. 64 exported to userspace; this can be useful when debugging.
65 65
66# put expanders in the right section, in alphabetical order 66# put drivers in the right section, in alphabetical order
67 67
68config GPIO_MAX730X 68config GPIO_MAX730X
69 tristate 69 tristate
70 70
71comment "Memory mapped GPIO expanders:" 71comment "Memory mapped GPIO drivers:"
72
73config GPIO_BASIC_MMIO_CORE
74 tristate
75 help
76 Provides core functionality for basic memory-mapped GPIO controllers.
72 77
73config GPIO_BASIC_MMIO 78config GPIO_BASIC_MMIO
74 tristate "Basic memory-mapped GPIO controllers support" 79 tristate "Basic memory-mapped GPIO controllers support"
80 select GPIO_BASIC_MMIO_CORE
75 help 81 help
76 Say yes here to support basic memory-mapped GPIO controllers. 82 Say yes here to support basic memory-mapped GPIO controllers.
77 83
78config GPIO_IT8761E 84config GPIO_IT8761E
79 tristate "IT8761E GPIO support" 85 tristate "IT8761E GPIO support"
80 depends on GPIOLIB
81 help 86 help
82 Say yes here to support GPIO functionality of IT8761E super I/O chip. 87 Say yes here to support GPIO functionality of IT8761E super I/O chip.
83 88
@@ -101,7 +106,7 @@ config GPIO_VR41XX
101 106
102config GPIO_SCH 107config GPIO_SCH
103 tristate "Intel SCH/TunnelCreek GPIO" 108 tristate "Intel SCH/TunnelCreek GPIO"
104 depends on GPIOLIB && PCI && X86 109 depends on PCI && X86
105 select MFD_CORE 110 select MFD_CORE
106 select LPC_SCH 111 select LPC_SCH
107 help 112 help
@@ -121,7 +126,7 @@ config GPIO_SCH
121 126
122config GPIO_VX855 127config GPIO_VX855
123 tristate "VIA VX855/VX875 GPIO" 128 tristate "VIA VX855/VX875 GPIO"
124 depends on GPIOLIB && MFD_SUPPORT && PCI 129 depends on MFD_SUPPORT && PCI
125 select MFD_CORE 130 select MFD_CORE
126 select MFD_VX855 131 select MFD_VX855
127 help 132 help
@@ -347,13 +352,13 @@ config GPIO_ML_IOH
347 352
348config GPIO_TIMBERDALE 353config GPIO_TIMBERDALE
349 bool "Support for timberdale GPIO IP" 354 bool "Support for timberdale GPIO IP"
350 depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM 355 depends on MFD_TIMBERDALE && HAS_IOMEM
351 ---help--- 356 ---help---
352 Add support for the GPIO IP in the timberdale FPGA. 357 Add support for the GPIO IP in the timberdale FPGA.
353 358
354config GPIO_RDC321X 359config GPIO_RDC321X
355 tristate "RDC R-321x GPIO support" 360 tristate "RDC R-321x GPIO support"
356 depends on PCI && GPIOLIB 361 depends on PCI
357 select MFD_SUPPORT 362 select MFD_SUPPORT
358 select MFD_CORE 363 select MFD_CORE
359 select MFD_RDC321X 364 select MFD_RDC321X
@@ -419,4 +424,11 @@ config AB8500_GPIO
419 depends on AB8500_CORE && BROKEN 424 depends on AB8500_CORE && BROKEN
420 help 425 help
421 Select this to enable the AB8500 IC GPIO driver 426 Select this to enable the AB8500 IC GPIO driver
427
428config GPIO_TPS65910
429 bool "TPS65910 GPIO"
430 depends on MFD_TPS65910
431 help
432 Select this option to enable GPIO driver for the TPS65910
433 chip family.
422endif 434endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index becef5954356..6a3387acc0e5 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -1,8 +1,4 @@
1# generic gpio support: dedicated expander chips, etc 1# generic gpio support: platform drivers, dedicated expander chips, etc
2#
3# NOTE: platform-specific GPIO drivers don't belong in the
4# drivers/gpio directory; put them with other platform setup
5# code, IRQ controllers, board init, etc.
6 2
7ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG 3ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
8 4
@@ -10,6 +6,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o
10 6
11obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o 7obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
12obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o 8obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
9obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
13obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o 10obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
14obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o 11obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
15obj-$(CONFIG_GPIO_MAX730X) += max730x.o 12obj-$(CONFIG_GPIO_MAX730X) += max730x.o
@@ -43,3 +40,4 @@ obj-$(CONFIG_GPIO_SX150X) += sx150x.o
43obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o 40obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
44obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o 41obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
45obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o 42obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
43obj-$(CONFIG_GPIO_TPS65910) += tps65910-gpio.o
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/basic_mmio_gpio.c
index 3addea65894e..8152e9f516b0 100644
--- a/drivers/gpio/basic_mmio_gpio.c
+++ b/drivers/gpio/basic_mmio_gpio.c
@@ -45,6 +45,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
45 */ 45 */
46 46
47#include <linux/init.h> 47#include <linux/init.h>
48#include <linux/err.h>
48#include <linux/bug.h> 49#include <linux/bug.h>
49#include <linux/kernel.h> 50#include <linux/kernel.h>
50#include <linux/module.h> 51#include <linux/module.h>
@@ -61,102 +62,101 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
61#include <linux/mod_devicetable.h> 62#include <linux/mod_devicetable.h>
62#include <linux/basic_mmio_gpio.h> 63#include <linux/basic_mmio_gpio.h>
63 64
64struct bgpio_chip { 65static void bgpio_write8(void __iomem *reg, unsigned long data)
65 struct gpio_chip gc; 66{
66 void __iomem *reg_dat; 67 writeb(data, reg);
67 void __iomem *reg_set; 68}
68 void __iomem *reg_clr;
69
70 /* Number of bits (GPIOs): <register width> * 8. */
71 int bits;
72
73 /*
74 * Some GPIO controllers work with the big-endian bits notation,
75 * e.g. in a 8-bits register, GPIO7 is the least significant bit.
76 */
77 int big_endian_bits;
78
79 /*
80 * Used to lock bgpio_chip->data. Also, this is needed to keep
81 * shadowed and real data registers writes together.
82 */
83 spinlock_t lock;
84
85 /* Shadowed data register to clear/set bits safely. */
86 unsigned long data;
87};
88 69
89static struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc) 70static unsigned long bgpio_read8(void __iomem *reg)
90{ 71{
91 return container_of(gc, struct bgpio_chip, gc); 72 return readb(reg);
92} 73}
93 74
94static unsigned long bgpio_in(struct bgpio_chip *bgc) 75static void bgpio_write16(void __iomem *reg, unsigned long data)
95{ 76{
96 switch (bgc->bits) { 77 writew(data, reg);
97 case 8:
98 return __raw_readb(bgc->reg_dat);
99 case 16:
100 return __raw_readw(bgc->reg_dat);
101 case 32:
102 return __raw_readl(bgc->reg_dat);
103#if BITS_PER_LONG >= 64
104 case 64:
105 return __raw_readq(bgc->reg_dat);
106#endif
107 }
108 return -EINVAL;
109} 78}
110 79
111static void bgpio_out(struct bgpio_chip *bgc, void __iomem *reg, 80static unsigned long bgpio_read16(void __iomem *reg)
112 unsigned long data)
113{ 81{
114 switch (bgc->bits) { 82 return readw(reg);
115 case 8: 83}
116 __raw_writeb(data, reg); 84
117 return; 85static void bgpio_write32(void __iomem *reg, unsigned long data)
118 case 16: 86{
119 __raw_writew(data, reg); 87 writel(data, reg);
120 return; 88}
121 case 32: 89
122 __raw_writel(data, reg); 90static unsigned long bgpio_read32(void __iomem *reg)
123 return; 91{
92 return readl(reg);
93}
94
124#if BITS_PER_LONG >= 64 95#if BITS_PER_LONG >= 64
125 case 64: 96static void bgpio_write64(void __iomem *reg, unsigned long data)
126 __raw_writeq(data, reg); 97{
127 return; 98 writeq(data, reg);
128#endif
129 }
130} 99}
131 100
101static unsigned long bgpio_read64(void __iomem *reg)
102{
103 return readq(reg);
104}
105#endif /* BITS_PER_LONG >= 64 */
106
132static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin) 107static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin)
133{ 108{
134 if (bgc->big_endian_bits) 109 return 1 << pin;
135 return 1 << (bgc->bits - 1 - pin); 110}
136 else 111
137 return 1 << pin; 112static unsigned long bgpio_pin2mask_be(struct bgpio_chip *bgc,
113 unsigned int pin)
114{
115 return 1 << (bgc->bits - 1 - pin);
138} 116}
139 117
140static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) 118static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
141{ 119{
142 struct bgpio_chip *bgc = to_bgpio_chip(gc); 120 struct bgpio_chip *bgc = to_bgpio_chip(gc);
143 121
144 return bgpio_in(bgc) & bgpio_pin2mask(bgc, gpio); 122 return bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio);
145} 123}
146 124
147static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) 125static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
148{ 126{
149 struct bgpio_chip *bgc = to_bgpio_chip(gc); 127 struct bgpio_chip *bgc = to_bgpio_chip(gc);
150 unsigned long mask = bgpio_pin2mask(bgc, gpio); 128 unsigned long mask = bgc->pin2mask(bgc, gpio);
151 unsigned long flags; 129 unsigned long flags;
152 130
153 if (bgc->reg_set) { 131 spin_lock_irqsave(&bgc->lock, flags);
154 if (val) 132
155 bgpio_out(bgc, bgc->reg_set, mask); 133 if (val)
156 else 134 bgc->data |= mask;
157 bgpio_out(bgc, bgc->reg_clr, mask); 135 else
158 return; 136 bgc->data &= ~mask;
159 } 137
138 bgc->write_reg(bgc->reg_dat, bgc->data);
139
140 spin_unlock_irqrestore(&bgc->lock, flags);
141}
142
143static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
144 int val)
145{
146 struct bgpio_chip *bgc = to_bgpio_chip(gc);
147 unsigned long mask = bgc->pin2mask(bgc, gpio);
148
149 if (val)
150 bgc->write_reg(bgc->reg_set, mask);
151 else
152 bgc->write_reg(bgc->reg_clr, mask);
153}
154
155static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
156{
157 struct bgpio_chip *bgc = to_bgpio_chip(gc);
158 unsigned long mask = bgc->pin2mask(bgc, gpio);
159 unsigned long flags;
160 160
161 spin_lock_irqsave(&bgc->lock, flags); 161 spin_lock_irqsave(&bgc->lock, flags);
162 162
@@ -165,103 +165,352 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
165 else 165 else
166 bgc->data &= ~mask; 166 bgc->data &= ~mask;
167 167
168 bgpio_out(bgc, bgc->reg_dat, bgc->data); 168 bgc->write_reg(bgc->reg_set, bgc->data);
169 169
170 spin_unlock_irqrestore(&bgc->lock, flags); 170 spin_unlock_irqrestore(&bgc->lock, flags);
171} 171}
172 172
173static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
174{
175 return 0;
176}
177
178static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
179 int val)
180{
181 gc->set(gc, gpio, val);
182
183 return 0;
184}
185
173static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) 186static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
174{ 187{
188 struct bgpio_chip *bgc = to_bgpio_chip(gc);
189 unsigned long flags;
190
191 spin_lock_irqsave(&bgc->lock, flags);
192
193 bgc->dir &= ~bgc->pin2mask(bgc, gpio);
194 bgc->write_reg(bgc->reg_dir, bgc->dir);
195
196 spin_unlock_irqrestore(&bgc->lock, flags);
197
175 return 0; 198 return 0;
176} 199}
177 200
178static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) 201static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
179{ 202{
180 bgpio_set(gc, gpio, val); 203 struct bgpio_chip *bgc = to_bgpio_chip(gc);
204 unsigned long flags;
205
206 gc->set(gc, gpio, val);
207
208 spin_lock_irqsave(&bgc->lock, flags);
209
210 bgc->dir |= bgc->pin2mask(bgc, gpio);
211 bgc->write_reg(bgc->reg_dir, bgc->dir);
212
213 spin_unlock_irqrestore(&bgc->lock, flags);
214
181 return 0; 215 return 0;
182} 216}
183 217
184static int __devinit bgpio_probe(struct platform_device *pdev) 218static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
185{ 219{
186 const struct platform_device_id *platid = platform_get_device_id(pdev); 220 struct bgpio_chip *bgc = to_bgpio_chip(gc);
187 struct device *dev = &pdev->dev; 221 unsigned long flags;
188 struct bgpio_pdata *pdata = dev_get_platdata(dev);
189 struct bgpio_chip *bgc;
190 struct resource *res_dat;
191 struct resource *res_set;
192 struct resource *res_clr;
193 resource_size_t dat_sz;
194 int bits;
195 int ret;
196 222
197 res_dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat"); 223 spin_lock_irqsave(&bgc->lock, flags);
198 if (!res_dat)
199 return -EINVAL;
200 224
201 dat_sz = resource_size(res_dat); 225 bgc->dir |= bgc->pin2mask(bgc, gpio);
202 if (!is_power_of_2(dat_sz)) 226 bgc->write_reg(bgc->reg_dir, bgc->dir);
203 return -EINVAL; 227
228 spin_unlock_irqrestore(&bgc->lock, flags);
229
230 return 0;
231}
204 232
205 bits = dat_sz * 8; 233static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
206 if (bits > BITS_PER_LONG) 234{
235 struct bgpio_chip *bgc = to_bgpio_chip(gc);
236 unsigned long flags;
237
238 gc->set(gc, gpio, val);
239
240 spin_lock_irqsave(&bgc->lock, flags);
241
242 bgc->dir &= ~bgc->pin2mask(bgc, gpio);
243 bgc->write_reg(bgc->reg_dir, bgc->dir);
244
245 spin_unlock_irqrestore(&bgc->lock, flags);
246
247 return 0;
248}
249
250static int bgpio_setup_accessors(struct device *dev,
251 struct bgpio_chip *bgc,
252 bool be)
253{
254
255 switch (bgc->bits) {
256 case 8:
257 bgc->read_reg = bgpio_read8;
258 bgc->write_reg = bgpio_write8;
259 break;
260 case 16:
261 bgc->read_reg = bgpio_read16;
262 bgc->write_reg = bgpio_write16;
263 break;
264 case 32:
265 bgc->read_reg = bgpio_read32;
266 bgc->write_reg = bgpio_write32;
267 break;
268#if BITS_PER_LONG >= 64
269 case 64:
270 bgc->read_reg = bgpio_read64;
271 bgc->write_reg = bgpio_write64;
272 break;
273#endif /* BITS_PER_LONG >= 64 */
274 default:
275 dev_err(dev, "unsupported data width %u bits\n", bgc->bits);
207 return -EINVAL; 276 return -EINVAL;
277 }
208 278
209 bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL); 279 bgc->pin2mask = be ? bgpio_pin2mask_be : bgpio_pin2mask;
210 if (!bgc) 280
211 return -ENOMEM; 281 return 0;
282}
283
284/*
285 * Create the device and allocate the resources. For setting GPIOs there are
286 * three supported configurations:
287 *
288 * - single input/output register resource (named "dat").
289 * - set/clear pair (named "set" and "clr").
290 * - single output register resource and single input resource ("set" and
291 * "dat").
292 *
293 * For the single output register, this drives a 1 by setting a bit and a zero
294 * by clearing a bit. For the set clr pair, this drives a 1 by setting a bit
295 * in the set register and clears it by setting a bit in the clear register.
296 * The configuration is detected by which resources are present.
297 *
298 * For setting the GPIO direction, there are three supported configurations:
299 *
300 * - simple bidirectional GPIO that requires no configuration.
301 * - an output direction register (named "dirout") where a 1 bit
302 * indicates the GPIO is an output.
303 * - an input direction register (named "dirin") where a 1 bit indicates
304 * the GPIO is an input.
305 */
306static int bgpio_setup_io(struct bgpio_chip *bgc,
307 void __iomem *dat,
308 void __iomem *set,
309 void __iomem *clr)
310{
212 311
213 bgc->reg_dat = devm_ioremap(dev, res_dat->start, dat_sz); 312 bgc->reg_dat = dat;
214 if (!bgc->reg_dat) 313 if (!bgc->reg_dat)
215 return -ENOMEM; 314 return -EINVAL;
315
316 if (set && clr) {
317 bgc->reg_set = set;
318 bgc->reg_clr = clr;
319 bgc->gc.set = bgpio_set_with_clear;
320 } else if (set && !clr) {
321 bgc->reg_set = set;
322 bgc->gc.set = bgpio_set_set;
323 } else {
324 bgc->gc.set = bgpio_set;
325 }
326
327 bgc->gc.get = bgpio_get;
328
329 return 0;
330}
216 331
217 res_set = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set"); 332static int bgpio_setup_direction(struct bgpio_chip *bgc,
218 res_clr = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr"); 333 void __iomem *dirout,
219 if (res_set && res_clr) { 334 void __iomem *dirin)
220 if (resource_size(res_set) != resource_size(res_clr) || 335{
221 resource_size(res_set) != dat_sz) 336 if (dirout && dirin) {
222 return -EINVAL;
223
224 bgc->reg_set = devm_ioremap(dev, res_set->start, dat_sz);
225 bgc->reg_clr = devm_ioremap(dev, res_clr->start, dat_sz);
226 if (!bgc->reg_set || !bgc->reg_clr)
227 return -ENOMEM;
228 } else if (res_set || res_clr) {
229 return -EINVAL; 337 return -EINVAL;
338 } else if (dirout) {
339 bgc->reg_dir = dirout;
340 bgc->gc.direction_output = bgpio_dir_out;
341 bgc->gc.direction_input = bgpio_dir_in;
342 } else if (dirin) {
343 bgc->reg_dir = dirin;
344 bgc->gc.direction_output = bgpio_dir_out_inv;
345 bgc->gc.direction_input = bgpio_dir_in_inv;
346 } else {
347 bgc->gc.direction_output = bgpio_simple_dir_out;
348 bgc->gc.direction_input = bgpio_simple_dir_in;
230 } 349 }
231 350
232 spin_lock_init(&bgc->lock); 351 return 0;
352}
233 353
234 bgc->bits = bits; 354int __devexit bgpio_remove(struct bgpio_chip *bgc)
235 bgc->big_endian_bits = !strcmp(platid->name, "basic-mmio-gpio-be"); 355{
236 bgc->data = bgpio_in(bgc); 356 int err = gpiochip_remove(&bgc->gc);
237 357
238 bgc->gc.ngpio = bits; 358 kfree(bgc);
239 bgc->gc.direction_input = bgpio_dir_in; 359
240 bgc->gc.direction_output = bgpio_dir_out; 360 return err;
241 bgc->gc.get = bgpio_get; 361}
242 bgc->gc.set = bgpio_set; 362EXPORT_SYMBOL_GPL(bgpio_remove);
363
364int __devinit bgpio_init(struct bgpio_chip *bgc,
365 struct device *dev,
366 unsigned long sz,
367 void __iomem *dat,
368 void __iomem *set,
369 void __iomem *clr,
370 void __iomem *dirout,
371 void __iomem *dirin,
372 bool big_endian)
373{
374 int ret;
375
376 if (!is_power_of_2(sz))
377 return -EINVAL;
378
379 bgc->bits = sz * 8;
380 if (bgc->bits > BITS_PER_LONG)
381 return -EINVAL;
382
383 spin_lock_init(&bgc->lock);
243 bgc->gc.dev = dev; 384 bgc->gc.dev = dev;
244 bgc->gc.label = dev_name(dev); 385 bgc->gc.label = dev_name(dev);
386 bgc->gc.base = -1;
387 bgc->gc.ngpio = bgc->bits;
245 388
246 if (pdata) 389 ret = bgpio_setup_io(bgc, dat, set, clr);
247 bgc->gc.base = pdata->base; 390 if (ret)
248 else 391 return ret;
249 bgc->gc.base = -1;
250 392
251 dev_set_drvdata(dev, bgc); 393 ret = bgpio_setup_accessors(dev, bgc, big_endian);
394 if (ret)
395 return ret;
252 396
253 ret = gpiochip_add(&bgc->gc); 397 ret = bgpio_setup_direction(bgc, dirout, dirin);
254 if (ret) 398 if (ret)
255 dev_err(dev, "gpiochip_add() failed: %d\n", ret); 399 return ret;
400
401 bgc->data = bgc->read_reg(bgc->reg_dat);
256 402
257 return ret; 403 return ret;
258} 404}
405EXPORT_SYMBOL_GPL(bgpio_init);
406
407#ifdef CONFIG_GPIO_BASIC_MMIO
259 408
260static int __devexit bgpio_remove(struct platform_device *pdev) 409static void __iomem *bgpio_map(struct platform_device *pdev,
410 const char *name,
411 resource_size_t sane_sz,
412 int *err)
261{ 413{
262 struct bgpio_chip *bgc = dev_get_drvdata(&pdev->dev); 414 struct device *dev = &pdev->dev;
415 struct resource *r;
416 resource_size_t start;
417 resource_size_t sz;
418 void __iomem *ret;
419
420 *err = 0;
421
422 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
423 if (!r)
424 return NULL;
263 425
264 return gpiochip_remove(&bgc->gc); 426 sz = resource_size(r);
427 if (sz != sane_sz) {
428 *err = -EINVAL;
429 return NULL;
430 }
431
432 start = r->start;
433 if (!devm_request_mem_region(dev, start, sz, r->name)) {
434 *err = -EBUSY;
435 return NULL;
436 }
437
438 ret = devm_ioremap(dev, start, sz);
439 if (!ret) {
440 *err = -ENOMEM;
441 return NULL;
442 }
443
444 return ret;
445}
446
447static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
448{
449 struct device *dev = &pdev->dev;
450 struct resource *r;
451 void __iomem *dat;
452 void __iomem *set;
453 void __iomem *clr;
454 void __iomem *dirout;
455 void __iomem *dirin;
456 unsigned long sz;
457 bool be;
458 int err;
459 struct bgpio_chip *bgc;
460 struct bgpio_pdata *pdata = dev_get_platdata(dev);
461
462 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
463 if (!r)
464 return -EINVAL;
465
466 sz = resource_size(r);
467
468 dat = bgpio_map(pdev, "dat", sz, &err);
469 if (!dat)
470 return err ? err : -EINVAL;
471
472 set = bgpio_map(pdev, "set", sz, &err);
473 if (err)
474 return err;
475
476 clr = bgpio_map(pdev, "clr", sz, &err);
477 if (err)
478 return err;
479
480 dirout = bgpio_map(pdev, "dirout", sz, &err);
481 if (err)
482 return err;
483
484 dirin = bgpio_map(pdev, "dirin", sz, &err);
485 if (err)
486 return err;
487
488 be = !strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be");
489
490 bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
491 if (!bgc)
492 return -ENOMEM;
493
494 err = bgpio_init(bgc, dev, sz, dat, set, clr, dirout, dirin, be);
495 if (err)
496 return err;
497
498 if (pdata) {
499 bgc->gc.base = pdata->base;
500 if (pdata->ngpio > 0)
501 bgc->gc.ngpio = pdata->ngpio;
502 }
503
504 platform_set_drvdata(pdev, bgc);
505
506 return gpiochip_add(&bgc->gc);
507}
508
509static int __devexit bgpio_pdev_remove(struct platform_device *pdev)
510{
511 struct bgpio_chip *bgc = platform_get_drvdata(pdev);
512
513 return bgpio_remove(bgc);
265} 514}
266 515
267static const struct platform_device_id bgpio_id_table[] = { 516static const struct platform_device_id bgpio_id_table[] = {
@@ -276,21 +525,23 @@ static struct platform_driver bgpio_driver = {
276 .name = "basic-mmio-gpio", 525 .name = "basic-mmio-gpio",
277 }, 526 },
278 .id_table = bgpio_id_table, 527 .id_table = bgpio_id_table,
279 .probe = bgpio_probe, 528 .probe = bgpio_pdev_probe,
280 .remove = __devexit_p(bgpio_remove), 529 .remove = __devexit_p(bgpio_pdev_remove),
281}; 530};
282 531
283static int __init bgpio_init(void) 532static int __init bgpio_platform_init(void)
284{ 533{
285 return platform_driver_register(&bgpio_driver); 534 return platform_driver_register(&bgpio_driver);
286} 535}
287module_init(bgpio_init); 536module_init(bgpio_platform_init);
288 537
289static void __exit bgpio_exit(void) 538static void __exit bgpio_platform_exit(void)
290{ 539{
291 platform_driver_unregister(&bgpio_driver); 540 platform_driver_unregister(&bgpio_driver);
292} 541}
293module_exit(bgpio_exit); 542module_exit(bgpio_platform_exit);
543
544#endif /* CONFIG_GPIO_BASIC_MMIO */
294 545
295MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers"); 546MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
296MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); 547MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
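The refactor above turns basic_mmio_gpio.c into a small library: bgpio_init() takes already-mapped register pointers plus a register width and wires up the accessors, while the platform-device glue becomes optional under CONFIG_GPIO_BASIC_MMIO. A hedged sketch of how an SoC driver could call the new entry point directly; the device name, MY_GPIO_PHYS address and register layout are illustrative assumptions, only the bgpio_init() signature comes from the patch:

#include <linux/basic_mmio_gpio.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define MY_GPIO_PHYS	0x10010000	/* illustrative base address */

static int my_soc_gpio_probe(struct platform_device *pdev)
{
	struct bgpio_chip *bgc;
	void __iomem *base;
	int err;

	bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
	if (!bgc)
		return -ENOMEM;

	/* Map a bank with data at +0x0 and direction-out at +0x4. */
	base = devm_ioremap(&pdev->dev, MY_GPIO_PHYS, 8);
	if (!base)
		return -ENOMEM;

	/* 4-byte (32-bit) registers, no set/clr pair, little-endian bits. */
	err = bgpio_init(bgc, &pdev->dev, 4, base, NULL, NULL,
			 base + 4, NULL, false);
	if (err)
		return err;

	platform_set_drvdata(pdev, bgc);
	return gpiochip_add(&bgc->gc);
}

Passing NULL for the set/clr and dirin pointers selects the shadowed-data write path and the "dirout" direction handling described in the comment block above; the generic probe in the patch does the same selection from named platform resources.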
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 36a2974815b7..137a8ca67822 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -12,6 +12,8 @@
12#include <linux/idr.h> 12#include <linux/idr.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14 14
15#define CREATE_TRACE_POINTS
16#include <trace/events/gpio.h>
15 17
16/* Optional implementation infrastructure for GPIO interfaces. 18/* Optional implementation infrastructure for GPIO interfaces.
17 * 19 *
@@ -1165,6 +1167,7 @@ struct gpio_chip *gpiochip_find(void *data,
1165 1167
1166 return chip; 1168 return chip;
1167} 1169}
1170EXPORT_SYMBOL_GPL(gpiochip_find);
1168 1171
1169/* These "optional" allocation calls help prevent drivers from stomping 1172/* These "optional" allocation calls help prevent drivers from stomping
1170 * on each other, and help provide better diagnostics in debugfs. 1173 * on each other, and help provide better diagnostics in debugfs.
@@ -1404,6 +1407,8 @@ int gpio_direction_input(unsigned gpio)
1404 status = chip->direction_input(chip, gpio); 1407 status = chip->direction_input(chip, gpio);
1405 if (status == 0) 1408 if (status == 0)
1406 clear_bit(FLAG_IS_OUT, &desc->flags); 1409 clear_bit(FLAG_IS_OUT, &desc->flags);
1410
1411 trace_gpio_direction(chip->base + gpio, 1, status);
1407lose: 1412lose:
1408 return status; 1413 return status;
1409fail: 1414fail:
@@ -1457,6 +1462,8 @@ int gpio_direction_output(unsigned gpio, int value)
1457 status = chip->direction_output(chip, gpio, value); 1462 status = chip->direction_output(chip, gpio, value);
1458 if (status == 0) 1463 if (status == 0)
1459 set_bit(FLAG_IS_OUT, &desc->flags); 1464 set_bit(FLAG_IS_OUT, &desc->flags);
1465 trace_gpio_value(chip->base + gpio, 0, value);
1466 trace_gpio_direction(chip->base + gpio, 0, status);
1460lose: 1467lose:
1461 return status; 1468 return status;
1462fail: 1469fail:
@@ -1546,10 +1553,13 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
1546int __gpio_get_value(unsigned gpio) 1553int __gpio_get_value(unsigned gpio)
1547{ 1554{
1548 struct gpio_chip *chip; 1555 struct gpio_chip *chip;
1556 int value;
1549 1557
1550 chip = gpio_to_chip(gpio); 1558 chip = gpio_to_chip(gpio);
1551 WARN_ON(chip->can_sleep); 1559 WARN_ON(chip->can_sleep);
1552 return chip->get ? chip->get(chip, gpio - chip->base) : 0; 1560 value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
1561 trace_gpio_value(gpio, 1, value);
1562 return value;
1553} 1563}
1554EXPORT_SYMBOL_GPL(__gpio_get_value); 1564EXPORT_SYMBOL_GPL(__gpio_get_value);
1555 1565
@@ -1568,6 +1578,7 @@ void __gpio_set_value(unsigned gpio, int value)
1568 1578
1569 chip = gpio_to_chip(gpio); 1579 chip = gpio_to_chip(gpio);
1570 WARN_ON(chip->can_sleep); 1580 WARN_ON(chip->can_sleep);
1581 trace_gpio_value(gpio, 0, value);
1571 chip->set(chip, gpio - chip->base, value); 1582 chip->set(chip, gpio - chip->base, value);
1572} 1583}
1573EXPORT_SYMBOL_GPL(__gpio_set_value); 1584EXPORT_SYMBOL_GPL(__gpio_set_value);
@@ -1618,10 +1629,13 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
1618int gpio_get_value_cansleep(unsigned gpio) 1629int gpio_get_value_cansleep(unsigned gpio)
1619{ 1630{
1620 struct gpio_chip *chip; 1631 struct gpio_chip *chip;
1632 int value;
1621 1633
1622 might_sleep_if(extra_checks); 1634 might_sleep_if(extra_checks);
1623 chip = gpio_to_chip(gpio); 1635 chip = gpio_to_chip(gpio);
1624 return chip->get ? chip->get(chip, gpio - chip->base) : 0; 1636 value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
1637 trace_gpio_value(gpio, 1, value);
1638 return value;
1625} 1639}
1626EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); 1640EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
1627 1641
@@ -1631,6 +1645,7 @@ void gpio_set_value_cansleep(unsigned gpio, int value)
1631 1645
1632 might_sleep_if(extra_checks); 1646 might_sleep_if(extra_checks);
1633 chip = gpio_to_chip(gpio); 1647 chip = gpio_to_chip(gpio);
1648 trace_gpio_value(gpio, 0, value);
1634 chip->set(chip, gpio - chip->base, value); 1649 chip->set(chip, gpio - chip->base, value);
1635} 1650}
1636EXPORT_SYMBOL_GPL(gpio_set_value_cansleep); 1651EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
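The gpiolib hunks above add trace_gpio_value()/trace_gpio_direction() events; the getters now stash the driver's return value in a local so the same value reaches both the tracepoint and the caller. A hedged sketch of that capture-then-trace shape, with my_read_raw() standing in for a driver's low-level read:

#include <trace/events/gpio.h>	/* trace_gpio_value(), declared by the patch above */

/* Illustrative stand-in for a driver's register read. */
static int my_read_raw(unsigned gpio)
{
	return gpio & 1;	/* placeholder value */
}

static int my_gpio_get(unsigned gpio)
{
	int value = my_read_raw(gpio);

	/* Second argument: 1 for a read, 0 for a write, as in the hunks above. */
	trace_gpio_value(gpio, 1, value);
	return value;
}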
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
index 2514fb075f4a..813ac077e5d7 100644
--- a/drivers/gpio/janz-ttl.c
+++ b/drivers/gpio/janz-ttl.c
@@ -15,7 +15,6 @@
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/mfd/core.h>
19#include <linux/io.h> 18#include <linux/io.h>
20#include <linux/gpio.h> 19#include <linux/gpio.h>
21#include <linux/slab.h> 20#include <linux/slab.h>
@@ -150,7 +149,7 @@ static int __devinit ttl_probe(struct platform_device *pdev)
150 struct resource *res; 149 struct resource *res;
151 int ret; 150 int ret;
152 151
153 pdata = mfd_get_data(pdev); 152 pdata = pdev->dev.platform_data;
154 if (!pdata) { 153 if (!pdata) {
155 dev_err(dev, "no platform data\n"); 154 dev_err(dev, "no platform data\n");
156 ret = -ENXIO; 155 ret = -ENXIO;
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index 0a775f7987c2..1bc621ac3536 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -15,6 +15,7 @@
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */ 16 */
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/slab.h>
18#include <linux/pci.h> 19#include <linux/pci.h>
19#include <linux/gpio.h> 20#include <linux/gpio.h>
20 21
@@ -138,6 +139,7 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
138 return 0; 139 return 0;
139} 140}
140 141
142#ifdef CONFIG_PM
141/* 143/*
142 * Save register configuration and disable interrupts. 144 * Save register configuration and disable interrupts.
143 */ 145 */
@@ -157,6 +159,7 @@ static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
157 /* to store contents of PM register */ 159 /* to store contents of PM register */
158 iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm); 160 iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm);
159} 161}
162#endif
160 163
161static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port) 164static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
162{ 165{
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 7630ab7b9bec..78a843947d82 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -397,7 +397,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
397 397
398 irq_set_chip_data(irq, chip); 398 irq_set_chip_data(irq, chip);
399 irq_set_chip_and_handler(irq, &pca953x_irq_chip, 399 irq_set_chip_and_handler(irq, &pca953x_irq_chip,
400 handle_edge_irq); 400 handle_simple_irq);
401#ifdef CONFIG_ARM 401#ifdef CONFIG_ARM
402 set_irq_flags(irq, IRQF_VALID); 402 set_irq_flags(irq, IRQF_VALID);
403#else 403#else
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
index a9bda881935a..2762698e0204 100644
--- a/drivers/gpio/rdc321x-gpio.c
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -27,7 +27,6 @@
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/mfd/rdc321x.h> 29#include <linux/mfd/rdc321x.h>
30#include <linux/mfd/core.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32 31
33struct rdc321x_gpio { 32struct rdc321x_gpio {
@@ -136,7 +135,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
136 struct rdc321x_gpio *rdc321x_gpio_dev; 135 struct rdc321x_gpio *rdc321x_gpio_dev;
137 struct rdc321x_gpio_pdata *pdata; 136 struct rdc321x_gpio_pdata *pdata;
138 137
139 pdata = mfd_get_data(pdev); 138 pdata = pdev->dev.platform_data;
140 if (!pdata) { 139 if (!pdata) {
141 dev_err(&pdev->dev, "no platform data supplied\n"); 140 dev_err(&pdev->dev, "no platform data supplied\n");
142 return -ENODEV; 141 return -ENODEV;
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index edbe1eae531f..0265872e57d1 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -23,7 +23,6 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/mfd/core.h>
27#include <linux/irq.h> 26#include <linux/irq.h>
28#include <linux/io.h> 27#include <linux/io.h>
29#include <linux/timb_gpio.h> 28#include <linux/timb_gpio.h>
@@ -229,7 +228,7 @@ static int __devinit timbgpio_probe(struct platform_device *pdev)
229 struct gpio_chip *gc; 228 struct gpio_chip *gc;
230 struct timbgpio *tgpio; 229 struct timbgpio *tgpio;
231 struct resource *iomem; 230 struct resource *iomem;
232 struct timbgpio_platform_data *pdata = mfd_get_data(pdev); 231 struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
233 int irq = platform_get_irq(pdev, 0); 232 int irq = platform_get_irq(pdev, 0);
234 233
235 if (!pdata || pdata->nr_pins > 32) { 234 if (!pdata || pdata->nr_pins > 32) {
@@ -320,13 +319,14 @@ err_mem:
320static int __devexit timbgpio_remove(struct platform_device *pdev) 319static int __devexit timbgpio_remove(struct platform_device *pdev)
321{ 320{
322 int err; 321 int err;
322 struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
323 struct timbgpio *tgpio = platform_get_drvdata(pdev); 323 struct timbgpio *tgpio = platform_get_drvdata(pdev);
324 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 324 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
325 int irq = platform_get_irq(pdev, 0); 325 int irq = platform_get_irq(pdev, 0);
326 326
327 if (irq >= 0 && tgpio->irq_base > 0) { 327 if (irq >= 0 && tgpio->irq_base > 0) {
328 int i; 328 int i;
329 for (i = 0; i < tgpio->gpio.ngpio; i++) { 329 for (i = 0; i < pdata->nr_pins; i++) {
330 irq_set_chip(tgpio->irq_base + i, NULL); 330 irq_set_chip(tgpio->irq_base + i, NULL);
331 irq_set_chip_data(tgpio->irq_base + i, NULL); 331 irq_set_chip_data(tgpio->irq_base + i, NULL);
332 } 332 }
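Several hunks above (janz-ttl, rdc321x-gpio, timbgpio) swap mfd_get_data() for a direct read of pdev->dev.platform_data. A hedged aside: the dev_get_platdata() helper expresses the same lookup without touching the field by hand; the timbgpio type is used here purely as an example and the helper name is mine:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

/* Equivalent to reading pdev->dev.platform_data directly, as the hunks
 * above now do; dev_get_platdata() is just the accessor form. */
static struct timbgpio_platform_data *my_get_pdata(struct platform_device *pdev)
{
	return dev_get_platdata(&pdev->dev);
}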
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
new file mode 100644
index 000000000000..8d1ddfdd63eb
--- /dev/null
+++ b/drivers/gpio/tps65910-gpio.c
@@ -0,0 +1,100 @@
1/*
2 * tps65910-gpio.c -- TI TPS6591x
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/gpio.h>
20#include <linux/i2c.h>
21#include <linux/mfd/tps65910.h>
22
23static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
24{
25 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
26 uint8_t val;
27
28 tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
29
30 if (val & GPIO_STS_MASK)
31 return 1;
32
33 return 0;
34}
35
36static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
37 int value)
38{
39 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
40
41 if (value)
42 tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
43 GPIO_SET_MASK);
44 else
45 tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
46 GPIO_SET_MASK);
47}
48
49static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
50 int value)
51{
52 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
53
54 /* Set the initial value */
55 tps65910_gpio_set(gc, 0, value);
56
57 return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
58 GPIO_CFG_MASK);
59}
60
61static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
62{
63 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
64
65 return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
66 GPIO_CFG_MASK);
67}
68
69void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
70{
71 int ret;
72
73 if (!gpio_base)
74 return;
75
76 tps65910->gpio.owner = THIS_MODULE;
77 tps65910->gpio.label = tps65910->i2c_client->name;
78 tps65910->gpio.dev = tps65910->dev;
79 tps65910->gpio.base = gpio_base;
80
81 switch(tps65910_chip_id(tps65910)) {
82 case TPS65910:
83 tps65910->gpio.ngpio = 6;
84 case TPS65911:
85 tps65910->gpio.ngpio = 9;
86 default:
87 return;
88 }
89 tps65910->gpio.can_sleep = 1;
90
91 tps65910->gpio.direction_input = tps65910_gpio_input;
92 tps65910->gpio.direction_output = tps65910_gpio_output;
93 tps65910->gpio.set = tps65910_gpio_set;
94 tps65910->gpio.get = tps65910_gpio_get;
95
96 ret = gpiochip_add(&tps65910->gpio);
97
98 if (ret)
99 dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
100}
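In the new tps65910-gpio.c above, the chip-ID switch has no break statements: both known chips fall through into the default case, so tps65910_gpio_init() returns before gpiochip_add() is ever reached. A hedged sketch of how the selection would normally read, assuming that fall-through is unintended (the helper name is illustrative, the chip IDs come from the header the file already includes):

#include <linux/mfd/tps65910.h>

/* Return the GPIO count for a known chip, or 0 so the caller can bail
 * out before registering the chip. */
static int tps65910_gpio_count(struct tps65910 *tps65910)
{
	switch (tps65910_chip_id(tps65910)) {
	case TPS65910:
		return 6;
	case TPS65911:
		return 9;
	default:
		return 0;
	}
}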
diff --git a/drivers/gpio/vx855_gpio.c b/drivers/gpio/vx855_gpio.c
index 8a98ee5d5f6c..ef5aabd8b8b7 100644
--- a/drivers/gpio/vx855_gpio.c
+++ b/drivers/gpio/vx855_gpio.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/slab.h>
29#include <linux/device.h> 30#include <linux/device.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/pci.h> 32#include <linux/pci.h>
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c6289034e29a..0b2e167d2bce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -56,9 +56,7 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
56static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); 56static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
57 57
58static int i915_gem_inactive_shrink(struct shrinker *shrinker, 58static int i915_gem_inactive_shrink(struct shrinker *shrinker,
59 int nr_to_scan, 59 struct shrink_control *sc);
60 gfp_t gfp_mask);
61
62 60
63/* some bookkeeping */ 61/* some bookkeeping */
64static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 62static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -4092,9 +4090,7 @@ i915_gpu_is_active(struct drm_device *dev)
4092} 4090}
4093 4091
4094static int 4092static int
4095i915_gem_inactive_shrink(struct shrinker *shrinker, 4093i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4096 int nr_to_scan,
4097 gfp_t gfp_mask)
4098{ 4094{
4099 struct drm_i915_private *dev_priv = 4095 struct drm_i915_private *dev_priv =
4100 container_of(shrinker, 4096 container_of(shrinker,
@@ -4102,6 +4098,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker,
4102 mm.inactive_shrinker); 4098 mm.inactive_shrinker);
4103 struct drm_device *dev = dev_priv->dev; 4099 struct drm_device *dev = dev_priv->dev;
4104 struct drm_i915_gem_object *obj, *next; 4100 struct drm_i915_gem_object *obj, *next;
4101 int nr_to_scan = sc->nr_to_scan;
4105 int cnt; 4102 int cnt;
4106 4103
4107 if (!mutex_trylock(&dev->struct_mutex)) 4104 if (!mutex_trylock(&dev->struct_mutex))
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 9d9d92945f8c..d948575717bf 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -395,12 +395,14 @@ static int ttm_pool_get_num_unused_pages(void)
395/** 395/**
396 * Callback for mm to request pool to reduce number of page held. 396 * Callback for mm to request pool to reduce number of page held.
397 */ 397 */
398static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) 398static int ttm_pool_mm_shrink(struct shrinker *shrink,
399 struct shrink_control *sc)
399{ 400{
400 static atomic_t start_pool = ATOMIC_INIT(0); 401 static atomic_t start_pool = ATOMIC_INIT(0);
401 unsigned i; 402 unsigned i;
402 unsigned pool_offset = atomic_add_return(1, &start_pool); 403 unsigned pool_offset = atomic_add_return(1, &start_pool);
403 struct ttm_page_pool *pool; 404 struct ttm_page_pool *pool;
405 int shrink_pages = sc->nr_to_scan;
404 406
405 pool_offset = pool_offset % NUM_POOLS; 407 pool_offset = pool_offset % NUM_POOLS;
406 /* select start pool in round robin fashion */ 408 /* select start pool in round robin fashion */
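The i915 and TTM hunks above adapt their shrinkers to the reworked callback signature, which bundles the scan request into struct shrink_control instead of passing nr_to_scan and gfp_mask separately. A hedged sketch of a minimal shrinker against that interface; the my_cache_* bookkeeping is illustrative only:

#include <linux/kernel.h>
#include <linux/mm.h>

/* Illustrative object count; a real cache would track and free objects. */
static int my_cache_objects;

static int my_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	/* sc->nr_to_scan == 0 means "only report how many objects are cached". */
	if (sc->nr_to_scan) {
		int nr = min_t(int, sc->nr_to_scan, my_cache_objects);

		my_cache_objects -= nr;		/* stand-in for real freeing */
	}

	return my_cache_objects;
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* Registered once at init time: register_shrinker(&my_cache_shrinker); */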
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 43221beb9e97..16db83c83c8b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -41,7 +41,7 @@ comment "Native drivers"
41 41
42config SENSORS_ABITUGURU 42config SENSORS_ABITUGURU
43 tristate "Abit uGuru (rev 1 & 2)" 43 tristate "Abit uGuru (rev 1 & 2)"
44 depends on X86 && EXPERIMENTAL 44 depends on X86 && DMI && EXPERIMENTAL
45 help 45 help
46 If you say yes here you get support for the sensor part of the first 46 If you say yes here you get support for the sensor part of the first
47 and second revision of the Abit uGuru chip. The voltage and frequency 47 and second revision of the Abit uGuru chip. The voltage and frequency
@@ -56,7 +56,7 @@ config SENSORS_ABITUGURU
56 56
57config SENSORS_ABITUGURU3 57config SENSORS_ABITUGURU3
58 tristate "Abit uGuru (rev 3)" 58 tristate "Abit uGuru (rev 3)"
59 depends on X86 && EXPERIMENTAL 59 depends on X86 && DMI && EXPERIMENTAL
60 help 60 help
61 If you say yes here you get support for the sensor part of the 61 If you say yes here you get support for the sensor part of the
62 third revision of the Abit uGuru chip. Only reading the sensors 62 third revision of the Abit uGuru chip. Only reading the sensors
@@ -213,7 +213,7 @@ config SENSORS_ADT7475
213 213
214config SENSORS_ASC7621 214config SENSORS_ASC7621
215 tristate "Andigilog aSC7621" 215 tristate "Andigilog aSC7621"
216 depends on HWMON && I2C 216 depends on I2C
217 help 217 help
218 If you say yes here you get support for the aSC7621 218 If you say yes here you get support for the aSC7621
219 family of SMBus sensors chip found on most Intel X38, X48, X58, 219 family of SMBus sensors chip found on most Intel X38, X48, X58,
@@ -237,17 +237,27 @@ config SENSORS_K8TEMP
237 will be called k8temp. 237 will be called k8temp.
238 238
239config SENSORS_K10TEMP 239config SENSORS_K10TEMP
240 tristate "AMD Family 10h/11h/12h/14h temperature sensor" 240 tristate "AMD Family 10h+ temperature sensor"
241 depends on X86 && PCI 241 depends on X86 && PCI
242 help 242 help
243 If you say yes here you get support for the temperature 243 If you say yes here you get support for the temperature
244 sensor(s) inside your CPU. Supported are later revisions of 244 sensor(s) inside your CPU. Supported are later revisions of
245 the AMD Family 10h and all revisions of the AMD Family 11h, 245 the AMD Family 10h and all revisions of the AMD Family 11h,
246 12h (Llano), and 14h (Brazos) microarchitectures. 246 12h (Llano), 14h (Brazos) and 15h (Bulldozer) microarchitectures.
247 247
248 This driver can also be built as a module. If so, the module 248 This driver can also be built as a module. If so, the module
249 will be called k10temp. 249 will be called k10temp.
250 250
251config SENSORS_FAM15H_POWER
252 tristate "AMD Family 15h processor power"
253 depends on X86 && PCI
254 help
255 If you say yes here you get support for processor power
256 information of your AMD family 15h CPU.
257
258 This driver can also be built as a module. If so, the module
259 will be called fam15h_power.
260
251config SENSORS_ASB100 261config SENSORS_ASB100
252 tristate "Asus ASB100 Bach" 262 tristate "Asus ASB100 Bach"
253 depends on X86 && I2C && EXPERIMENTAL 263 depends on X86 && I2C && EXPERIMENTAL
@@ -319,7 +329,7 @@ config SENSORS_F71882FG
319 If you say yes here you get support for hardware monitoring 329 If you say yes here you get support for hardware monitoring
320 features of many Fintek Super-I/O (LPC) chips. The currently 330 features of many Fintek Super-I/O (LPC) chips. The currently
321 supported chips are: 331 supported chips are:
322 F71808E 332 F71808E/A
323 F71858FG 333 F71858FG
324 F71862FG 334 F71862FG
325 F71863FG 335 F71863FG
@@ -978,6 +988,16 @@ config SENSORS_EMC2103
978 This driver can also be built as a module. If so, the module 988 This driver can also be built as a module. If so, the module
979 will be called emc2103. 989 will be called emc2103.
980 990
991config SENSORS_EMC6W201
992 tristate "SMSC EMC6W201"
993 depends on I2C
994 help
995 If you say yes here you get support for the SMSC EMC6W201
996 hardware monitoring chip.
997
998 This driver can also be built as a module. If so, the module
999 will be called emc6w201.
1000
981config SENSORS_SMSC47M1 1001config SENSORS_SMSC47M1
982 tristate "SMSC LPC47M10x and compatibles" 1002 tristate "SMSC LPC47M10x and compatibles"
983 help 1003 help
@@ -1341,6 +1361,16 @@ if ACPI
1341 1361
1342comment "ACPI drivers" 1362comment "ACPI drivers"
1343 1363
1364config SENSORS_ACPI_POWER
1365 tristate "ACPI 4.0 power meter"
1366 help
1367 This driver exposes ACPI 4.0 power meters as hardware monitoring
1368 devices. Say Y (or M) if you have a computer with ACPI 4.0 firmware
1369 and a power meter.
1370
1371 To compile this driver as a module, choose M here:
1372 the module will be called acpi_power_meter.
1373
1344config SENSORS_ATK0110 1374config SENSORS_ATK0110
1345 tristate "ASUS ATK0110" 1375 tristate "ASUS ATK0110"
1346 depends on X86 && EXPERIMENTAL 1376 depends on X86 && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 28e8d52f6379..28061cfa0cdb 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_HWMON) += hwmon.o
6obj-$(CONFIG_HWMON_VID) += hwmon-vid.o 6obj-$(CONFIG_HWMON_VID) += hwmon-vid.o
7 7
8# APCI drivers 8# APCI drivers
9obj-$(CONFIG_SENSORS_ACPI_POWER) += acpi_power_meter.o
9obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o 10obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
10 11
11# Native drivers 12# Native drivers
@@ -45,9 +46,11 @@ obj-$(CONFIG_SENSORS_DS620) += ds620.o
45obj-$(CONFIG_SENSORS_DS1621) += ds1621.o 46obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
46obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o 47obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
47obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o 48obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o
49obj-$(CONFIG_SENSORS_EMC6W201) += emc6w201.o
48obj-$(CONFIG_SENSORS_F71805F) += f71805f.o 50obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
49obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o 51obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
50obj-$(CONFIG_SENSORS_F75375S) += f75375s.o 52obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
53obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o
51obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o 54obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o
52obj-$(CONFIG_SENSORS_G760A) += g760a.o 55obj-$(CONFIG_SENSORS_G760A) += g760a.o
53obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o 56obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index e7d4c4687f02..65a35cf5b3c5 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1448,15 +1448,12 @@ static int __init abituguru_init(void)
1448{ 1448{
1449 int address, err; 1449 int address, err;
1450 struct resource res = { .flags = IORESOURCE_IO }; 1450 struct resource res = { .flags = IORESOURCE_IO };
1451
1452#ifdef CONFIG_DMI
1453 const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); 1451 const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
1454 1452
1455 /* safety check, refuse to load on non Abit motherboards */ 1453 /* safety check, refuse to load on non Abit motherboards */
1456 if (!force && (!board_vendor || 1454 if (!force && (!board_vendor ||
1457 strcmp(board_vendor, "http://www.abit.com.tw/"))) 1455 strcmp(board_vendor, "http://www.abit.com.tw/")))
1458 return -ENODEV; 1456 return -ENODEV;
1459#endif
1460 1457
1461 address = abituguru_detect(); 1458 address = abituguru_detect();
1462 if (address < 0) 1459 if (address < 0)
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index e89d572e3320..d30855a75786 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1119,8 +1119,6 @@ static struct platform_driver abituguru3_driver = {
1119 .resume = abituguru3_resume 1119 .resume = abituguru3_resume
1120}; 1120};
1121 1121
1122#ifdef CONFIG_DMI
1123
1124static int __init abituguru3_dmi_detect(void) 1122static int __init abituguru3_dmi_detect(void)
1125{ 1123{
1126 const char *board_vendor, *board_name; 1124 const char *board_vendor, *board_name;
@@ -1159,15 +1157,6 @@ static int __init abituguru3_dmi_detect(void)
1159 return 1; 1157 return 1;
1160} 1158}
1161 1159
1162#else /* !CONFIG_DMI */
1163
1164static inline int abituguru3_dmi_detect(void)
1165{
1166 return 1;
1167}
1168
1169#endif /* CONFIG_DMI */
1170
1171/* FIXME: Manual detection should die eventually; we need to collect stable 1160/* FIXME: Manual detection should die eventually; we need to collect stable
1172 * DMI model names first before we can rely entirely on CONFIG_DMI. 1161 * DMI model names first before we can rely entirely on CONFIG_DMI.
1173 */ 1162 */
@@ -1216,10 +1205,8 @@ static int __init abituguru3_init(void)
1216 if (err) 1205 if (err)
1217 return err; 1206 return err;
1218 1207
1219#ifdef CONFIG_DMI
1220 pr_warn("this motherboard was not detected using DMI. " 1208 pr_warn("this motherboard was not detected using DMI. "
1221 "Please send the output of \"dmidecode\" to the abituguru3 maintainer (see MAINTAINERS)\n"); 1209 "Please send the output of \"dmidecode\" to the abituguru3 maintainer (see MAINTAINERS)\n");
1222#endif
1223 } 1210 }
1224 1211
1225 err = platform_driver_register(&abituguru3_driver); 1212 err = platform_driver_register(&abituguru3_driver);
diff --git a/drivers/acpi/power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 66f67293341e..66f67293341e 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index fbdc7655303b..b2cacbe707a8 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -62,7 +62,7 @@ static ssize_t adcxx_read(struct device *dev,
62{ 62{
63 struct spi_device *spi = to_spi_device(dev); 63 struct spi_device *spi = to_spi_device(dev);
64 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 64 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
65 struct adcxx *adc = dev_get_drvdata(&spi->dev); 65 struct adcxx *adc = spi_get_drvdata(spi);
66 u8 tx_buf[2]; 66 u8 tx_buf[2];
67 u8 rx_buf[2]; 67 u8 rx_buf[2];
68 int status; 68 int status;
@@ -105,7 +105,7 @@ static ssize_t adcxx_show_max(struct device *dev,
105 struct device_attribute *devattr, char *buf) 105 struct device_attribute *devattr, char *buf)
106{ 106{
107 struct spi_device *spi = to_spi_device(dev); 107 struct spi_device *spi = to_spi_device(dev);
108 struct adcxx *adc = dev_get_drvdata(&spi->dev); 108 struct adcxx *adc = spi_get_drvdata(spi);
109 u32 reference; 109 u32 reference;
110 110
111 if (mutex_lock_interruptible(&adc->lock)) 111 if (mutex_lock_interruptible(&adc->lock))
@@ -122,7 +122,7 @@ static ssize_t adcxx_set_max(struct device *dev,
122 struct device_attribute *devattr, const char *buf, size_t count) 122 struct device_attribute *devattr, const char *buf, size_t count)
123{ 123{
124 struct spi_device *spi = to_spi_device(dev); 124 struct spi_device *spi = to_spi_device(dev);
125 struct adcxx *adc = dev_get_drvdata(&spi->dev); 125 struct adcxx *adc = spi_get_drvdata(spi);
126 unsigned long value; 126 unsigned long value;
127 127
128 if (strict_strtoul(buf, 10, &value)) 128 if (strict_strtoul(buf, 10, &value))
@@ -142,7 +142,7 @@ static ssize_t adcxx_show_name(struct device *dev, struct device_attribute
142 *devattr, char *buf) 142 *devattr, char *buf)
143{ 143{
144 struct spi_device *spi = to_spi_device(dev); 144 struct spi_device *spi = to_spi_device(dev);
145 struct adcxx *adc = dev_get_drvdata(&spi->dev); 145 struct adcxx *adc = spi_get_drvdata(spi);
146 146
147 return sprintf(buf, "adcxx%ds\n", adc->channels); 147 return sprintf(buf, "adcxx%ds\n", adc->channels);
148} 148}
@@ -182,7 +182,7 @@ static int __devinit adcxx_probe(struct spi_device *spi)
182 182
183 mutex_lock(&adc->lock); 183 mutex_lock(&adc->lock);
184 184
185 dev_set_drvdata(&spi->dev, adc); 185 spi_set_drvdata(spi, adc);
186 186
187 for (i = 0; i < 3 + adc->channels; i++) { 187 for (i = 0; i < 3 + adc->channels; i++) {
188 status = device_create_file(&spi->dev, &ad_input[i].dev_attr); 188 status = device_create_file(&spi->dev, &ad_input[i].dev_attr);
@@ -206,7 +206,7 @@ out_err:
206 for (i--; i >= 0; i--) 206 for (i--; i >= 0; i--)
207 device_remove_file(&spi->dev, &ad_input[i].dev_attr); 207 device_remove_file(&spi->dev, &ad_input[i].dev_attr);
208 208
209 dev_set_drvdata(&spi->dev, NULL); 209 spi_set_drvdata(spi, NULL);
210 mutex_unlock(&adc->lock); 210 mutex_unlock(&adc->lock);
211 kfree(adc); 211 kfree(adc);
212 return status; 212 return status;
@@ -214,7 +214,7 @@ out_err:
214 214
215static int __devexit adcxx_remove(struct spi_device *spi) 215static int __devexit adcxx_remove(struct spi_device *spi)
216{ 216{
217 struct adcxx *adc = dev_get_drvdata(&spi->dev); 217 struct adcxx *adc = spi_get_drvdata(spi);
218 int i; 218 int i;
219 219
220 mutex_lock(&adc->lock); 220 mutex_lock(&adc->lock);
@@ -222,7 +222,7 @@ static int __devexit adcxx_remove(struct spi_device *spi)
222 for (i = 0; i < 3 + adc->channels; i++) 222 for (i = 0; i < 3 + adc->channels; i++)
223 device_remove_file(&spi->dev, &ad_input[i].dev_attr); 223 device_remove_file(&spi->dev, &ad_input[i].dev_attr);
224 224
225 dev_set_drvdata(&spi->dev, NULL); 225 spi_set_drvdata(spi, NULL);
226 mutex_unlock(&adc->lock); 226 mutex_unlock(&adc->lock);
227 kfree(adc); 227 kfree(adc);
228 228
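For reference, the spi_get_drvdata()/spi_set_drvdata() helpers that replace the raw dev_*_drvdata(&spi->dev, ...) calls above are thin wrappers around the same per-device field; roughly (sketch of the <linux/spi/spi.h> helpers, not part of this patch):

static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
	dev_set_drvdata(&spi->dev, data);
}

static inline void *spi_get_drvdata(struct spi_device *spi)
{
	return dev_get_drvdata(&spi->dev);
}
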
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 9577c432e77f..de3d2465fe24 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -350,6 +350,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
350 350
351static int create_name_attr(struct platform_data *pdata, struct device *dev) 351static int create_name_attr(struct platform_data *pdata, struct device *dev)
352{ 352{
353 sysfs_attr_init(&pdata->name_attr.attr);
353 pdata->name_attr.attr.name = "name"; 354 pdata->name_attr.attr.name = "name";
354 pdata->name_attr.attr.mode = S_IRUGO; 355 pdata->name_attr.attr.mode = S_IRUGO;
355 pdata->name_attr.show = show_name; 356 pdata->name_attr.show = show_name;
@@ -372,6 +373,7 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
372 for (i = 0; i < MAX_ATTRS; i++) { 373 for (i = 0; i < MAX_ATTRS; i++) {
373 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], 374 snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
374 attr_no); 375 attr_no);
376 sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
375 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; 377 tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
376 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; 378 tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
377 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; 379 tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
@@ -422,7 +424,7 @@ static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
422 } 424 }
423} 425}
424 426
425static int chk_ucode_version(struct platform_device *pdev) 427static int __devinit chk_ucode_version(struct platform_device *pdev)
426{ 428{
427 struct cpuinfo_x86 *c = &cpu_data(pdev->id); 429 struct cpuinfo_x86 *c = &cpu_data(pdev->id);
428 int err; 430 int err;
@@ -509,8 +511,8 @@ static int create_core_data(struct platform_data *pdata,
509 /* 511 /*
510 * Provide a single set of attributes for all HT siblings of a core 512 * Provide a single set of attributes for all HT siblings of a core
511 * to avoid duplicate sensors (the processor ID and core ID of all 513 * to avoid duplicate sensors (the processor ID and core ID of all
512 * HT siblings of a core is the same). 514 * HT siblings of a core are the same).
513 * Skip if a HT sibling of this core is already online. 515 * Skip if a HT sibling of this core is already registered.
514 * This is not an error. 516 * This is not an error.
515 */ 517 */
516 if (pdata->core_data[attr_no] != NULL) 518 if (pdata->core_data[attr_no] != NULL)
@@ -770,10 +772,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
770 coretemp_remove_core(pdata, &pdev->dev, indx); 772 coretemp_remove_core(pdata, &pdev->dev, indx);
771 773
772 /* 774 /*
773 * If a core is taken offline, but a HT sibling of the same core is 775 * If a HT sibling of a core is taken offline, but another HT sibling
774 * still online, register the alternate sibling. This ensures that 776 * of the same core is still online, register the alternate sibling.
775 * exactly one set of attributes is provided as long as at least one 777 * This ensures that exactly one set of attributes is provided as long
776 * HT sibling of a core is online. 778 * as at least one HT sibling of a core is online.
777 */ 779 */
778 for_each_sibling(i, cpu) { 780 for_each_sibling(i, cpu) {
779 if (i != cpu) { 781 if (i != cpu) {
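The sysfs_attr_init() calls added above matter because coretemp allocates its attributes dynamically: with CONFIG_DEBUG_LOCK_ALLOC the macro gives each such attribute its own lockdep class key, something statically declared attributes get for free. Paraphrased essence of the macro (assumed from <linux/sysfs.h>, not part of this patch):

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define sysfs_attr_init(attr)				\
do {							\
	static struct lock_class_key __key;		\
							\
	(attr)->key = &__key;				\
} while (0)
#else
#define sysfs_attr_init(attr) do {} while (0)
#endif
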
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
new file mode 100644
index 000000000000..e0ef32378ac6
--- /dev/null
+++ b/drivers/hwmon/emc6w201.c
@@ -0,0 +1,539 @@
1/*
2 * emc6w201.c - Hardware monitoring driver for the SMSC EMC6W201
3 * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/module.h>
21#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/jiffies.h>
25#include <linux/i2c.h>
26#include <linux/hwmon.h>
27#include <linux/hwmon-sysfs.h>
28#include <linux/err.h>
29#include <linux/mutex.h>
30
31/*
32 * Addresses to scan
33 */
34
35static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
36
37/*
38 * The EMC6W201 registers
39 */
40
41#define EMC6W201_REG_IN(nr) (0x20 + (nr))
42#define EMC6W201_REG_TEMP(nr) (0x26 + (nr))
43#define EMC6W201_REG_FAN(nr) (0x2C + (nr) * 2)
44#define EMC6W201_REG_COMPANY 0x3E
45#define EMC6W201_REG_VERSTEP 0x3F
46#define EMC6W201_REG_CONFIG 0x40
47#define EMC6W201_REG_IN_LOW(nr) (0x4A + (nr) * 2)
48#define EMC6W201_REG_IN_HIGH(nr) (0x4B + (nr) * 2)
49#define EMC6W201_REG_TEMP_LOW(nr) (0x56 + (nr) * 2)
50#define EMC6W201_REG_TEMP_HIGH(nr) (0x57 + (nr) * 2)
51#define EMC6W201_REG_FAN_MIN(nr) (0x62 + (nr) * 2)
52
53enum subfeature { input, min, max };
54
55/*
56 * Per-device data
57 */
58
59struct emc6w201_data {
60 struct device *hwmon_dev;
61 struct mutex update_lock;
62 char valid; /* zero until following fields are valid */
63 unsigned long last_updated; /* in jiffies */
64
65 /* registers values */
66 u8 in[3][6];
67 s8 temp[3][6];
68 u16 fan[2][5];
69};
70
71/*
72 * Combine LSB and MSB registers in a single value
73 * Locking: must be called with data->update_lock held
74 */
75static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
76{
77 int lsb, msb;
78
79 lsb = i2c_smbus_read_byte_data(client, reg);
80 msb = i2c_smbus_read_byte_data(client, reg + 1);
81 if (lsb < 0 || msb < 0) {
82 dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
83 return 0xFFFF; /* Arbitrary value */
84 }
85
86 return (msb << 8) | lsb;
87}
88
89/*
90 * Write 16-bit value to LSB and MSB registers
91 * Locking: must be called with data->update_lock held
92 */
93static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
94{
95 int err;
96
97 err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
98 if (!err)
99 err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
100 if (err < 0)
101 dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
102
103 return err;
104}
105
106static struct emc6w201_data *emc6w201_update_device(struct device *dev)
107{
108 struct i2c_client *client = to_i2c_client(dev);
109 struct emc6w201_data *data = i2c_get_clientdata(client);
110 int nr;
111
112 mutex_lock(&data->update_lock);
113
114 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
115 for (nr = 0; nr < 6; nr++) {
116 data->in[input][nr] =
117 i2c_smbus_read_byte_data(client,
118 EMC6W201_REG_IN(nr));
119 data->in[min][nr] =
120 i2c_smbus_read_byte_data(client,
121 EMC6W201_REG_IN_LOW(nr));
122 data->in[max][nr] =
123 i2c_smbus_read_byte_data(client,
124 EMC6W201_REG_IN_HIGH(nr));
125 }
126
127 for (nr = 0; nr < 6; nr++) {
128 data->temp[input][nr] =
129 i2c_smbus_read_byte_data(client,
130 EMC6W201_REG_TEMP(nr));
131 data->temp[min][nr] =
132 i2c_smbus_read_byte_data(client,
133 EMC6W201_REG_TEMP_LOW(nr));
134 data->temp[max][nr] =
135 i2c_smbus_read_byte_data(client,
136 EMC6W201_REG_TEMP_HIGH(nr));
137 }
138
139 for (nr = 0; nr < 5; nr++) {
140 data->fan[input][nr] =
141 emc6w201_read16(client,
142 EMC6W201_REG_FAN(nr));
143 data->fan[min][nr] =
144 emc6w201_read16(client,
145 EMC6W201_REG_FAN_MIN(nr));
146 }
147
148 data->last_updated = jiffies;
149 data->valid = 1;
150 }
151
152 mutex_unlock(&data->update_lock);
153
154 return data;
155}
156
157/*
158 * Sysfs callback functions
159 */
160
161static const u16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 };
162
163static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
164 char *buf)
165{
166 struct emc6w201_data *data = emc6w201_update_device(dev);
167 int sf = to_sensor_dev_attr_2(devattr)->index;
168 int nr = to_sensor_dev_attr_2(devattr)->nr;
169
170 return sprintf(buf, "%u\n",
171 (unsigned)data->in[sf][nr] * nominal_mv[nr] / 0xC0);
172}
173
174static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
175 const char *buf, size_t count)
176{
177 struct i2c_client *client = to_i2c_client(dev);
178 struct emc6w201_data *data = i2c_get_clientdata(client);
179 int sf = to_sensor_dev_attr_2(devattr)->index;
180 int nr = to_sensor_dev_attr_2(devattr)->nr;
181 int err;
182 long val;
183 u8 reg;
184
185 err = strict_strtol(buf, 10, &val);
186 if (err < 0)
187 return err;
188
189 val = DIV_ROUND_CLOSEST(val * 0xC0, nominal_mv[nr]);
190 reg = (sf == min) ? EMC6W201_REG_IN_LOW(nr)
191 : EMC6W201_REG_IN_HIGH(nr);
192
193 mutex_lock(&data->update_lock);
194 data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
195 err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
196 mutex_unlock(&data->update_lock);
197
198 return err < 0 ? err : count;
199}
200
201static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
202 char *buf)
203{
204 struct emc6w201_data *data = emc6w201_update_device(dev);
205 int sf = to_sensor_dev_attr_2(devattr)->index;
206 int nr = to_sensor_dev_attr_2(devattr)->nr;
207
208 return sprintf(buf, "%d\n", (int)data->temp[sf][nr] * 1000);
209}
210
211static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
212 const char *buf, size_t count)
213{
214 struct i2c_client *client = to_i2c_client(dev);
215 struct emc6w201_data *data = i2c_get_clientdata(client);
216 int sf = to_sensor_dev_attr_2(devattr)->index;
217 int nr = to_sensor_dev_attr_2(devattr)->nr;
218 int err;
219 long val;
220 u8 reg;
221
222 err = strict_strtol(buf, 10, &val);
223 if (err < 0)
224 return err;
225
226 val /= 1000;
227 reg = (sf == min) ? EMC6W201_REG_TEMP_LOW(nr)
228 : EMC6W201_REG_TEMP_HIGH(nr);
229
230 mutex_lock(&data->update_lock);
231 data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
232 err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
233 mutex_unlock(&data->update_lock);
234
235 return err < 0 ? err : count;
236}
237
238static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
239 char *buf)
240{
241 struct emc6w201_data *data = emc6w201_update_device(dev);
242 int sf = to_sensor_dev_attr_2(devattr)->index;
243 int nr = to_sensor_dev_attr_2(devattr)->nr;
244 unsigned rpm;
245
246 if (data->fan[sf][nr] == 0 || data->fan[sf][nr] == 0xFFFF)
247 rpm = 0;
248 else
249 rpm = 5400000U / data->fan[sf][nr];
250
251 return sprintf(buf, "%u\n", rpm);
252}
253
254static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
255 const char *buf, size_t count)
256{
257 struct i2c_client *client = to_i2c_client(dev);
258 struct emc6w201_data *data = i2c_get_clientdata(client);
259 int sf = to_sensor_dev_attr_2(devattr)->index;
260 int nr = to_sensor_dev_attr_2(devattr)->nr;
261 int err;
262 unsigned long val;
263
264 err = strict_strtoul(buf, 10, &val);
265 if (err < 0)
266 return err;
267
268 if (val == 0) {
269 val = 0xFFFF;
270 } else {
271 val = DIV_ROUND_CLOSEST(5400000U, val);
272 val = SENSORS_LIMIT(val, 0, 0xFFFE);
273 }
274
275 mutex_lock(&data->update_lock);
276 data->fan[sf][nr] = val;
277 err = emc6w201_write16(client, EMC6W201_REG_FAN_MIN(nr),
278 data->fan[sf][nr]);
279 mutex_unlock(&data->update_lock);
280
281 return err < 0 ? err : count;
282}
283
284static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, input);
285static SENSOR_DEVICE_ATTR_2(in0_min, S_IRUGO | S_IWUSR, show_in, set_in,
286 0, min);
287static SENSOR_DEVICE_ATTR_2(in0_max, S_IRUGO | S_IWUSR, show_in, set_in,
288 0, max);
289static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 1, input);
290static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_in, set_in,
291 1, min);
292static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_in, set_in,
293 1, max);
294static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 2, input);
295static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_in, set_in,
296 2, min);
297static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_in, set_in,
298 2, max);
299static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 3, input);
300static SENSOR_DEVICE_ATTR_2(in3_min, S_IRUGO | S_IWUSR, show_in, set_in,
301 3, min);
302static SENSOR_DEVICE_ATTR_2(in3_max, S_IRUGO | S_IWUSR, show_in, set_in,
303 3, max);
304static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 4, input);
305static SENSOR_DEVICE_ATTR_2(in4_min, S_IRUGO | S_IWUSR, show_in, set_in,
306 4, min);
307static SENSOR_DEVICE_ATTR_2(in4_max, S_IRUGO | S_IWUSR, show_in, set_in,
308 4, max);
309static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 5, input);
310static SENSOR_DEVICE_ATTR_2(in5_min, S_IRUGO | S_IWUSR, show_in, set_in,
311 5, min);
312static SENSOR_DEVICE_ATTR_2(in5_max, S_IRUGO | S_IWUSR, show_in, set_in,
313 5, max);
314
315static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, input);
316static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
317 0, min);
318static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
319 0, max);
320static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, input);
321static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
322 1, min);
323static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
324 1, max);
325static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 2, input);
326static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
327 2, min);
328static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
329 2, max);
330static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, input);
331static SENSOR_DEVICE_ATTR_2(temp4_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
332 3, min);
333static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
334 3, max);
335static SENSOR_DEVICE_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, input);
336static SENSOR_DEVICE_ATTR_2(temp5_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
337 4, min);
338static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
339 4, max);
340static SENSOR_DEVICE_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, input);
341static SENSOR_DEVICE_ATTR_2(temp6_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
342 5, min);
343static SENSOR_DEVICE_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
344 5, max);
345
346static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, input);
347static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
348 0, min);
349static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 1, input);
350static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
351 1, min);
352static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 2, input);
353static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
354 2, min);
355static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 3, input);
356static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
357 3, min);
358static SENSOR_DEVICE_ATTR_2(fan5_input, S_IRUGO, show_fan, NULL, 4, input);
359static SENSOR_DEVICE_ATTR_2(fan5_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
360 4, min);
361
362static struct attribute *emc6w201_attributes[] = {
363 &sensor_dev_attr_in0_input.dev_attr.attr,
364 &sensor_dev_attr_in0_min.dev_attr.attr,
365 &sensor_dev_attr_in0_max.dev_attr.attr,
366 &sensor_dev_attr_in1_input.dev_attr.attr,
367 &sensor_dev_attr_in1_min.dev_attr.attr,
368 &sensor_dev_attr_in1_max.dev_attr.attr,
369 &sensor_dev_attr_in2_input.dev_attr.attr,
370 &sensor_dev_attr_in2_min.dev_attr.attr,
371 &sensor_dev_attr_in2_max.dev_attr.attr,
372 &sensor_dev_attr_in3_input.dev_attr.attr,
373 &sensor_dev_attr_in3_min.dev_attr.attr,
374 &sensor_dev_attr_in3_max.dev_attr.attr,
375 &sensor_dev_attr_in4_input.dev_attr.attr,
376 &sensor_dev_attr_in4_min.dev_attr.attr,
377 &sensor_dev_attr_in4_max.dev_attr.attr,
378 &sensor_dev_attr_in5_input.dev_attr.attr,
379 &sensor_dev_attr_in5_min.dev_attr.attr,
380 &sensor_dev_attr_in5_max.dev_attr.attr,
381
382 &sensor_dev_attr_temp1_input.dev_attr.attr,
383 &sensor_dev_attr_temp1_min.dev_attr.attr,
384 &sensor_dev_attr_temp1_max.dev_attr.attr,
385 &sensor_dev_attr_temp2_input.dev_attr.attr,
386 &sensor_dev_attr_temp2_min.dev_attr.attr,
387 &sensor_dev_attr_temp2_max.dev_attr.attr,
388 &sensor_dev_attr_temp3_input.dev_attr.attr,
389 &sensor_dev_attr_temp3_min.dev_attr.attr,
390 &sensor_dev_attr_temp3_max.dev_attr.attr,
391 &sensor_dev_attr_temp4_input.dev_attr.attr,
392 &sensor_dev_attr_temp4_min.dev_attr.attr,
393 &sensor_dev_attr_temp4_max.dev_attr.attr,
394 &sensor_dev_attr_temp5_input.dev_attr.attr,
395 &sensor_dev_attr_temp5_min.dev_attr.attr,
396 &sensor_dev_attr_temp5_max.dev_attr.attr,
397 &sensor_dev_attr_temp6_input.dev_attr.attr,
398 &sensor_dev_attr_temp6_min.dev_attr.attr,
399 &sensor_dev_attr_temp6_max.dev_attr.attr,
400
401 &sensor_dev_attr_fan1_input.dev_attr.attr,
402 &sensor_dev_attr_fan1_min.dev_attr.attr,
403 &sensor_dev_attr_fan2_input.dev_attr.attr,
404 &sensor_dev_attr_fan2_min.dev_attr.attr,
405 &sensor_dev_attr_fan3_input.dev_attr.attr,
406 &sensor_dev_attr_fan3_min.dev_attr.attr,
407 &sensor_dev_attr_fan4_input.dev_attr.attr,
408 &sensor_dev_attr_fan4_min.dev_attr.attr,
409 &sensor_dev_attr_fan5_input.dev_attr.attr,
410 &sensor_dev_attr_fan5_min.dev_attr.attr,
411 NULL
412};
413
414static const struct attribute_group emc6w201_group = {
415 .attrs = emc6w201_attributes,
416};
417
418/*
419 * Driver interface
420 */
421
422/* Return 0 if detection is successful, -ENODEV otherwise */
423static int emc6w201_detect(struct i2c_client *client,
424 struct i2c_board_info *info)
425{
426 struct i2c_adapter *adapter = client->adapter;
427 int company, verstep, config;
428
429 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
430 return -ENODEV;
431
432 /* Identification */
433 company = i2c_smbus_read_byte_data(client, EMC6W201_REG_COMPANY);
434 if (company != 0x5C)
435 return -ENODEV;
436 verstep = i2c_smbus_read_byte_data(client, EMC6W201_REG_VERSTEP);
437 if (verstep < 0 || (verstep & 0xF0) != 0xB0)
438 return -ENODEV;
439 if ((verstep & 0x0F) > 2) {
 440 dev_dbg(&client->dev, "Unknown EMC6W201 stepping %d\n",
441 verstep & 0x0F);
442 return -ENODEV;
443 }
444
445 /* Check configuration */
446 config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
447 if ((config & 0xF4) != 0x04)
448 return -ENODEV;
449 if (!(config & 0x01)) {
450 dev_err(&client->dev, "Monitoring not enabled\n");
451 return -ENODEV;
452 }
453
454 strlcpy(info->type, "emc6w201", I2C_NAME_SIZE);
455
456 return 0;
457}
458
459static int emc6w201_probe(struct i2c_client *client,
460 const struct i2c_device_id *id)
461{
462 struct emc6w201_data *data;
463 int err;
464
465 data = kzalloc(sizeof(struct emc6w201_data), GFP_KERNEL);
466 if (!data) {
467 err = -ENOMEM;
468 goto exit;
469 }
470
471 i2c_set_clientdata(client, data);
472 mutex_init(&data->update_lock);
473
474 /* Create sysfs attribute */
475 err = sysfs_create_group(&client->dev.kobj, &emc6w201_group);
476 if (err)
477 goto exit_free;
478
479 /* Expose as a hwmon device */
480 data->hwmon_dev = hwmon_device_register(&client->dev);
481 if (IS_ERR(data->hwmon_dev)) {
482 err = PTR_ERR(data->hwmon_dev);
483 goto exit_remove;
484 }
485
486 return 0;
487
488 exit_remove:
489 sysfs_remove_group(&client->dev.kobj, &emc6w201_group);
490 exit_free:
491 kfree(data);
492 exit:
493 return err;
494}
495
496static int emc6w201_remove(struct i2c_client *client)
497{
498 struct emc6w201_data *data = i2c_get_clientdata(client);
499
500 hwmon_device_unregister(data->hwmon_dev);
501 sysfs_remove_group(&client->dev.kobj, &emc6w201_group);
502 kfree(data);
503
504 return 0;
505}
506
507static const struct i2c_device_id emc6w201_id[] = {
508 { "emc6w201", 0 },
509 { }
510};
511MODULE_DEVICE_TABLE(i2c, emc6w201_id);
512
513static struct i2c_driver emc6w201_driver = {
514 .class = I2C_CLASS_HWMON,
515 .driver = {
516 .name = "emc6w201",
517 },
518 .probe = emc6w201_probe,
519 .remove = emc6w201_remove,
520 .id_table = emc6w201_id,
521 .detect = emc6w201_detect,
522 .address_list = normal_i2c,
523};
524
525static int __init sensors_emc6w201_init(void)
526{
527 return i2c_add_driver(&emc6w201_driver);
528}
529module_init(sensors_emc6w201_init);
530
531static void __exit sensors_emc6w201_exit(void)
532{
533 i2c_del_driver(&emc6w201_driver);
534}
535module_exit(sensors_emc6w201_exit);
536
537MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
538MODULE_DESCRIPTION("SMSC EMC6W201 hardware monitoring driver");
539MODULE_LICENSE("GPL");
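As a quick sanity check of the conversions used by show_in() and show_fan() above (a full-scale voltage reading of 0xC0 maps to the nominal rail voltage, and the fan tachometer count is 5400000/RPM), here is a standalone sketch; the sample values 0xC0, 3300 mV and 2700 are chosen purely for illustration:

#include <stdio.h>

int main(void)
{
	/* Voltage: a reading of 0xC0 on the 3.3 V rail reports the nominal 3300 mV */
	unsigned int reg = 0xC0, nominal_mv = 3300;
	printf("in = %u mV\n", reg * nominal_mv / 0xC0);

	/* Fan: a tachometer count of 2700 corresponds to 5400000 / 2700 = 2000 RPM */
	unsigned int count = 2700;
	printf("fan = %u RPM\n", 5400000U / count);

	return 0;
}
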
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index ca07a32447c2..a4a94a096c90 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -48,6 +48,7 @@
48 48
49#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ 49#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
50#define SIO_F71808E_ID 0x0901 /* Chipset ID */ 50#define SIO_F71808E_ID 0x0901 /* Chipset ID */
51#define SIO_F71808A_ID 0x1001 /* Chipset ID */
51#define SIO_F71858_ID 0x0507 /* Chipset ID */ 52#define SIO_F71858_ID 0x0507 /* Chipset ID */
52#define SIO_F71862_ID 0x0601 /* Chipset ID */ 53#define SIO_F71862_ID 0x0601 /* Chipset ID */
53#define SIO_F71869_ID 0x0814 /* Chipset ID */ 54#define SIO_F71869_ID 0x0814 /* Chipset ID */
@@ -107,11 +108,12 @@ static unsigned short force_id;
107module_param(force_id, ushort, 0); 108module_param(force_id, ushort, 0);
108MODULE_PARM_DESC(force_id, "Override the detected device ID"); 109MODULE_PARM_DESC(force_id, "Override the detected device ID");
109 110
110enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg, 111enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
111 f71889ed, f71889a, f8000, f81865f }; 112 f71889ed, f71889a, f8000, f81865f };
112 113
113static const char *f71882fg_names[] = { 114static const char *f71882fg_names[] = {
114 "f71808e", 115 "f71808e",
116 "f71808a",
115 "f71858fg", 117 "f71858fg",
116 "f71862fg", 118 "f71862fg",
117 "f71869", /* Both f71869f and f71869e, reg. compatible and same id */ 119 "f71869", /* Both f71869f and f71869e, reg. compatible and same id */
@@ -125,6 +127,7 @@ static const char *f71882fg_names[] = {
125 127
126static const char f71882fg_has_in[][F71882FG_MAX_INS] = { 128static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
127 [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, 129 [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1 },
130 [f71808a] = { 1, 1, 1, 1, 0, 0, 0, 1, 1 },
128 [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, 131 [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
129 [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, 132 [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
130 [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, 133 [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -138,6 +141,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
138 141
139static const char f71882fg_has_in1_alarm[] = { 142static const char f71882fg_has_in1_alarm[] = {
140 [f71808e] = 0, 143 [f71808e] = 0,
144 [f71808a] = 0,
141 [f71858fg] = 0, 145 [f71858fg] = 0,
142 [f71862fg] = 0, 146 [f71862fg] = 0,
143 [f71869] = 0, 147 [f71869] = 0,
@@ -149,8 +153,9 @@ static const char f71882fg_has_in1_alarm[] = {
149 [f81865f] = 1, 153 [f81865f] = 1,
150}; 154};
151 155
152static const char f71882fg_has_beep[] = { 156static const char f71882fg_fan_has_beep[] = {
153 [f71808e] = 0, 157 [f71808e] = 0,
158 [f71808a] = 0,
154 [f71858fg] = 0, 159 [f71858fg] = 0,
155 [f71862fg] = 1, 160 [f71862fg] = 1,
156 [f71869] = 1, 161 [f71869] = 1,
@@ -164,6 +169,7 @@ static const char f71882fg_has_beep[] = {
164 169
165static const char f71882fg_nr_fans[] = { 170static const char f71882fg_nr_fans[] = {
166 [f71808e] = 3, 171 [f71808e] = 3,
 172 [f71808a] = 2, /* + 1 fan with monitoring and simple pwm only */
167 [f71858fg] = 3, 173 [f71858fg] = 3,
168 [f71862fg] = 3, 174 [f71862fg] = 3,
169 [f71869] = 3, 175 [f71869] = 3,
@@ -171,12 +177,27 @@ static const char f71882fg_nr_fans[] = {
171 [f71889fg] = 3, 177 [f71889fg] = 3,
172 [f71889ed] = 3, 178 [f71889ed] = 3,
173 [f71889a] = 3, 179 [f71889a] = 3,
174 [f8000] = 3, 180 [f8000] = 3, /* + 1 fan which is monitor-only */
175 [f81865f] = 2, 181 [f81865f] = 2,
176}; 182};
177 183
184static const char f71882fg_temp_has_beep[] = {
185 [f71808e] = 0,
186 [f71808a] = 1,
187 [f71858fg] = 0,
188 [f71862fg] = 1,
189 [f71869] = 1,
190 [f71882fg] = 1,
191 [f71889fg] = 1,
192 [f71889ed] = 1,
193 [f71889a] = 1,
194 [f8000] = 0,
195 [f81865f] = 1,
196};
197
178static const char f71882fg_nr_temps[] = { 198static const char f71882fg_nr_temps[] = {
179 [f71808e] = 2, 199 [f71808e] = 2,
200 [f71808a] = 2,
180 [f71858fg] = 3, 201 [f71858fg] = 3,
181 [f71862fg] = 3, 202 [f71862fg] = 3,
182 [f71869] = 3, 203 [f71869] = 3,
@@ -301,6 +322,10 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
301 char *buf); 322 char *buf);
302static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr, 323static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
303 const char *buf, size_t count); 324 const char *buf, size_t count);
325static ssize_t show_simple_pwm(struct device *dev,
326 struct device_attribute *devattr, char *buf);
327static ssize_t store_simple_pwm(struct device *dev,
328 struct device_attribute *devattr, const char *buf, size_t count);
304static ssize_t show_pwm_enable(struct device *dev, 329static ssize_t show_pwm_enable(struct device *dev,
305 struct device_attribute *devattr, char *buf); 330 struct device_attribute *devattr, char *buf);
306static ssize_t store_pwm_enable(struct device *dev, 331static ssize_t store_pwm_enable(struct device *dev,
@@ -550,6 +575,14 @@ static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
550 show_pwm_interpolate, store_pwm_interpolate, 0, 3), 575 show_pwm_interpolate, store_pwm_interpolate, 0, 3),
551} }; 576} };
552 577
578/* Attr for the third fan of the f71808a, which only has manual pwm */
579static struct sensor_device_attribute_2 f71808a_fan3_attr[] = {
580 SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2),
581 SENSOR_ATTR_2(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 2),
582 SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR,
583 show_simple_pwm, store_simple_pwm, 0, 2),
584};
585
553/* Attr for models which can beep on Fan alarm */ 586/* Attr for models which can beep on Fan alarm */
554static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = { 587static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
555 SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep, 588 SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
@@ -1146,12 +1179,13 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
1146 data->temp_type[3] = (reg & 0x08) ? 2 : 4; 1179 data->temp_type[3] = (reg & 0x08) ? 2 : 4;
1147 } 1180 }
1148 1181
1149 if (f71882fg_has_beep[data->type]) { 1182 if (f71882fg_fan_has_beep[data->type])
1150 data->fan_beep = f71882fg_read8(data, 1183 data->fan_beep = f71882fg_read8(data,
1151 F71882FG_REG_FAN_BEEP); 1184 F71882FG_REG_FAN_BEEP);
1185
1186 if (f71882fg_temp_has_beep[data->type])
1152 data->temp_beep = f71882fg_read8(data, 1187 data->temp_beep = f71882fg_read8(data,
1153 F71882FG_REG_TEMP_BEEP); 1188 F71882FG_REG_TEMP_BEEP);
1154 }
1155 1189
1156 data->pwm_enable = f71882fg_read8(data, 1190 data->pwm_enable = f71882fg_read8(data,
1157 F71882FG_REG_PWM_ENABLE); 1191 F71882FG_REG_PWM_ENABLE);
@@ -1232,7 +1266,13 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
1232 data->pwm[nr] = 1266 data->pwm[nr] =
1233 f71882fg_read8(data, F71882FG_REG_PWM(nr)); 1267 f71882fg_read8(data, F71882FG_REG_PWM(nr));
1234 } 1268 }
1235 /* The f8000 can monitor 1 more fan, but has no pwm for it */ 1269 /* Some models have 1 more fan with limited capabilities */
1270 if (data->type == f71808a) {
1271 data->fan[2] = f71882fg_read16(data,
1272 F71882FG_REG_FAN(2));
1273 data->pwm[2] = f71882fg_read8(data,
1274 F71882FG_REG_PWM(2));
1275 }
1236 if (data->type == f8000) 1276 if (data->type == f8000)
1237 data->fan[3] = f71882fg_read16(data, 1277 data->fan[3] = f71882fg_read16(data,
1238 F71882FG_REG_FAN(3)); 1278 F71882FG_REG_FAN(3));
@@ -1722,6 +1762,38 @@ leave:
1722 return count; 1762 return count;
1723} 1763}
1724 1764
1765static ssize_t show_simple_pwm(struct device *dev,
1766 struct device_attribute *devattr, char *buf)
1767{
1768 struct f71882fg_data *data = f71882fg_update_device(dev);
1769 int val, nr = to_sensor_dev_attr_2(devattr)->index;
1770
1771 val = data->pwm[nr];
1772 return sprintf(buf, "%d\n", val);
1773}
1774
1775static ssize_t store_simple_pwm(struct device *dev,
1776 struct device_attribute *devattr,
1777 const char *buf, size_t count)
1778{
1779 struct f71882fg_data *data = dev_get_drvdata(dev);
1780 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1781 long val;
1782
1783 err = strict_strtol(buf, 10, &val);
1784 if (err)
1785 return err;
1786
1787 val = SENSORS_LIMIT(val, 0, 255);
1788
1789 mutex_lock(&data->update_lock);
1790 f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
1791 data->pwm[nr] = val;
1792 mutex_unlock(&data->update_lock);
1793
1794 return count;
1795}
1796
1725static ssize_t show_pwm_enable(struct device *dev, 1797static ssize_t show_pwm_enable(struct device *dev,
1726 struct device_attribute *devattr, char *buf) 1798 struct device_attribute *devattr, char *buf)
1727{ 1799{
@@ -2140,7 +2212,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2140 if (err) 2212 if (err)
2141 goto exit_unregister_sysfs; 2213 goto exit_unregister_sysfs;
2142 2214
2143 if (f71882fg_has_beep[data->type]) { 2215 if (f71882fg_temp_has_beep[data->type]) {
2144 err = f71882fg_create_sysfs_files(pdev, 2216 err = f71882fg_create_sysfs_files(pdev,
2145 &fxxxx_temp_beep_attr[0][0], 2217 &fxxxx_temp_beep_attr[0][0],
2146 ARRAY_SIZE(fxxxx_temp_beep_attr[0]) 2218 ARRAY_SIZE(fxxxx_temp_beep_attr[0])
@@ -2169,6 +2241,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2169 if (start_reg & 0x02) { 2241 if (start_reg & 0x02) {
2170 switch (data->type) { 2242 switch (data->type) {
2171 case f71808e: 2243 case f71808e:
2244 case f71808a:
2172 case f71869: 2245 case f71869:
2173 /* These always have signed auto point temps */ 2246 /* These always have signed auto point temps */
2174 data->auto_point_temp_signed = 1; 2247 data->auto_point_temp_signed = 1;
@@ -2221,7 +2294,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2221 if (err) 2294 if (err)
2222 goto exit_unregister_sysfs; 2295 goto exit_unregister_sysfs;
2223 2296
2224 if (f71882fg_has_beep[data->type]) { 2297 if (f71882fg_fan_has_beep[data->type]) {
2225 err = f71882fg_create_sysfs_files(pdev, 2298 err = f71882fg_create_sysfs_files(pdev,
2226 fxxxx_fan_beep_attr, nr_fans); 2299 fxxxx_fan_beep_attr, nr_fans);
2227 if (err) 2300 if (err)
@@ -2230,6 +2303,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2230 2303
2231 switch (data->type) { 2304 switch (data->type) {
2232 case f71808e: 2305 case f71808e:
2306 case f71808a:
2233 case f71869: 2307 case f71869:
2234 case f71889fg: 2308 case f71889fg:
2235 case f71889ed: 2309 case f71889ed:
@@ -2255,6 +2329,16 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2255 } 2329 }
2256 2330
2257 switch (data->type) { 2331 switch (data->type) {
2332 case f71808a:
2333 err = f71882fg_create_sysfs_files(pdev,
2334 &fxxxx_auto_pwm_attr[0][0],
2335 ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
2336 if (err)
2337 goto exit_unregister_sysfs;
2338 err = f71882fg_create_sysfs_files(pdev,
2339 f71808a_fan3_attr,
2340 ARRAY_SIZE(f71808a_fan3_attr));
2341 break;
2258 case f71862fg: 2342 case f71862fg:
2259 err = f71882fg_create_sysfs_files(pdev, 2343 err = f71882fg_create_sysfs_files(pdev,
2260 f71862fg_auto_pwm_attr, 2344 f71862fg_auto_pwm_attr,
@@ -2343,7 +2427,7 @@ static int f71882fg_remove(struct platform_device *pdev)
2343 &fxxxx_temp_attr[0][0], 2427 &fxxxx_temp_attr[0][0],
2344 ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps); 2428 ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
2345 } 2429 }
2346 if (f71882fg_has_beep[data->type]) { 2430 if (f71882fg_temp_has_beep[data->type]) {
2347 f71882fg_remove_sysfs_files(pdev, 2431 f71882fg_remove_sysfs_files(pdev,
2348 &fxxxx_temp_beep_attr[0][0], 2432 &fxxxx_temp_beep_attr[0][0],
2349 ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps); 2433 ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps);
@@ -2366,12 +2450,20 @@ static int f71882fg_remove(struct platform_device *pdev)
2366 f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0], 2450 f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
2367 ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans); 2451 ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
2368 2452
2369 if (f71882fg_has_beep[data->type]) { 2453 if (f71882fg_fan_has_beep[data->type]) {
2370 f71882fg_remove_sysfs_files(pdev, 2454 f71882fg_remove_sysfs_files(pdev,
2371 fxxxx_fan_beep_attr, nr_fans); 2455 fxxxx_fan_beep_attr, nr_fans);
2372 } 2456 }
2373 2457
2374 switch (data->type) { 2458 switch (data->type) {
2459 case f71808a:
2460 f71882fg_remove_sysfs_files(pdev,
2461 &fxxxx_auto_pwm_attr[0][0],
2462 ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
2463 f71882fg_remove_sysfs_files(pdev,
2464 f71808a_fan3_attr,
2465 ARRAY_SIZE(f71808a_fan3_attr));
2466 break;
2375 case f71862fg: 2467 case f71862fg:
2376 f71882fg_remove_sysfs_files(pdev, 2468 f71882fg_remove_sysfs_files(pdev,
2377 f71862fg_auto_pwm_attr, 2469 f71862fg_auto_pwm_attr,
@@ -2424,6 +2516,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2424 case SIO_F71808E_ID: 2516 case SIO_F71808E_ID:
2425 sio_data->type = f71808e; 2517 sio_data->type = f71808e;
2426 break; 2518 break;
2519 case SIO_F71808A_ID:
2520 sio_data->type = f71808a;
2521 break;
2427 case SIO_F71858_ID: 2522 case SIO_F71858_ID:
2428 sio_data->type = f71858fg; 2523 sio_data->type = f71858fg;
2429 break; 2524 break;
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
new file mode 100644
index 000000000000..523f8fb9e7d9
--- /dev/null
+++ b/drivers/hwmon/fam15h_power.c
@@ -0,0 +1,229 @@
1/*
2 * fam15h_power.c - AMD Family 15h processor power monitoring
3 *
4 * Copyright (c) 2011 Advanced Micro Devices, Inc.
5 * Author: Andreas Herrmann <andreas.herrmann3@amd.com>
6 *
7 *
8 * This driver is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This driver is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 * See the GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this driver; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/err.h>
22#include <linux/hwmon.h>
23#include <linux/hwmon-sysfs.h>
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/bitops.h>
28#include <asm/processor.h>
29
 30MODULE_DESCRIPTION("AMD Family 15h processor power monitor");
31MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>");
32MODULE_LICENSE("GPL");
33
34/* D18F3 */
35#define REG_NORTHBRIDGE_CAP 0xe8
36
37/* D18F4 */
38#define REG_PROCESSOR_TDP 0x1b8
39
40/* D18F5 */
41#define REG_TDP_RUNNING_AVERAGE 0xe0
42#define REG_TDP_LIMIT3 0xe8
43
44struct fam15h_power_data {
45 struct device *hwmon_dev;
46 unsigned int tdp_to_watts;
47 unsigned int base_tdp;
48 unsigned int processor_pwr_watts;
49};
50
51static ssize_t show_power(struct device *dev,
52 struct device_attribute *attr, char *buf)
53{
54 u32 val, tdp_limit, running_avg_range;
55 s32 running_avg_capture;
56 u64 curr_pwr_watts;
57 struct pci_dev *f4 = to_pci_dev(dev);
58 struct fam15h_power_data *data = dev_get_drvdata(dev);
59
60 pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
61 REG_TDP_RUNNING_AVERAGE, &val);
62 running_avg_capture = (val >> 4) & 0x3fffff;
63 running_avg_capture = sign_extend32(running_avg_capture, 22);
64 running_avg_range = val & 0xf;
65
66 pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
67 REG_TDP_LIMIT3, &val);
68
69 tdp_limit = val >> 16;
70 curr_pwr_watts = tdp_limit + data->base_tdp -
71 (s32)(running_avg_capture >> (running_avg_range + 1));
72 curr_pwr_watts *= data->tdp_to_watts;
73
74 /*
 75 * Convert to microwatts
 76 *
 77 * Power is reported in watts as a fixed-point integer with a
 78 * scaling factor of 1/(2^16). For the conversion we use
 79 * (10^6)/(2^16) = 15625/(2^10).
80 */
81 curr_pwr_watts = (curr_pwr_watts * 15625) >> 10;
82 return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts);
83}
84static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL);
85
86static ssize_t show_power_crit(struct device *dev,
87 struct device_attribute *attr, char *buf)
88{
89 struct fam15h_power_data *data = dev_get_drvdata(dev);
90
91 return sprintf(buf, "%u\n", data->processor_pwr_watts);
92}
93static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
94
95static ssize_t show_name(struct device *dev,
96 struct device_attribute *attr, char *buf)
97{
98 return sprintf(buf, "fam15h_power\n");
99}
100static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
101
102static struct attribute *fam15h_power_attrs[] = {
103 &dev_attr_power1_input.attr,
104 &dev_attr_power1_crit.attr,
105 &dev_attr_name.attr,
106 NULL
107};
108
109static const struct attribute_group fam15h_power_attr_group = {
110 .attrs = fam15h_power_attrs,
111};
112
113static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
114{
115 u32 val;
116
117 pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 3),
118 REG_NORTHBRIDGE_CAP, &val);
119 if ((val & BIT(29)) && ((val >> 30) & 3))
120 return false;
121
122 return true;
123}
124
125static void __devinit fam15h_power_init_data(struct pci_dev *f4,
126 struct fam15h_power_data *data)
127{
128 u32 val;
129 u64 tmp;
130
131 pci_read_config_dword(f4, REG_PROCESSOR_TDP, &val);
132 data->base_tdp = val >> 16;
133 tmp = val & 0xffff;
134
135 pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
136 REG_TDP_LIMIT3, &val);
137
138 data->tdp_to_watts = ((val & 0x3ff) << 6) | ((val >> 10) & 0x3f);
139 tmp *= data->tdp_to_watts;
140
141 /* result not allowed to be >= 256W */
142 if ((tmp >> 16) >= 256)
143 dev_warn(&f4->dev, "Bogus value for ProcessorPwrWatts "
144 "(processor_pwr_watts>=%u)\n",
145 (unsigned int) (tmp >> 16));
146
 147 /* convert to microwatts */
148 data->processor_pwr_watts = (tmp * 15625) >> 10;
149}
150
151static int __devinit fam15h_power_probe(struct pci_dev *pdev,
152 const struct pci_device_id *id)
153{
154 struct fam15h_power_data *data;
155 struct device *dev;
156 int err;
157
158 if (!fam15h_power_is_internal_node0(pdev)) {
159 err = -ENODEV;
160 goto exit;
161 }
162
163 data = kzalloc(sizeof(struct fam15h_power_data), GFP_KERNEL);
164 if (!data) {
165 err = -ENOMEM;
166 goto exit;
167 }
168 fam15h_power_init_data(pdev, data);
169 dev = &pdev->dev;
170
171 dev_set_drvdata(dev, data);
172 err = sysfs_create_group(&dev->kobj, &fam15h_power_attr_group);
173 if (err)
174 goto exit_free_data;
175
176 data->hwmon_dev = hwmon_device_register(dev);
177 if (IS_ERR(data->hwmon_dev)) {
178 err = PTR_ERR(data->hwmon_dev);
179 goto exit_remove_group;
180 }
181
182 return 0;
183
184exit_remove_group:
185 sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group);
186exit_free_data:
187 kfree(data);
188exit:
189 return err;
190}
191
192static void __devexit fam15h_power_remove(struct pci_dev *pdev)
193{
194 struct device *dev;
195 struct fam15h_power_data *data;
196
197 dev = &pdev->dev;
198 data = dev_get_drvdata(dev);
199 hwmon_device_unregister(data->hwmon_dev);
200 sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group);
201 dev_set_drvdata(dev, NULL);
202 kfree(data);
203}
204
205static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = {
206 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
207 {}
208};
209MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
210
211static struct pci_driver fam15h_power_driver = {
212 .name = "fam15h_power",
213 .id_table = fam15h_power_id_table,
214 .probe = fam15h_power_probe,
215 .remove = __devexit_p(fam15h_power_remove),
216};
217
218static int __init fam15h_power_init(void)
219{
220 return pci_register_driver(&fam15h_power_driver);
221}
222
223static void __exit fam15h_power_exit(void)
224{
225 pci_unregister_driver(&fam15h_power_driver);
226}
227
228module_init(fam15h_power_init)
229module_exit(fam15h_power_exit)
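The (x * 15625) >> 10 step in show_power() and fam15h_power_init_data() is exact integer arithmetic for x * 10^6 / 2^16, since 10^6 / 2^16 reduces to 15625 / 1024. A standalone check with an illustrative value (125.5 W):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t raw = 8224768;		/* 125.5 W expressed in 1/2^16 W units */

	/* Both forms print 125500000 (microwatts) */
	printf("%llu\n", (unsigned long long)((raw * 15625) >> 10));
	printf("%llu\n", (unsigned long long)(raw * 1000000 / 65536));

	return 0;
}
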
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index bc6e2ab3a361..537409d07ee7 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -523,7 +523,7 @@ static void aem_delete(struct aem_data *data)
523 aem_remove_sensors(data); 523 aem_remove_sensors(data);
524 hwmon_device_unregister(data->hwmon_dev); 524 hwmon_device_unregister(data->hwmon_dev);
525 ipmi_destroy_user(data->ipmi.user); 525 ipmi_destroy_user(data->ipmi.user);
526 dev_set_drvdata(&data->pdev->dev, NULL); 526 platform_set_drvdata(data->pdev, NULL);
527 platform_device_unregister(data->pdev); 527 platform_device_unregister(data->pdev);
528 aem_idr_put(data->id); 528 aem_idr_put(data->id);
529 kfree(data); 529 kfree(data);
@@ -594,7 +594,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle)
594 if (res) 594 if (res)
595 goto ipmi_err; 595 goto ipmi_err;
596 596
597 dev_set_drvdata(&data->pdev->dev, data); 597 platform_set_drvdata(data->pdev, data);
598 598
599 /* Set up IPMI interface */ 599 /* Set up IPMI interface */
600 if (aem_init_ipmi_data(&data->ipmi, probe->interface, 600 if (aem_init_ipmi_data(&data->ipmi, probe->interface,
@@ -630,7 +630,7 @@ sensor_err:
630hwmon_reg_err: 630hwmon_reg_err:
631 ipmi_destroy_user(data->ipmi.user); 631 ipmi_destroy_user(data->ipmi.user);
632ipmi_err: 632ipmi_err:
633 dev_set_drvdata(&data->pdev->dev, NULL); 633 platform_set_drvdata(data->pdev, NULL);
634 platform_device_unregister(data->pdev); 634 platform_device_unregister(data->pdev);
635dev_err: 635dev_err:
636 aem_idr_put(data->id); 636 aem_idr_put(data->id);
@@ -727,7 +727,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe,
727 if (res) 727 if (res)
728 goto ipmi_err; 728 goto ipmi_err;
729 729
730 dev_set_drvdata(&data->pdev->dev, data); 730 platform_set_drvdata(data->pdev, data);
731 731
732 /* Set up IPMI interface */ 732 /* Set up IPMI interface */
733 if (aem_init_ipmi_data(&data->ipmi, probe->interface, 733 if (aem_init_ipmi_data(&data->ipmi, probe->interface,
@@ -763,7 +763,7 @@ sensor_err:
763hwmon_reg_err: 763hwmon_reg_err:
764 ipmi_destroy_user(data->ipmi.user); 764 ipmi_destroy_user(data->ipmi.user);
765ipmi_err: 765ipmi_err:
766 dev_set_drvdata(&data->pdev->dev, NULL); 766 platform_set_drvdata(data->pdev, NULL);
767 platform_device_unregister(data->pdev); 767 platform_device_unregister(data->pdev);
768dev_err: 768dev_err:
769 aem_idr_put(data->id); 769 aem_idr_put(data->id);
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 316b64823f7b..bb6405b92007 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -77,15 +77,13 @@ static struct platform_device *pdev;
77#define DEVID 0x20 /* Register: Device ID */ 77#define DEVID 0x20 /* Register: Device ID */
78#define DEVREV 0x22 /* Register: Device Revision */ 78#define DEVREV 0x22 /* Register: Device Revision */
79 79
80static inline int 80static inline int superio_inb(int reg)
81superio_inb(int reg)
82{ 81{
83 outb(reg, REG); 82 outb(reg, REG);
84 return inb(VAL); 83 return inb(VAL);
85} 84}
86 85
87static inline void 86static inline void superio_outb(int reg, int val)
88superio_outb(int reg, int val)
89{ 87{
90 outb(reg, REG); 88 outb(reg, REG);
91 outb(val, VAL); 89 outb(val, VAL);
@@ -101,27 +99,32 @@ static int superio_inw(int reg)
101 return val; 99 return val;
102} 100}
103 101
104static inline void 102static inline void superio_select(int ldn)
105superio_select(int ldn)
106{ 103{
107 outb(DEV, REG); 104 outb(DEV, REG);
108 outb(ldn, VAL); 105 outb(ldn, VAL);
109} 106}
110 107
111static inline void 108static inline int superio_enter(void)
112superio_enter(void)
113{ 109{
110 /*
111 * Try to reserve REG and REG + 1 for exclusive access.
112 */
113 if (!request_muxed_region(REG, 2, DRVNAME))
114 return -EBUSY;
115
114 outb(0x87, REG); 116 outb(0x87, REG);
115 outb(0x01, REG); 117 outb(0x01, REG);
116 outb(0x55, REG); 118 outb(0x55, REG);
117 outb(0x55, REG); 119 outb(0x55, REG);
120 return 0;
118} 121}
119 122
120static inline void 123static inline void superio_exit(void)
121superio_exit(void)
122{ 124{
123 outb(0x02, REG); 125 outb(0x02, REG);
124 outb(0x02, VAL); 126 outb(0x02, VAL);
127 release_region(REG, 2);
125} 128}
126 129
127/* Logical device 4 registers */ 130/* Logical device 4 registers */
@@ -1542,11 +1545,15 @@ static const struct attribute_group it87_group_label = {
1542static int __init it87_find(unsigned short *address, 1545static int __init it87_find(unsigned short *address,
1543 struct it87_sio_data *sio_data) 1546 struct it87_sio_data *sio_data)
1544{ 1547{
1545 int err = -ENODEV; 1548 int err;
1546 u16 chip_type; 1549 u16 chip_type;
1547 const char *board_vendor, *board_name; 1550 const char *board_vendor, *board_name;
1548 1551
1549 superio_enter(); 1552 err = superio_enter();
1553 if (err)
1554 return err;
1555
1556 err = -ENODEV;
1550 chip_type = force_id ? force_id : superio_inw(DEVID); 1557 chip_type = force_id ? force_id : superio_inw(DEVID);
1551 1558
1552 switch (chip_type) { 1559 switch (chip_type) {
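With the change above, superio_enter() can fail (it returns -EBUSY when the REG/VAL port pair cannot be reserved), so callers are expected to check its result and to balance every successful call with superio_exit(), which also releases the region. A hypothetical caller, for illustration only:

static int example_read_chip_id(u16 *chip_type)
{
	int err;

	err = superio_enter();		/* may fail with -EBUSY */
	if (err)
		return err;

	*chip_type = superio_inw(DEVID);

	superio_exit();			/* unlocks the chip, releases REG..REG+1 */
	return 0;
}
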
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 934991237061..02cebb74e206 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -213,7 +213,7 @@ static const struct dev_pm_ops jc42_dev_pm_ops = {
213 213
214/* This is the driver that will be inserted */ 214/* This is the driver that will be inserted */
215static struct i2c_driver jc42_driver = { 215static struct i2c_driver jc42_driver = {
216 .class = I2C_CLASS_HWMON, 216 .class = I2C_CLASS_SPD,
217 .driver = { 217 .driver = {
218 .name = "jc42", 218 .name = "jc42",
219 .pm = JC42_DEV_PM_OPS, 219 .pm = JC42_DEV_PM_OPS,
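The class change above narrows where jc42 auto-detection runs: the I2C core only invokes a driver's detect() callback on adapters whose class overlaps the driver's, so I2C_CLASS_SPD restricts scanning to buses flagged as carrying SPD/memory devices. Paraphrased core check (assumed from the i2c-core detection path, not part of this patch):

/* inside i2c_detect(), roughly: */
if (!(adapter->class & driver->class))
	return 0;	/* skip auto-detection on this adapter */
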
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 82bf65aa2968..41aa6a319870 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring 2 * k10temp.c - AMD Family 10h/11h/12h/14h/15h processor hardware monitoring
3 * 3 *
4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> 4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
5 * 5 *
@@ -25,7 +25,7 @@
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <asm/processor.h> 26#include <asm/processor.h>
27 27
28MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor"); 28MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
30MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
31 31
@@ -173,7 +173,7 @@ static int __devinit k10temp_probe(struct pci_dev *pdev,
173 err = PTR_ERR(hwmon_dev); 173 err = PTR_ERR(hwmon_dev);
174 goto exit_remove; 174 goto exit_remove;
175 } 175 }
176 dev_set_drvdata(&pdev->dev, hwmon_dev); 176 pci_set_drvdata(pdev, hwmon_dev);
177 177
178 if (unreliable && force) 178 if (unreliable && force)
179 dev_warn(&pdev->dev, 179 dev_warn(&pdev->dev,
@@ -194,7 +194,7 @@ exit:
194 194
195static void __devexit k10temp_remove(struct pci_dev *pdev) 195static void __devexit k10temp_remove(struct pci_dev *pdev)
196{ 196{
197 hwmon_device_unregister(dev_get_drvdata(&pdev->dev)); 197 hwmon_device_unregister(pci_get_drvdata(pdev));
198 device_remove_file(&pdev->dev, &dev_attr_name); 198 device_remove_file(&pdev->dev, &dev_attr_name);
199 device_remove_file(&pdev->dev, &dev_attr_temp1_input); 199 device_remove_file(&pdev->dev, &dev_attr_temp1_input);
200 device_remove_file(&pdev->dev, &dev_attr_temp1_max); 200 device_remove_file(&pdev->dev, &dev_attr_temp1_max);
@@ -202,13 +202,14 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
202 &sensor_dev_attr_temp1_crit.dev_attr); 202 &sensor_dev_attr_temp1_crit.dev_attr);
203 device_remove_file(&pdev->dev, 203 device_remove_file(&pdev->dev,
204 &sensor_dev_attr_temp1_crit_hyst.dev_attr); 204 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
205 dev_set_drvdata(&pdev->dev, NULL); 205 pci_set_drvdata(pdev, NULL);
206} 206}
207 207
208static const struct pci_device_id k10temp_id_table[] = { 208static const struct pci_device_id k10temp_id_table[] = {
209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, 210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
211 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, 211 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
212 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
212 {} 213 {}
213}; 214};
214MODULE_DEVICE_TABLE(pci, k10temp_id_table); 215MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 418496f13020..b923bc2307ad 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -252,7 +252,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
252 252
253 data->name = "k8temp"; 253 data->name = "k8temp";
254 mutex_init(&data->update_lock); 254 mutex_init(&data->update_lock);
255 dev_set_drvdata(&pdev->dev, data); 255 pci_set_drvdata(pdev, data);
256 256
257 /* Register sysfs hooks */ 257 /* Register sysfs hooks */
258 err = device_create_file(&pdev->dev, 258 err = device_create_file(&pdev->dev,
@@ -307,7 +307,7 @@ exit_remove:
307 &sensor_dev_attr_temp4_input.dev_attr); 307 &sensor_dev_attr_temp4_input.dev_attr);
308 device_remove_file(&pdev->dev, &dev_attr_name); 308 device_remove_file(&pdev->dev, &dev_attr_name);
309exit_free: 309exit_free:
310 dev_set_drvdata(&pdev->dev, NULL); 310 pci_set_drvdata(pdev, NULL);
311 kfree(data); 311 kfree(data);
312exit: 312exit:
313 return err; 313 return err;
@@ -315,7 +315,7 @@ exit:
315 315
316static void __devexit k8temp_remove(struct pci_dev *pdev) 316static void __devexit k8temp_remove(struct pci_dev *pdev)
317{ 317{
318 struct k8temp_data *data = dev_get_drvdata(&pdev->dev); 318 struct k8temp_data *data = pci_get_drvdata(pdev);
319 319
320 hwmon_device_unregister(data->hwmon_dev); 320 hwmon_device_unregister(data->hwmon_dev);
321 device_remove_file(&pdev->dev, 321 device_remove_file(&pdev->dev,
@@ -327,7 +327,7 @@ static void __devexit k8temp_remove(struct pci_dev *pdev)
327 device_remove_file(&pdev->dev, 327 device_remove_file(&pdev->dev,
328 &sensor_dev_attr_temp4_input.dev_attr); 328 &sensor_dev_attr_temp4_input.dev_attr);
329 device_remove_file(&pdev->dev, &dev_attr_name); 329 device_remove_file(&pdev->dev, &dev_attr_name);
330 dev_set_drvdata(&pdev->dev, NULL); 330 pci_set_drvdata(pdev, NULL);
331 kfree(data); 331 kfree(data);
332} 332}
333 333
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 3b84fb503053..c274ea25d899 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -58,7 +58,7 @@ static ssize_t lm70_sense_temp(struct device *dev,
58 int status, val = 0; 58 int status, val = 0;
59 u8 rxbuf[2]; 59 u8 rxbuf[2];
60 s16 raw=0; 60 s16 raw=0;
61 struct lm70 *p_lm70 = dev_get_drvdata(&spi->dev); 61 struct lm70 *p_lm70 = spi_get_drvdata(spi);
62 62
63 if (mutex_lock_interruptible(&p_lm70->lock)) 63 if (mutex_lock_interruptible(&p_lm70->lock))
64 return -ERESTARTSYS; 64 return -ERESTARTSYS;
@@ -163,7 +163,7 @@ static int __devinit lm70_probe(struct spi_device *spi)
163 status = PTR_ERR(p_lm70->hwmon_dev); 163 status = PTR_ERR(p_lm70->hwmon_dev);
164 goto out_dev_reg_failed; 164 goto out_dev_reg_failed;
165 } 165 }
166 dev_set_drvdata(&spi->dev, p_lm70); 166 spi_set_drvdata(spi, p_lm70);
167 167
168 if ((status = device_create_file(&spi->dev, &dev_attr_temp1_input)) 168 if ((status = device_create_file(&spi->dev, &dev_attr_temp1_input))
169 || (status = device_create_file(&spi->dev, &dev_attr_name))) { 169 || (status = device_create_file(&spi->dev, &dev_attr_name))) {
@@ -177,19 +177,19 @@ out_dev_create_file_failed:
177 device_remove_file(&spi->dev, &dev_attr_temp1_input); 177 device_remove_file(&spi->dev, &dev_attr_temp1_input);
178 hwmon_device_unregister(p_lm70->hwmon_dev); 178 hwmon_device_unregister(p_lm70->hwmon_dev);
179out_dev_reg_failed: 179out_dev_reg_failed:
180 dev_set_drvdata(&spi->dev, NULL); 180 spi_set_drvdata(spi, NULL);
181 kfree(p_lm70); 181 kfree(p_lm70);
182 return status; 182 return status;
183} 183}
184 184
185static int __devexit lm70_remove(struct spi_device *spi) 185static int __devexit lm70_remove(struct spi_device *spi)
186{ 186{
187 struct lm70 *p_lm70 = dev_get_drvdata(&spi->dev); 187 struct lm70 *p_lm70 = spi_get_drvdata(spi);
188 188
189 device_remove_file(&spi->dev, &dev_attr_temp1_input); 189 device_remove_file(&spi->dev, &dev_attr_temp1_input);
190 device_remove_file(&spi->dev, &dev_attr_name); 190 device_remove_file(&spi->dev, &dev_attr_name);
191 hwmon_device_unregister(p_lm70->hwmon_dev); 191 hwmon_device_unregister(p_lm70->hwmon_dev);
192 dev_set_drvdata(&spi->dev, NULL); 192 spi_set_drvdata(spi, NULL);
193 kfree(p_lm70); 193 kfree(p_lm70);
194 194
195 return 0; 195 return 0;
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 9a11532ecae8..ece3aafa54b3 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -41,13 +41,6 @@
41#include <linux/err.h> 41#include <linux/err.h>
42 42
43/* 43/*
44 * Addresses to scan. There are four disjoint possibilities, by pin config.
45 */
46
47static const unsigned short normal_i2c[] = {0x1b, 0x1f, 0x48, 0x4b,
48 I2C_CLIENT_END};
49
50/*
51 * Insmod parameters 44 * Insmod parameters
52 */ 45 */
53 46
@@ -114,8 +107,6 @@ module_param(clock, int, S_IRUGO);
114 107
115static int max6650_probe(struct i2c_client *client, 108static int max6650_probe(struct i2c_client *client,
116 const struct i2c_device_id *id); 109 const struct i2c_device_id *id);
117static int max6650_detect(struct i2c_client *client,
118 struct i2c_board_info *info);
119static int max6650_init_client(struct i2c_client *client); 110static int max6650_init_client(struct i2c_client *client);
120static int max6650_remove(struct i2c_client *client); 111static int max6650_remove(struct i2c_client *client);
121static struct max6650_data *max6650_update_device(struct device *dev); 112static struct max6650_data *max6650_update_device(struct device *dev);
@@ -125,21 +116,19 @@ static struct max6650_data *max6650_update_device(struct device *dev);
125 */ 116 */
126 117
127static const struct i2c_device_id max6650_id[] = { 118static const struct i2c_device_id max6650_id[] = {
128 { "max6650", 0 }, 119 { "max6650", 1 },
120 { "max6651", 4 },
129 { } 121 { }
130}; 122};
131MODULE_DEVICE_TABLE(i2c, max6650_id); 123MODULE_DEVICE_TABLE(i2c, max6650_id);
132 124
133static struct i2c_driver max6650_driver = { 125static struct i2c_driver max6650_driver = {
134 .class = I2C_CLASS_HWMON,
135 .driver = { 126 .driver = {
136 .name = "max6650", 127 .name = "max6650",
137 }, 128 },
138 .probe = max6650_probe, 129 .probe = max6650_probe,
139 .remove = max6650_remove, 130 .remove = max6650_remove,
140 .id_table = max6650_id, 131 .id_table = max6650_id,
141 .detect = max6650_detect,
142 .address_list = normal_i2c,
143}; 132};
144 133
145/* 134/*
@@ -150,6 +139,7 @@ struct max6650_data
150{ 139{
151 struct device *hwmon_dev; 140 struct device *hwmon_dev;
152 struct mutex update_lock; 141 struct mutex update_lock;
142 int nr_fans;
153 char valid; /* zero until following fields are valid */ 143 char valid; /* zero until following fields are valid */
154 unsigned long last_updated; /* in jiffies */ 144 unsigned long last_updated; /* in jiffies */
155 145
@@ -501,9 +491,6 @@ static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
501 491
502static struct attribute *max6650_attrs[] = { 492static struct attribute *max6650_attrs[] = {
503 &sensor_dev_attr_fan1_input.dev_attr.attr, 493 &sensor_dev_attr_fan1_input.dev_attr.attr,
504 &sensor_dev_attr_fan2_input.dev_attr.attr,
505 &sensor_dev_attr_fan3_input.dev_attr.attr,
506 &sensor_dev_attr_fan4_input.dev_attr.attr,
507 &dev_attr_fan1_target.attr, 494 &dev_attr_fan1_target.attr,
508 &dev_attr_fan1_div.attr, 495 &dev_attr_fan1_div.attr,
509 &dev_attr_pwm1_enable.attr, 496 &dev_attr_pwm1_enable.attr,
@@ -521,42 +508,21 @@ static struct attribute_group max6650_attr_grp = {
521 .is_visible = max6650_attrs_visible, 508 .is_visible = max6650_attrs_visible,
522}; 509};
523 510
511static struct attribute *max6651_attrs[] = {
512 &sensor_dev_attr_fan2_input.dev_attr.attr,
513 &sensor_dev_attr_fan3_input.dev_attr.attr,
514 &sensor_dev_attr_fan4_input.dev_attr.attr,
515 NULL
516};
517
518static const struct attribute_group max6651_attr_grp = {
519 .attrs = max6651_attrs,
520};
521
524/* 522/*
525 * Real code 523 * Real code
526 */ 524 */
527 525
528/* Return 0 if detection is successful, -ENODEV otherwise */
529static int max6650_detect(struct i2c_client *client,
530 struct i2c_board_info *info)
531{
532 struct i2c_adapter *adapter = client->adapter;
533 int address = client->addr;
534
535 dev_dbg(&adapter->dev, "max6650_detect called\n");
536
537 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
538 dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support "
539 "byte read mode, skipping.\n");
540 return -ENODEV;
541 }
542
543 if (((i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG) & 0xC0)
544 ||(i2c_smbus_read_byte_data(client, MAX6650_REG_GPIO_STAT) & 0xE0)
545 ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN) & 0xE0)
546 ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM) & 0xE0)
547 ||(i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC))) {
548 dev_dbg(&adapter->dev,
549 "max6650: detection failed at 0x%02x.\n", address);
550 return -ENODEV;
551 }
552
553 dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address);
554
555 strlcpy(info->type, "max6650", I2C_NAME_SIZE);
556
557 return 0;
558}
559
560static int max6650_probe(struct i2c_client *client, 526static int max6650_probe(struct i2c_client *client,
561 const struct i2c_device_id *id) 527 const struct i2c_device_id *id)
562{ 528{
@@ -570,6 +536,7 @@ static int max6650_probe(struct i2c_client *client,
570 536
571 i2c_set_clientdata(client, data); 537 i2c_set_clientdata(client, data);
572 mutex_init(&data->update_lock); 538 mutex_init(&data->update_lock);
539 data->nr_fans = id->driver_data;
573 540
574 /* 541 /*
575 * Initialize the max6650 chip 542 * Initialize the max6650 chip
@@ -581,6 +548,12 @@ static int max6650_probe(struct i2c_client *client,
581 err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp); 548 err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
582 if (err) 549 if (err)
583 goto err_free; 550 goto err_free;
551 /* 3 additional fan inputs for the MAX6651 */
552 if (data->nr_fans == 4) {
553 err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp);
554 if (err)
555 goto err_remove;
556 }
584 557
585 data->hwmon_dev = hwmon_device_register(&client->dev); 558 data->hwmon_dev = hwmon_device_register(&client->dev);
586 if (!IS_ERR(data->hwmon_dev)) 559 if (!IS_ERR(data->hwmon_dev))
@@ -588,6 +561,9 @@ static int max6650_probe(struct i2c_client *client,
588 561
589 err = PTR_ERR(data->hwmon_dev); 562 err = PTR_ERR(data->hwmon_dev);
590 dev_err(&client->dev, "error registering hwmon device.\n"); 563 dev_err(&client->dev, "error registering hwmon device.\n");
564 if (data->nr_fans == 4)
565 sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
566err_remove:
591 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); 567 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
592err_free: 568err_free:
593 kfree(data); 569 kfree(data);
@@ -598,8 +574,10 @@ static int max6650_remove(struct i2c_client *client)
598{ 574{
599 struct max6650_data *data = i2c_get_clientdata(client); 575 struct max6650_data *data = i2c_get_clientdata(client);
600 576
601 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
602 hwmon_device_unregister(data->hwmon_dev); 577 hwmon_device_unregister(data->hwmon_dev);
578 if (data->nr_fans == 4)
579 sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
580 sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
603 kfree(data); 581 kfree(data);
604 return 0; 582 return 0;
605} 583}
@@ -712,7 +690,7 @@ static struct max6650_data *max6650_update_device(struct device *dev)
712 MAX6650_REG_SPEED); 690 MAX6650_REG_SPEED);
713 data->config = i2c_smbus_read_byte_data(client, 691 data->config = i2c_smbus_read_byte_data(client,
714 MAX6650_REG_CONFIG); 692 MAX6650_REG_CONFIG);
715 for (i = 0; i < 4; i++) { 693 for (i = 0; i < data->nr_fans; i++) {
716 data->tach[i] = i2c_smbus_read_byte_data(client, 694 data->tach[i] = i2c_smbus_read_byte_data(client,
717 tach_reg[i]); 695 tach_reg[i]);
718 } 696 }
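With the .detect() routine, I2C_CLASS_HWMON and the address list removed, the max6650 driver no longer auto-probes the bus; the device has to be instantiated explicitly (board info, device tree, or the sysfs new_device interface). The id->driver_data value now carries the number of fan inputs, 1 for the MAX6650 and 4 for the MAX6651, which is what gates creation of the extra max6651_attr_grp in probe. A minimal, hypothetical board-file sketch (bus number and address are placeholders; the valid addresses by pin strapping are 0x1b, 0x1f, 0x48 and 0x4b):

    #include <linux/i2c.h>

    static struct i2c_board_info max665x_info __initdata = {
            I2C_BOARD_INFO("max6651", 0x1b),   /* binds via max6650_id[], driver_data = 4 fans */
    };

    /* in the board's early init code, for the bus the fan controller sits on */
    i2c_register_board_info(1, &max665x_info, 1);

At run time, echoing "max6651 0x1b" into /sys/bus/i2c/devices/i2c-1/new_device achieves the same binding.
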
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 98799bab69ce..354770ed3186 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -707,6 +707,7 @@ do { \
707 struct sensor_device_attribute *a \ 707 struct sensor_device_attribute *a \
708 = &data->_type##s[data->num_##_type##s].attribute; \ 708 = &data->_type##s[data->num_##_type##s].attribute; \
709 BUG_ON(data->num_attributes >= data->max_attributes); \ 709 BUG_ON(data->num_attributes >= data->max_attributes); \
710 sysfs_attr_init(&a->dev_attr.attr); \
710 a->dev_attr.attr.name = _name; \ 711 a->dev_attr.attr.name = _name; \
711 a->dev_attr.attr.mode = _mode; \ 712 a->dev_attr.attr.mode = _mode; \
712 a->dev_attr.show = _show; \ 713 a->dev_attr.show = _show; \
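The single added line in pmbus_core matters more than it looks: attributes created at run time (allocated rather than declared with DEVICE_ATTR) need sysfs_attr_init() so that lockdep gets a valid key for the attribute; without it, registration triggers a lockdep warning on CONFIG_DEBUG_LOCK_ALLOC kernels. The general pattern, as a hedged sketch with placeholder names:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static int add_temp_attr(struct device *dev)
    {
            struct device_attribute *attr;

            attr = devm_kzalloc(dev, sizeof(*attr), GFP_KERNEL);
            if (!attr)
                    return -ENOMEM;

            sysfs_attr_init(&attr->attr);      /* dynamic attribute: give lockdep a key */
            attr->attr.name = "temp1_input";   /* illustrative name */
            attr->attr.mode = S_IRUGO;
            attr->show = my_show;              /* hypothetical show() callback */
            return device_create_file(dev, attr);
    }
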
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 9a51dcca9b0d..020c87273ea1 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -52,6 +52,9 @@
52#define SCH5627_COMPANY_ID 0x5c 52#define SCH5627_COMPANY_ID 0x5c
53#define SCH5627_PRIMARY_ID 0xa0 53#define SCH5627_PRIMARY_ID 0xa0
54 54
55#define SCH5627_CMD_READ 0x02
56#define SCH5627_CMD_WRITE 0x03
57
55#define SCH5627_REG_BUILD_CODE 0x39 58#define SCH5627_REG_BUILD_CODE 0x39
56#define SCH5627_REG_BUILD_ID 0x3a 59#define SCH5627_REG_BUILD_ID 0x3a
57#define SCH5627_REG_HWMON_ID 0x3c 60#define SCH5627_REG_HWMON_ID 0x3c
@@ -94,11 +97,13 @@ static const char * const SCH5627_IN_LABELS[SCH5627_NO_IN] = {
94struct sch5627_data { 97struct sch5627_data {
95 unsigned short addr; 98 unsigned short addr;
96 struct device *hwmon_dev; 99 struct device *hwmon_dev;
100 u8 control;
97 u8 temp_max[SCH5627_NO_TEMPS]; 101 u8 temp_max[SCH5627_NO_TEMPS];
98 u8 temp_crit[SCH5627_NO_TEMPS]; 102 u8 temp_crit[SCH5627_NO_TEMPS];
99 u16 fan_min[SCH5627_NO_FANS]; 103 u16 fan_min[SCH5627_NO_FANS];
100 104
101 struct mutex update_lock; 105 struct mutex update_lock;
106 unsigned long last_battery; /* In jiffies */
102 char valid; /* !=0 if following fields are valid */ 107 char valid; /* !=0 if following fields are valid */
103 unsigned long last_updated; /* In jiffies */ 108 unsigned long last_updated; /* In jiffies */
104 u16 temp[SCH5627_NO_TEMPS]; 109 u16 temp[SCH5627_NO_TEMPS];
@@ -140,7 +145,7 @@ static inline void superio_exit(int base)
140 release_region(base, 2); 145 release_region(base, 2);
141} 146}
142 147
143static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg) 148static int sch5627_send_cmd(struct sch5627_data *data, u8 cmd, u16 reg, u8 v)
144{ 149{
145 u8 val; 150 u8 val;
146 int i; 151 int i;
@@ -163,10 +168,14 @@ static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg)
163 outb(0x80, data->addr + 3); 168 outb(0x80, data->addr + 3);
164 169
165 /* Write Request Packet Header */ 170 /* Write Request Packet Header */
166 outb(0x02, data->addr + 4); /* Access Type: VREG read */ 171 outb(cmd, data->addr + 4); /* VREG Access Type read:0x02 write:0x03 */
167 outb(0x01, data->addr + 5); /* # of Entries: 1 Byte (8-bit) */ 172 outb(0x01, data->addr + 5); /* # of Entries: 1 Byte (8-bit) */
168 outb(0x04, data->addr + 2); /* Mailbox AP to first data entry loc. */ 173 outb(0x04, data->addr + 2); /* Mailbox AP to first data entry loc. */
169 174
175 /* Write Value field */
176 if (cmd == SCH5627_CMD_WRITE)
177 outb(v, data->addr + 4);
178
170 /* Write Address field */ 179 /* Write Address field */
171 outb(reg & 0xff, data->addr + 6); 180 outb(reg & 0xff, data->addr + 6);
172 outb(reg >> 8, data->addr + 7); 181 outb(reg >> 8, data->addr + 7);
@@ -224,8 +233,22 @@ static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg)
224 * But if we do that things don't work, so let's not. 233 * But if we do that things don't work, so let's not.
225 */ 234 */
226 235
227 /* Read Data from Mailbox */ 236 /* Read Value field */
228 return inb(data->addr + 4); 237 if (cmd == SCH5627_CMD_READ)
238 return inb(data->addr + 4);
239
240 return 0;
241}
242
243static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg)
244{
245 return sch5627_send_cmd(data, SCH5627_CMD_READ, reg, 0);
246}
247
248static int sch5627_write_virtual_reg(struct sch5627_data *data,
249 u16 reg, u8 val)
250{
251 return sch5627_send_cmd(data, SCH5627_CMD_WRITE, reg, val);
229} 252}
230 253
231static int sch5627_read_virtual_reg16(struct sch5627_data *data, u16 reg) 254static int sch5627_read_virtual_reg16(struct sch5627_data *data, u16 reg)
@@ -272,6 +295,13 @@ static struct sch5627_data *sch5627_update_device(struct device *dev)
272 295
273 mutex_lock(&data->update_lock); 296 mutex_lock(&data->update_lock);
274 297
298 /* Trigger a Vbat voltage measurement every 5 minutes */
299 if (time_after(jiffies, data->last_battery + 300 * HZ)) {
300 sch5627_write_virtual_reg(data, SCH5627_REG_CTRL,
301 data->control | 0x10);
302 data->last_battery = jiffies;
303 }
304
275 /* Cache the values for 1 second */ 305 /* Cache the values for 1 second */
276 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { 306 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
277 for (i = 0; i < SCH5627_NO_TEMPS; i++) { 307 for (i = 0; i < SCH5627_NO_TEMPS; i++) {
@@ -696,11 +726,17 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
696 err = val; 726 err = val;
697 goto error; 727 goto error;
698 } 728 }
699 if (!(val & 0x01)) { 729 data->control = val;
730 if (!(data->control & 0x01)) {
700 pr_err("hardware monitoring not enabled\n"); 731 pr_err("hardware monitoring not enabled\n");
701 err = -ENODEV; 732 err = -ENODEV;
702 goto error; 733 goto error;
703 } 734 }
735 /* Trigger a Vbat voltage measurement, so that we get a valid reading
736 the first time we read Vbat */
737 sch5627_write_virtual_reg(data, SCH5627_REG_CTRL,
738 data->control | 0x10);
739 data->last_battery = jiffies;
704 740
705 /* 741 /*
706 * Read limits, we do this only once as reading a register on 742 * Read limits, we do this only once as reading a register on
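The sch5627 changes fold the old read-only mailbox routine into sch5627_send_cmd(), parameterised by the command byte (0x02 for a VREG read, 0x03 for a VREG write), and put thin read/write wrappers on top. The new write path is then used to kick the chip's Vbat measurement: writing the control register with bit 4 (0x10) set starts a battery-voltage conversion, done once at probe and then at most every five minutes from the update path. time_after() is the right primitive here because it stays correct across jiffies wrap-around; as a sketch:

    #include <linux/jiffies.h>

    /* request a fresh Vbat conversion no more than once per 300 s */
    if (time_after(jiffies, data->last_battery + 300 * HZ)) {
            sch5627_write_virtual_reg(data, SCH5627_REG_CTRL,
                                      data->control | 0x10);  /* bit 4: trigger Vbat measurement */
            data->last_battery = jiffies;
    }
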
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 1f36c635d933..27a62711e0a6 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -258,7 +258,7 @@ static int __devinit env_probe(struct platform_device *op)
258 goto out_sysfs_remove_group; 258 goto out_sysfs_remove_group;
259 } 259 }
260 260
261 dev_set_drvdata(&op->dev, p); 261 platform_set_drvdata(op, p);
262 err = 0; 262 err = 0;
263 263
264out: 264out:
@@ -277,7 +277,7 @@ out_free:
277 277
278static int __devexit env_remove(struct platform_device *op) 278static int __devexit env_remove(struct platform_device *op)
279{ 279{
280 struct env *p = dev_get_drvdata(&op->dev); 280 struct env *p = platform_get_drvdata(op);
281 281
282 if (p) { 282 if (p) {
283 sysfs_remove_group(&op->dev.kobj, &env_group); 283 sysfs_remove_group(&op->dev.kobj, &env_group);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index fee1a2613861..1b46a9d9f907 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -49,7 +49,6 @@
49#include <linux/init.h> 49#include <linux/init.h>
50#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/platform_device.h> 51#include <linux/platform_device.h>
52#include <linux/mfd/core.h>
53#include <linux/i2c.h> 52#include <linux/i2c.h>
54#include <linux/interrupt.h> 53#include <linux/interrupt.h>
55#include <linux/wait.h> 54#include <linux/wait.h>
@@ -306,7 +305,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
306 return -EIO; 305 return -EIO;
307 } 306 }
308 307
309 pdata = mfd_get_data(pdev); 308 pdata = pdev->dev.platform_data;
310 if (pdata) { 309 if (pdata) {
311 i2c->regstep = pdata->regstep; 310 i2c->regstep = pdata->regstep;
312 i2c->clock_khz = pdata->clock_khz; 311 i2c->clock_khz = pdata->clock_khz;
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index e9d5ff4d1496..4bb68f35caf2 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -34,7 +34,6 @@
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/mfd/core.h>
38#include <linux/i2c.h> 37#include <linux/i2c.h>
39#include <linux/interrupt.h> 38#include <linux/interrupt.h>
40#include <linux/wait.h> 39#include <linux/wait.h>
@@ -705,7 +704,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
705 if (irq < 0) 704 if (irq < 0)
706 goto resource_missing; 705 goto resource_missing;
707 706
708 pdata = mfd_get_data(pdev); 707 pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
709 if (!pdata) 708 if (!pdata)
710 return -EINVAL; 709 return -EINVAL;
711 710
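Both i2c-ocores and i2c-xiic drop the mfd_get_data() indirection and read dev.platform_data directly, which is where board code (or an MFD parent) is expected to place the data. A hypothetical board-file sketch for the OpenCores controller (device name, register spacing and clock value are illustrative; the pdata fields match those the driver reads above, and the "ocores-i2c" driver name is assumed unchanged by this patch):

    #include <linux/platform_device.h>
    #include <linux/i2c-ocores.h>

    static struct ocores_i2c_platform_data board_i2c_pdata = {
            .regstep   = 4,        /* register spacing in bytes */
            .clock_khz = 62500,    /* IP core input clock */
    };

    static struct platform_device board_i2c_dev = {
            .name = "ocores-i2c",
            .id   = -1,
            .dev  = {
                    .platform_data = &board_i2c_pdata,
            },
    };

registered from the board's init code with platform_device_register(&board_i2c_dev).
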
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index a5ec5a7cb381..6e5123b1d341 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1781,7 +1781,8 @@ static int ide_cd_probe(ide_drive_t *drive)
1781 1781
1782 ide_cd_read_toc(drive, &sense); 1782 ide_cd_read_toc(drive, &sense);
1783 g->fops = &idecd_ops; 1783 g->fops = &idecd_ops;
1784 g->flags |= GENHD_FL_REMOVABLE; 1784 g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
1785 g->events = DISK_EVENT_MEDIA_CHANGE;
1785 add_disk(g); 1786 add_disk(g);
1786 return 0; 1787 return 0;
1787 1788
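The ide-cd hunk opts the drive into the generic disk-events infrastructure: g->events declares which events the driver can report (media change), and GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE suppresses event checking while the device is held open exclusively for writing, so burning a disc is not disturbed by media-change polling. The reporting side of this interface is the ->check_events() hook in block_device_operations; a generic, hedged sketch of that pattern (not the ide-cd code itself; media_was_changed() is a placeholder for the hardware query):

    #include <linux/module.h>
    #include <linux/genhd.h>
    #include <linux/blkdev.h>

    static unsigned int mydrv_check_events(struct gendisk *disk, unsigned int clearing)
    {
            /* return the mask of declared events that occurred since the last call */
            return media_was_changed(disk) ? DISK_EVENT_MEDIA_CHANGE : 0;
    }

    static const struct block_device_operations mydrv_ops = {
            .owner        = THIS_MODULE,
            .check_events = mydrv_check_events,
    };
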
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 6e35eccc9caa..0f9a84c1046a 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -2,6 +2,7 @@ menuconfig INFINIBAND
2 tristate "InfiniBand support" 2 tristate "InfiniBand support"
3 depends on PCI || BROKEN 3 depends on PCI || BROKEN
4 depends on HAS_IOMEM 4 depends on HAS_IOMEM
5 depends on NET
5 ---help--- 6 ---help---
6 Core support for InfiniBand (IB). Make sure to also select 7 Core support for InfiniBand (IB). Make sure to also select
7 any protocols you wish to use as well as drivers for your 8 any protocols you wish to use as well as drivers for your
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index cb1ab3ea4998..c8bbaef1becb 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
8 $(user_access-y) 8 $(user_access-y)
9 9
10ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ 10ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
11 device.o fmr_pool.o cache.o 11 device.o fmr_pool.o cache.o netlink.o
12ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o 12ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
13 13
14ib_mad-y := mad.o smi.o agent.o mad_rmpp.o 14ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f804e28e1ebb..f62f52fb9ece 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = {
3639 .release = cm_release_port_obj 3639 .release = cm_release_port_obj
3640}; 3640};
3641 3641
3642static char *cm_devnode(struct device *dev, mode_t *mode)
3643{
3644 *mode = 0666;
3645 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3646}
3647
3642struct class cm_class = { 3648struct class cm_class = {
3649 .owner = THIS_MODULE,
3643 .name = "infiniband_cm", 3650 .name = "infiniband_cm",
3651 .devnode = cm_devnode,
3644}; 3652};
3645EXPORT_SYMBOL(cm_class); 3653EXPORT_SYMBOL(cm_class);
3646 3654
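cm_class now installs a .devnode callback, which is how a class tells the driver core (and udev/devtmpfs) what to call the device node and what default permissions to give it: the nodes land under /dev/infiniband/<name> with mode 0666 instead of the default 0600 directly in /dev. The same pattern for a hypothetical class, as a sketch:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/module.h>

    static char *foo_devnode(struct device *dev, mode_t *mode)
    {
            if (mode)
                    *mode = 0666;   /* world read/write device nodes */
            return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));  /* -> /dev/foo/<name> */
    }

    static struct class foo_class = {
            .owner   = THIS_MODULE,
            .name    = "foo",
            .devnode = foo_devnode,
    };
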
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 99dde874fbbd..b6a33b3c516d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -47,6 +47,7 @@
47 47
48#include <rdma/rdma_cm.h> 48#include <rdma/rdma_cm.h>
49#include <rdma/rdma_cm_ib.h> 49#include <rdma/rdma_cm_ib.h>
50#include <rdma/rdma_netlink.h>
50#include <rdma/ib_cache.h> 51#include <rdma/ib_cache.h>
51#include <rdma/ib_cm.h> 52#include <rdma/ib_cm.h>
52#include <rdma/ib_sa.h> 53#include <rdma/ib_sa.h>
@@ -89,20 +90,6 @@ struct cma_device {
89 struct list_head id_list; 90 struct list_head id_list;
90}; 91};
91 92
92enum cma_state {
93 CMA_IDLE,
94 CMA_ADDR_QUERY,
95 CMA_ADDR_RESOLVED,
96 CMA_ROUTE_QUERY,
97 CMA_ROUTE_RESOLVED,
98 CMA_CONNECT,
99 CMA_DISCONNECT,
100 CMA_ADDR_BOUND,
101 CMA_LISTEN,
102 CMA_DEVICE_REMOVAL,
103 CMA_DESTROYING
104};
105
106struct rdma_bind_list { 93struct rdma_bind_list {
107 struct idr *ps; 94 struct idr *ps;
108 struct hlist_head owners; 95 struct hlist_head owners;
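The private enum cma_state is removed; the same eleven states now live in a public enum rdma_cm_state (presumably in <rdma/rdma_cm.h>, alongside the new <rdma/rdma_netlink.h> include above) so the state of a CM ID can be reported outside cma.c, e.g. by the netlink statistics code added at the end of this file. Reconstructed from the renamed call sites below, the new enum is expected to mirror the removed one one-for-one:

    enum rdma_cm_state {
            RDMA_CM_IDLE,
            RDMA_CM_ADDR_QUERY,
            RDMA_CM_ADDR_RESOLVED,
            RDMA_CM_ROUTE_QUERY,
            RDMA_CM_ROUTE_RESOLVED,
            RDMA_CM_CONNECT,
            RDMA_CM_DISCONNECT,
            RDMA_CM_ADDR_BOUND,
            RDMA_CM_LISTEN,
            RDMA_CM_DEVICE_REMOVAL,
            RDMA_CM_DESTROYING
    };
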
@@ -126,7 +113,7 @@ struct rdma_id_private {
126 struct list_head mc_list; 113 struct list_head mc_list;
127 114
128 int internal_id; 115 int internal_id;
129 enum cma_state state; 116 enum rdma_cm_state state;
130 spinlock_t lock; 117 spinlock_t lock;
131 struct mutex qp_mutex; 118 struct mutex qp_mutex;
132 119
@@ -146,6 +133,7 @@ struct rdma_id_private {
146 u32 seq_num; 133 u32 seq_num;
147 u32 qkey; 134 u32 qkey;
148 u32 qp_num; 135 u32 qp_num;
136 pid_t owner;
149 u8 srq; 137 u8 srq;
150 u8 tos; 138 u8 tos;
151 u8 reuseaddr; 139 u8 reuseaddr;
@@ -165,8 +153,8 @@ struct cma_multicast {
165struct cma_work { 153struct cma_work {
166 struct work_struct work; 154 struct work_struct work;
167 struct rdma_id_private *id; 155 struct rdma_id_private *id;
168 enum cma_state old_state; 156 enum rdma_cm_state old_state;
169 enum cma_state new_state; 157 enum rdma_cm_state new_state;
170 struct rdma_cm_event event; 158 struct rdma_cm_event event;
171}; 159};
172 160
@@ -217,7 +205,7 @@ struct sdp_hah {
217#define CMA_VERSION 0x00 205#define CMA_VERSION 0x00
218#define SDP_MAJ_VERSION 0x2 206#define SDP_MAJ_VERSION 0x2
219 207
220static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) 208static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
221{ 209{
222 unsigned long flags; 210 unsigned long flags;
223 int ret; 211 int ret;
@@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
229} 217}
230 218
231static int cma_comp_exch(struct rdma_id_private *id_priv, 219static int cma_comp_exch(struct rdma_id_private *id_priv,
232 enum cma_state comp, enum cma_state exch) 220 enum rdma_cm_state comp, enum rdma_cm_state exch)
233{ 221{
234 unsigned long flags; 222 unsigned long flags;
235 int ret; 223 int ret;
@@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
241 return ret; 229 return ret;
242} 230}
243 231
244static enum cma_state cma_exch(struct rdma_id_private *id_priv, 232static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
245 enum cma_state exch) 233 enum rdma_cm_state exch)
246{ 234{
247 unsigned long flags; 235 unsigned long flags;
248 enum cma_state old; 236 enum rdma_cm_state old;
249 237
250 spin_lock_irqsave(&id_priv->lock, flags); 238 spin_lock_irqsave(&id_priv->lock, flags);
251 old = id_priv->state; 239 old = id_priv->state;
@@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
279 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); 267 hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
280} 268}
281 269
282static inline int cma_is_ud_ps(enum rdma_port_space ps)
283{
284 return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
285}
286
287static void cma_attach_to_dev(struct rdma_id_private *id_priv, 270static void cma_attach_to_dev(struct rdma_id_private *id_priv,
288 struct cma_device *cma_dev) 271 struct cma_device *cma_dev)
289{ 272{
@@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
413} 396}
414 397
415static int cma_disable_callback(struct rdma_id_private *id_priv, 398static int cma_disable_callback(struct rdma_id_private *id_priv,
416 enum cma_state state) 399 enum rdma_cm_state state)
417{ 400{
418 mutex_lock(&id_priv->handler_mutex); 401 mutex_lock(&id_priv->handler_mutex);
419 if (id_priv->state != state) { 402 if (id_priv->state != state) {
@@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
429} 412}
430 413
431struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 414struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
432 void *context, enum rdma_port_space ps) 415 void *context, enum rdma_port_space ps,
416 enum ib_qp_type qp_type)
433{ 417{
434 struct rdma_id_private *id_priv; 418 struct rdma_id_private *id_priv;
435 419
@@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
437 if (!id_priv) 421 if (!id_priv)
438 return ERR_PTR(-ENOMEM); 422 return ERR_PTR(-ENOMEM);
439 423
440 id_priv->state = CMA_IDLE; 424 id_priv->owner = task_pid_nr(current);
425 id_priv->state = RDMA_CM_IDLE;
441 id_priv->id.context = context; 426 id_priv->id.context = context;
442 id_priv->id.event_handler = event_handler; 427 id_priv->id.event_handler = event_handler;
443 id_priv->id.ps = ps; 428 id_priv->id.ps = ps;
429 id_priv->id.qp_type = qp_type;
444 spin_lock_init(&id_priv->lock); 430 spin_lock_init(&id_priv->lock);
445 mutex_init(&id_priv->qp_mutex); 431 mutex_init(&id_priv->qp_mutex);
446 init_completion(&id_priv->comp); 432 init_completion(&id_priv->comp);
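rdma_create_id() grows an enum ib_qp_type argument, stored in id.qp_type, and the PID of the creating task is recorded in the private structure. Callers therefore state the intended QP type up front; for example, the iWARP request handler further down now creates connection IDs like this (the handler and context names here are the caller's own):

    struct rdma_cm_id *id;

    id = rdma_create_id(my_event_handler, my_context, RDMA_PS_TCP, IB_QPT_RC);
    if (IS_ERR(id))
            return PTR_ERR(id);

while the UD paths (cma_new_udp_id() and the SIDR handling) pass IB_QPT_UD instead.
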
@@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
508 if (IS_ERR(qp)) 494 if (IS_ERR(qp))
509 return PTR_ERR(qp); 495 return PTR_ERR(qp);
510 496
511 if (cma_is_ud_ps(id_priv->id.ps)) 497 if (id->qp_type == IB_QPT_UD)
512 ret = cma_init_ud_qp(id_priv, qp); 498 ret = cma_init_ud_qp(id_priv, qp);
513 else 499 else
514 ret = cma_init_conn_qp(id_priv, qp); 500 ret = cma_init_conn_qp(id_priv, qp);
@@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
636 qp_attr->port_num = id_priv->id.port_num; 622 qp_attr->port_num = id_priv->id.port_num;
637 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; 623 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
638 624
639 if (cma_is_ud_ps(id_priv->id.ps)) { 625 if (id_priv->id.qp_type == IB_QPT_UD) {
640 ret = cma_set_qkey(id_priv); 626 ret = cma_set_qkey(id_priv);
641 if (ret) 627 if (ret)
642 return ret; 628 return ret;
@@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
659 id_priv = container_of(id, struct rdma_id_private, id); 645 id_priv = container_of(id, struct rdma_id_private, id);
660 switch (rdma_node_get_transport(id_priv->id.device->node_type)) { 646 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
661 case RDMA_TRANSPORT_IB: 647 case RDMA_TRANSPORT_IB:
662 if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) 648 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
663 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); 649 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
664 else 650 else
665 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 651 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
858} 844}
859 845
860static void cma_cancel_operation(struct rdma_id_private *id_priv, 846static void cma_cancel_operation(struct rdma_id_private *id_priv,
861 enum cma_state state) 847 enum rdma_cm_state state)
862{ 848{
863 switch (state) { 849 switch (state) {
864 case CMA_ADDR_QUERY: 850 case RDMA_CM_ADDR_QUERY:
865 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 851 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
866 break; 852 break;
867 case CMA_ROUTE_QUERY: 853 case RDMA_CM_ROUTE_QUERY:
868 cma_cancel_route(id_priv); 854 cma_cancel_route(id_priv);
869 break; 855 break;
870 case CMA_LISTEN: 856 case RDMA_CM_LISTEN:
871 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) 857 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
872 && !id_priv->cma_dev) 858 && !id_priv->cma_dev)
873 cma_cancel_listens(id_priv); 859 cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
918void rdma_destroy_id(struct rdma_cm_id *id) 904void rdma_destroy_id(struct rdma_cm_id *id)
919{ 905{
920 struct rdma_id_private *id_priv; 906 struct rdma_id_private *id_priv;
921 enum cma_state state; 907 enum rdma_cm_state state;
922 908
923 id_priv = container_of(id, struct rdma_id_private, id); 909 id_priv = container_of(id, struct rdma_id_private, id);
924 state = cma_exch(id_priv, CMA_DESTROYING); 910 state = cma_exch(id_priv, RDMA_CM_DESTROYING);
925 cma_cancel_operation(id_priv, state); 911 cma_cancel_operation(id_priv, state);
926 912
927 /* 913 /*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1015 int ret = 0; 1001 int ret = 0;
1016 1002
1017 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1003 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
1018 cma_disable_callback(id_priv, CMA_CONNECT)) || 1004 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
1019 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1005 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
1020 cma_disable_callback(id_priv, CMA_DISCONNECT))) 1006 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
1021 return 0; 1007 return 0;
1022 1008
1023 memset(&event, 0, sizeof event); 1009 memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1048 event.status = -ETIMEDOUT; /* fall through */ 1034 event.status = -ETIMEDOUT; /* fall through */
1049 case IB_CM_DREQ_RECEIVED: 1035 case IB_CM_DREQ_RECEIVED:
1050 case IB_CM_DREP_RECEIVED: 1036 case IB_CM_DREP_RECEIVED:
1051 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) 1037 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
1038 RDMA_CM_DISCONNECT))
1052 goto out; 1039 goto out;
1053 event.event = RDMA_CM_EVENT_DISCONNECTED; 1040 event.event = RDMA_CM_EVENT_DISCONNECTED;
1054 break; 1041 break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1075 if (ret) { 1062 if (ret) {
1076 /* Destroy the CM ID by returning a non-zero value. */ 1063 /* Destroy the CM ID by returning a non-zero value. */
1077 id_priv->cm_id.ib = NULL; 1064 id_priv->cm_id.ib = NULL;
1078 cma_exch(id_priv, CMA_DESTROYING); 1065 cma_exch(id_priv, RDMA_CM_DESTROYING);
1079 mutex_unlock(&id_priv->handler_mutex); 1066 mutex_unlock(&id_priv->handler_mutex);
1080 rdma_destroy_id(&id_priv->id); 1067 rdma_destroy_id(&id_priv->id);
1081 return ret; 1068 return ret;
@@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1101 goto err; 1088 goto err;
1102 1089
1103 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1090 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1104 listen_id->ps); 1091 listen_id->ps, ib_event->param.req_rcvd.qp_type);
1105 if (IS_ERR(id)) 1092 if (IS_ERR(id))
1106 goto err; 1093 goto err;
1107 1094
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
1132 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1119 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
1133 1120
1134 id_priv = container_of(id, struct rdma_id_private, id); 1121 id_priv = container_of(id, struct rdma_id_private, id);
1135 id_priv->state = CMA_CONNECT; 1122 id_priv->state = RDMA_CM_CONNECT;
1136 return id_priv; 1123 return id_priv;
1137 1124
1138destroy_id: 1125destroy_id:
@@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1152 int ret; 1139 int ret;
1153 1140
1154 id = rdma_create_id(listen_id->event_handler, listen_id->context, 1141 id = rdma_create_id(listen_id->event_handler, listen_id->context,
1155 listen_id->ps); 1142 listen_id->ps, IB_QPT_UD);
1156 if (IS_ERR(id)) 1143 if (IS_ERR(id))
1157 return NULL; 1144 return NULL;
1158 1145
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
1172 } 1159 }
1173 1160
1174 id_priv = container_of(id, struct rdma_id_private, id); 1161 id_priv = container_of(id, struct rdma_id_private, id);
1175 id_priv->state = CMA_CONNECT; 1162 id_priv->state = RDMA_CM_CONNECT;
1176 return id_priv; 1163 return id_priv;
1177err: 1164err:
1178 rdma_destroy_id(id); 1165 rdma_destroy_id(id);
@@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1201 int offset, ret; 1188 int offset, ret;
1202 1189
1203 listen_id = cm_id->context; 1190 listen_id = cm_id->context;
1204 if (cma_disable_callback(listen_id, CMA_LISTEN)) 1191 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
1205 return -ECONNABORTED; 1192 return -ECONNABORTED;
1206 1193
1207 memset(&event, 0, sizeof event); 1194 memset(&event, 0, sizeof event);
1208 offset = cma_user_data_offset(listen_id->id.ps); 1195 offset = cma_user_data_offset(listen_id->id.ps);
1209 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1196 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
1210 if (cma_is_ud_ps(listen_id->id.ps)) { 1197 if (listen_id->id.qp_type == IB_QPT_UD) {
1211 conn_id = cma_new_udp_id(&listen_id->id, ib_event); 1198 conn_id = cma_new_udp_id(&listen_id->id, ib_event);
1212 event.param.ud.private_data = ib_event->private_data + offset; 1199 event.param.ud.private_data = ib_event->private_data + offset;
1213 event.param.ud.private_data_len = 1200 event.param.ud.private_data_len =
@@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1243 * while we're accessing the cm_id. 1230 * while we're accessing the cm_id.
1244 */ 1231 */
1245 mutex_lock(&lock); 1232 mutex_lock(&lock);
1246 if (cma_comp(conn_id, CMA_CONNECT) && 1233 if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
1247 !cma_is_ud_ps(conn_id->id.ps))
1248 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1234 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1249 mutex_unlock(&lock); 1235 mutex_unlock(&lock);
1250 mutex_unlock(&conn_id->handler_mutex); 1236 mutex_unlock(&conn_id->handler_mutex);
@@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1257 conn_id->cm_id.ib = NULL; 1243 conn_id->cm_id.ib = NULL;
1258 1244
1259release_conn_id: 1245release_conn_id:
1260 cma_exch(conn_id, CMA_DESTROYING); 1246 cma_exch(conn_id, RDMA_CM_DESTROYING);
1261 mutex_unlock(&conn_id->handler_mutex); 1247 mutex_unlock(&conn_id->handler_mutex);
1262 rdma_destroy_id(&conn_id->id); 1248 rdma_destroy_id(&conn_id->id);
1263 1249
@@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1328 struct sockaddr_in *sin; 1314 struct sockaddr_in *sin;
1329 int ret = 0; 1315 int ret = 0;
1330 1316
1331 if (cma_disable_callback(id_priv, CMA_CONNECT)) 1317 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
1332 return 0; 1318 return 0;
1333 1319
1334 memset(&event, 0, sizeof event); 1320 memset(&event, 0, sizeof event);
@@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1371 if (ret) { 1357 if (ret) {
1372 /* Destroy the CM ID by returning a non-zero value. */ 1358 /* Destroy the CM ID by returning a non-zero value. */
1373 id_priv->cm_id.iw = NULL; 1359 id_priv->cm_id.iw = NULL;
1374 cma_exch(id_priv, CMA_DESTROYING); 1360 cma_exch(id_priv, RDMA_CM_DESTROYING);
1375 mutex_unlock(&id_priv->handler_mutex); 1361 mutex_unlock(&id_priv->handler_mutex);
1376 rdma_destroy_id(&id_priv->id); 1362 rdma_destroy_id(&id_priv->id);
1377 return ret; 1363 return ret;
@@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1393 struct ib_device_attr attr; 1379 struct ib_device_attr attr;
1394 1380
1395 listen_id = cm_id->context; 1381 listen_id = cm_id->context;
1396 if (cma_disable_callback(listen_id, CMA_LISTEN)) 1382 if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
1397 return -ECONNABORTED; 1383 return -ECONNABORTED;
1398 1384
1399 /* Create a new RDMA id for the new IW CM ID */ 1385 /* Create a new RDMA id for the new IW CM ID */
1400 new_cm_id = rdma_create_id(listen_id->id.event_handler, 1386 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1401 listen_id->id.context, 1387 listen_id->id.context,
1402 RDMA_PS_TCP); 1388 RDMA_PS_TCP, IB_QPT_RC);
1403 if (IS_ERR(new_cm_id)) { 1389 if (IS_ERR(new_cm_id)) {
1404 ret = -ENOMEM; 1390 ret = -ENOMEM;
1405 goto out; 1391 goto out;
1406 } 1392 }
1407 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 1393 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1408 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1394 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
1409 conn_id->state = CMA_CONNECT; 1395 conn_id->state = RDMA_CM_CONNECT;
1410 1396
1411 dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr); 1397 dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
1412 if (!dev) { 1398 if (!dev) {
@@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1461 if (ret) { 1447 if (ret) {
1462 /* User wants to destroy the CM ID */ 1448 /* User wants to destroy the CM ID */
1463 conn_id->cm_id.iw = NULL; 1449 conn_id->cm_id.iw = NULL;
1464 cma_exch(conn_id, CMA_DESTROYING); 1450 cma_exch(conn_id, RDMA_CM_DESTROYING);
1465 mutex_unlock(&conn_id->handler_mutex); 1451 mutex_unlock(&conn_id->handler_mutex);
1466 cma_deref_id(conn_id); 1452 cma_deref_id(conn_id);
1467 rdma_destroy_id(&conn_id->id); 1453 rdma_destroy_id(&conn_id->id);
@@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
1548 struct rdma_cm_id *id; 1534 struct rdma_cm_id *id;
1549 int ret; 1535 int ret;
1550 1536
1551 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); 1537 id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
1538 id_priv->id.qp_type);
1552 if (IS_ERR(id)) 1539 if (IS_ERR(id))
1553 return; 1540 return;
1554 1541
1555 dev_id_priv = container_of(id, struct rdma_id_private, id); 1542 dev_id_priv = container_of(id, struct rdma_id_private, id);
1556 1543
1557 dev_id_priv->state = CMA_ADDR_BOUND; 1544 dev_id_priv->state = RDMA_CM_ADDR_BOUND;
1558 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, 1545 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
1559 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); 1546 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
1560 1547
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
1601 route->num_paths = 1; 1588 route->num_paths = 1;
1602 *route->path_rec = *path_rec; 1589 *route->path_rec = *path_rec;
1603 } else { 1590 } else {
1604 work->old_state = CMA_ROUTE_QUERY; 1591 work->old_state = RDMA_CM_ROUTE_QUERY;
1605 work->new_state = CMA_ADDR_RESOLVED; 1592 work->new_state = RDMA_CM_ADDR_RESOLVED;
1606 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 1593 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
1607 work->event.status = status; 1594 work->event.status = status;
1608 } 1595 }
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
1660 goto out; 1647 goto out;
1661 1648
1662 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1649 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1663 cma_exch(id_priv, CMA_DESTROYING); 1650 cma_exch(id_priv, RDMA_CM_DESTROYING);
1664 destroy = 1; 1651 destroy = 1;
1665 } 1652 }
1666out: 1653out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
1678 int destroy = 0; 1665 int destroy = 0;
1679 1666
1680 mutex_lock(&id_priv->handler_mutex); 1667 mutex_lock(&id_priv->handler_mutex);
1681 if (id_priv->state == CMA_DESTROYING || 1668 if (id_priv->state == RDMA_CM_DESTROYING ||
1682 id_priv->state == CMA_DEVICE_REMOVAL) 1669 id_priv->state == RDMA_CM_DEVICE_REMOVAL)
1683 goto out; 1670 goto out;
1684 1671
1685 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1672 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1686 cma_exch(id_priv, CMA_DESTROYING); 1673 cma_exch(id_priv, RDMA_CM_DESTROYING);
1687 destroy = 1; 1674 destroy = 1;
1688 } 1675 }
1689 1676
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1707 1694
1708 work->id = id_priv; 1695 work->id = id_priv;
1709 INIT_WORK(&work->work, cma_work_handler); 1696 INIT_WORK(&work->work, cma_work_handler);
1710 work->old_state = CMA_ROUTE_QUERY; 1697 work->old_state = RDMA_CM_ROUTE_QUERY;
1711 work->new_state = CMA_ROUTE_RESOLVED; 1698 work->new_state = RDMA_CM_ROUTE_RESOLVED;
1712 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1699 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1713 1700
1714 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 1701 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
1737 int ret; 1724 int ret;
1738 1725
1739 id_priv = container_of(id, struct rdma_id_private, id); 1726 id_priv = container_of(id, struct rdma_id_private, id);
1740 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) 1727 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
1728 RDMA_CM_ROUTE_RESOLVED))
1741 return -EINVAL; 1729 return -EINVAL;
1742 1730
1743 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, 1731 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
1750 id->route.num_paths = num_paths; 1738 id->route.num_paths = num_paths;
1751 return 0; 1739 return 0;
1752err: 1740err:
1753 cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); 1741 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
1754 return ret; 1742 return ret;
1755} 1743}
1756EXPORT_SYMBOL(rdma_set_ib_paths); 1744EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1765 1753
1766 work->id = id_priv; 1754 work->id = id_priv;
1767 INIT_WORK(&work->work, cma_work_handler); 1755 INIT_WORK(&work->work, cma_work_handler);
1768 work->old_state = CMA_ROUTE_QUERY; 1756 work->old_state = RDMA_CM_ROUTE_QUERY;
1769 work->new_state = CMA_ROUTE_RESOLVED; 1757 work->new_state = RDMA_CM_ROUTE_RESOLVED;
1770 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1758 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1771 queue_work(cma_wq, &work->work); 1759 queue_work(cma_wq, &work->work);
1772 return 0; 1760 return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
1830 goto err2; 1818 goto err2;
1831 } 1819 }
1832 1820
1833 work->old_state = CMA_ROUTE_QUERY; 1821 work->old_state = RDMA_CM_ROUTE_QUERY;
1834 work->new_state = CMA_ROUTE_RESOLVED; 1822 work->new_state = RDMA_CM_ROUTE_RESOLVED;
1835 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1823 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1836 work->event.status = 0; 1824 work->event.status = 0;
1837 1825
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1853 int ret; 1841 int ret;
1854 1842
1855 id_priv = container_of(id, struct rdma_id_private, id); 1843 id_priv = container_of(id, struct rdma_id_private, id);
1856 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY)) 1844 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
1857 return -EINVAL; 1845 return -EINVAL;
1858 1846
1859 atomic_inc(&id_priv->refcount); 1847 atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1882 1870
1883 return 0; 1871 return 0;
1884err: 1872err:
1885 cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED); 1873 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
1886 cma_deref_id(id_priv); 1874 cma_deref_id(id_priv);
1887 return ret; 1875 return ret;
1888} 1876}
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1941 1929
1942 memset(&event, 0, sizeof event); 1930 memset(&event, 0, sizeof event);
1943 mutex_lock(&id_priv->handler_mutex); 1931 mutex_lock(&id_priv->handler_mutex);
1944 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) 1932 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
1933 RDMA_CM_ADDR_RESOLVED))
1945 goto out; 1934 goto out;
1946 1935
1947 if (!status && !id_priv->cma_dev) 1936 if (!status && !id_priv->cma_dev)
1948 status = cma_acquire_dev(id_priv); 1937 status = cma_acquire_dev(id_priv);
1949 1938
1950 if (status) { 1939 if (status) {
1951 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) 1940 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
1941 RDMA_CM_ADDR_BOUND))
1952 goto out; 1942 goto out;
1953 event.event = RDMA_CM_EVENT_ADDR_ERROR; 1943 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1954 event.status = status; 1944 event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1959 } 1949 }
1960 1950
1961 if (id_priv->id.event_handler(&id_priv->id, &event)) { 1951 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1962 cma_exch(id_priv, CMA_DESTROYING); 1952 cma_exch(id_priv, RDMA_CM_DESTROYING);
1963 mutex_unlock(&id_priv->handler_mutex); 1953 mutex_unlock(&id_priv->handler_mutex);
1964 cma_deref_id(id_priv); 1954 cma_deref_id(id_priv);
1965 rdma_destroy_id(&id_priv->id); 1955 rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
2004 1994
2005 work->id = id_priv; 1995 work->id = id_priv;
2006 INIT_WORK(&work->work, cma_work_handler); 1996 INIT_WORK(&work->work, cma_work_handler);
2007 work->old_state = CMA_ADDR_QUERY; 1997 work->old_state = RDMA_CM_ADDR_QUERY;
2008 work->new_state = CMA_ADDR_RESOLVED; 1998 work->new_state = RDMA_CM_ADDR_RESOLVED;
2009 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 1999 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2010 queue_work(cma_wq, &work->work); 2000 queue_work(cma_wq, &work->work);
2011 return 0; 2001 return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2034 int ret; 2024 int ret;
2035 2025
2036 id_priv = container_of(id, struct rdma_id_private, id); 2026 id_priv = container_of(id, struct rdma_id_private, id);
2037 if (id_priv->state == CMA_IDLE) { 2027 if (id_priv->state == RDMA_CM_IDLE) {
2038 ret = cma_bind_addr(id, src_addr, dst_addr); 2028 ret = cma_bind_addr(id, src_addr, dst_addr);
2039 if (ret) 2029 if (ret)
2040 return ret; 2030 return ret;
2041 } 2031 }
2042 2032
2043 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY)) 2033 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
2044 return -EINVAL; 2034 return -EINVAL;
2045 2035
2046 atomic_inc(&id_priv->refcount); 2036 atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2056 2046
2057 return 0; 2047 return 0;
2058err: 2048err:
2059 cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND); 2049 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
2060 cma_deref_id(id_priv); 2050 cma_deref_id(id_priv);
2061 return ret; 2051 return ret;
2062} 2052}
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
2070 2060
2071 id_priv = container_of(id, struct rdma_id_private, id); 2061 id_priv = container_of(id, struct rdma_id_private, id);
2072 spin_lock_irqsave(&id_priv->lock, flags); 2062 spin_lock_irqsave(&id_priv->lock, flags);
2073 if (id_priv->state == CMA_IDLE) { 2063 if (id_priv->state == RDMA_CM_IDLE) {
2074 id_priv->reuseaddr = reuse; 2064 id_priv->reuseaddr = reuse;
2075 ret = 0; 2065 ret = 0;
2076 } else { 2066 } else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
2177 if (id_priv == cur_id) 2167 if (id_priv == cur_id)
2178 continue; 2168 continue;
2179 2169
2180 if ((cur_id->state == CMA_LISTEN) || 2170 if ((cur_id->state == RDMA_CM_LISTEN) ||
2181 !reuseaddr || !cur_id->reuseaddr) { 2171 !reuseaddr || !cur_id->reuseaddr) {
2182 cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; 2172 cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
2183 if (cma_any_addr(cur_addr)) 2173 if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
2280 int ret; 2270 int ret;
2281 2271
2282 id_priv = container_of(id, struct rdma_id_private, id); 2272 id_priv = container_of(id, struct rdma_id_private, id);
2283 if (id_priv->state == CMA_IDLE) { 2273 if (id_priv->state == RDMA_CM_IDLE) {
2284 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; 2274 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
2285 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); 2275 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
2286 if (ret) 2276 if (ret)
2287 return ret; 2277 return ret;
2288 } 2278 }
2289 2279
2290 if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) 2280 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
2291 return -EINVAL; 2281 return -EINVAL;
2292 2282
2293 if (id_priv->reuseaddr) { 2283 if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
2319 return 0; 2309 return 0;
2320err: 2310err:
2321 id_priv->backlog = 0; 2311 id_priv->backlog = 0;
2322 cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); 2312 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
2323 return ret; 2313 return ret;
2324} 2314}
2325EXPORT_SYMBOL(rdma_listen); 2315EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2333 return -EAFNOSUPPORT; 2323 return -EAFNOSUPPORT;
2334 2324
2335 id_priv = container_of(id, struct rdma_id_private, id); 2325 id_priv = container_of(id, struct rdma_id_private, id);
2336 if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) 2326 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
2337 return -EINVAL; 2327 return -EINVAL;
2338 2328
2339 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); 2329 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ err2:
2360 if (id_priv->cma_dev) 2350 if (id_priv->cma_dev)
2361 cma_release_dev(id_priv); 2351 cma_release_dev(id_priv);
2362err1: 2352err1:
2363 cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); 2353 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
2364 return ret; 2354 return ret;
2365} 2355}
2366EXPORT_SYMBOL(rdma_bind_addr); 2356EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2433 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 2423 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2434 int ret = 0; 2424 int ret = 0;
2435 2425
2436 if (cma_disable_callback(id_priv, CMA_CONNECT)) 2426 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
2437 return 0; 2427 return 0;
2438 2428
2439 memset(&event, 0, sizeof event); 2429 memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2479 if (ret) { 2469 if (ret) {
2480 /* Destroy the CM ID by returning a non-zero value. */ 2470 /* Destroy the CM ID by returning a non-zero value. */
2481 id_priv->cm_id.ib = NULL; 2471 id_priv->cm_id.ib = NULL;
2482 cma_exch(id_priv, CMA_DESTROYING); 2472 cma_exch(id_priv, RDMA_CM_DESTROYING);
2483 mutex_unlock(&id_priv->handler_mutex); 2473 mutex_unlock(&id_priv->handler_mutex);
2484 rdma_destroy_id(&id_priv->id); 2474 rdma_destroy_id(&id_priv->id);
2485 return ret; 2475 return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2645 int ret; 2635 int ret;
2646 2636
2647 id_priv = container_of(id, struct rdma_id_private, id); 2637 id_priv = container_of(id, struct rdma_id_private, id);
2648 if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) 2638 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
2649 return -EINVAL; 2639 return -EINVAL;
2650 2640
2651 if (!id->qp) { 2641 if (!id->qp) {
@@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2655 2645
2656 switch (rdma_node_get_transport(id->device->node_type)) { 2646 switch (rdma_node_get_transport(id->device->node_type)) {
2657 case RDMA_TRANSPORT_IB: 2647 case RDMA_TRANSPORT_IB:
2658 if (cma_is_ud_ps(id->ps)) 2648 if (id->qp_type == IB_QPT_UD)
2659 ret = cma_resolve_ib_udp(id_priv, conn_param); 2649 ret = cma_resolve_ib_udp(id_priv, conn_param);
2660 else 2650 else
2661 ret = cma_connect_ib(id_priv, conn_param); 2651 ret = cma_connect_ib(id_priv, conn_param);
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2672 2662
2673 return 0; 2663 return 0;
2674err: 2664err:
2675 cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED); 2665 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
2676 return ret; 2666 return ret;
2677} 2667}
2678EXPORT_SYMBOL(rdma_connect); 2668EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2758 int ret; 2748 int ret;
2759 2749
2760 id_priv = container_of(id, struct rdma_id_private, id); 2750 id_priv = container_of(id, struct rdma_id_private, id);
2761 if (!cma_comp(id_priv, CMA_CONNECT)) 2751
2752 id_priv->owner = task_pid_nr(current);
2753
2754 if (!cma_comp(id_priv, RDMA_CM_CONNECT))
2762 return -EINVAL; 2755 return -EINVAL;
2763 2756
2764 if (!id->qp && conn_param) { 2757 if (!id->qp && conn_param) {
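rdma_accept() re-stamps id_priv->owner with the PID of the accepting task. Passive-side IDs are created from the connection-request callback, i.e. in a CM workqueue context, so the PID captured at creation time would not identify the process that actually owns the connection; refreshing it here lets the new per-ID statistics code (cma_get_id_stats(), added at the end of this file) attribute each CM ID to a real process. The stamp itself is simply (sketch):

    id_priv->owner = task_pid_nr(current);  /* global PID of the task driving this CM ID */
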
@@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2768 2761
2769 switch (rdma_node_get_transport(id->device->node_type)) { 2762 switch (rdma_node_get_transport(id->device->node_type)) {
2770 case RDMA_TRANSPORT_IB: 2763 case RDMA_TRANSPORT_IB:
2771 if (cma_is_ud_ps(id->ps)) 2764 if (id->qp_type == IB_QPT_UD)
2772 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2765 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2773 conn_param->private_data, 2766 conn_param->private_data,
2774 conn_param->private_data_len); 2767 conn_param->private_data_len);
@@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2829 2822
2830 switch (rdma_node_get_transport(id->device->node_type)) { 2823 switch (rdma_node_get_transport(id->device->node_type)) {
2831 case RDMA_TRANSPORT_IB: 2824 case RDMA_TRANSPORT_IB:
2832 if (cma_is_ud_ps(id->ps)) 2825 if (id->qp_type == IB_QPT_UD)
2833 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2826 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2834 private_data, private_data_len); 2827 private_data, private_data_len);
2835 else 2828 else
@@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2887 int ret; 2880 int ret;
2888 2881
2889 id_priv = mc->id_priv; 2882 id_priv = mc->id_priv;
2890 if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && 2883 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
2891 cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) 2884 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
2892 return 0; 2885 return 0;
2893 2886
2894 mutex_lock(&id_priv->qp_mutex); 2887 mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2912 2905
2913 ret = id_priv->id.event_handler(&id_priv->id, &event); 2906 ret = id_priv->id.event_handler(&id_priv->id, &event);
2914 if (ret) { 2907 if (ret) {
2915 cma_exch(id_priv, CMA_DESTROYING); 2908 cma_exch(id_priv, RDMA_CM_DESTROYING);
2916 mutex_unlock(&id_priv->handler_mutex); 2909 mutex_unlock(&id_priv->handler_mutex);
2917 rdma_destroy_id(&id_priv->id); 2910 rdma_destroy_id(&id_priv->id);
2918 return 0; 2911 return 0;
@@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3095 int ret; 3088 int ret;
3096 3089
3097 id_priv = container_of(id, struct rdma_id_private, id); 3090 id_priv = container_of(id, struct rdma_id_private, id);
3098 if (!cma_comp(id_priv, CMA_ADDR_BOUND) && 3091 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
3099 !cma_comp(id_priv, CMA_ADDR_RESOLVED)) 3092 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
3100 return -EINVAL; 3093 return -EINVAL;
3101 3094
3102 mc = kmalloc(sizeof *mc, GFP_KERNEL); 3095 mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
3261static int cma_remove_id_dev(struct rdma_id_private *id_priv) 3254static int cma_remove_id_dev(struct rdma_id_private *id_priv)
3262{ 3255{
3263 struct rdma_cm_event event; 3256 struct rdma_cm_event event;
3264 enum cma_state state; 3257 enum rdma_cm_state state;
3265 int ret = 0; 3258 int ret = 0;
3266 3259
3267 /* Record that we want to remove the device */ 3260 /* Record that we want to remove the device */
3268 state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); 3261 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
3269 if (state == CMA_DESTROYING) 3262 if (state == RDMA_CM_DESTROYING)
3270 return 0; 3263 return 0;
3271 3264
3272 cma_cancel_operation(id_priv, state); 3265 cma_cancel_operation(id_priv, state);
3273 mutex_lock(&id_priv->handler_mutex); 3266 mutex_lock(&id_priv->handler_mutex);
3274 3267
3275 /* Check for destruction from another callback. */ 3268 /* Check for destruction from another callback. */
3276 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 3269 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
3277 goto out; 3270 goto out;
3278 3271
3279 memset(&event, 0, sizeof event); 3272 memset(&event, 0, sizeof event);
@@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
3328 kfree(cma_dev); 3321 kfree(cma_dev);
3329} 3322}
3330 3323
3324static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
3325{
3326 struct nlmsghdr *nlh;
3327 struct rdma_cm_id_stats *id_stats;
3328 struct rdma_id_private *id_priv;
3329 struct rdma_cm_id *id = NULL;
3330 struct cma_device *cma_dev;
3331 int i_dev = 0, i_id = 0;
3332
3333 /*
3334 * We export all of the IDs as a sequence of messages. Each
3335 * ID gets its own netlink message.
3336 */
3337 mutex_lock(&lock);
3338
3339 list_for_each_entry(cma_dev, &dev_list, list) {
3340 if (i_dev < cb->args[0]) {
3341 i_dev++;
3342 continue;
3343 }
3344
3345 i_id = 0;
3346 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
3347 if (i_id < cb->args[1]) {
3348 i_id++;
3349 continue;
3350 }
3351
3352 id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
3353 sizeof *id_stats, RDMA_NL_RDMA_CM,
3354 RDMA_NL_RDMA_CM_ID_STATS);
3355 if (!id_stats)
3356 goto out;
3357
3358 memset(id_stats, 0, sizeof *id_stats);
3359 id = &id_priv->id;
3360 id_stats->node_type = id->route.addr.dev_addr.dev_type;
3361 id_stats->port_num = id->port_num;
3362 id_stats->bound_dev_if =
3363 id->route.addr.dev_addr.bound_dev_if;
3364
3365 if (id->route.addr.src_addr.ss_family == AF_INET) {
3366 if (ibnl_put_attr(skb, nlh,
3367 sizeof(struct sockaddr_in),
3368 &id->route.addr.src_addr,
3369 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
3370 goto out;
3371 }
3372 if (ibnl_put_attr(skb, nlh,
3373 sizeof(struct sockaddr_in),
3374 &id->route.addr.dst_addr,
3375 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
3376 goto out;
3377 }
3378 } else if (id->route.addr.src_addr.ss_family == AF_INET6) {
3379 if (ibnl_put_attr(skb, nlh,
3380 sizeof(struct sockaddr_in6),
3381 &id->route.addr.src_addr,
3382 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
3383 goto out;
3384 }
3385 if (ibnl_put_attr(skb, nlh,
3386 sizeof(struct sockaddr_in6),
3387 &id->route.addr.dst_addr,
3388 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
3389 goto out;
3390 }
3391 }
3392
3393 id_stats->pid = id_priv->owner;
3394 id_stats->port_space = id->ps;
3395 id_stats->cm_state = id_priv->state;
3396 id_stats->qp_num = id_priv->qp_num;
3397 id_stats->qp_type = id->qp_type;
3398
3399 i_id++;
3400 }
3401
3402 cb->args[1] = 0;
3403 i_dev++;
3404 }
3405
3406out:
3407 mutex_unlock(&lock);
3408 cb->args[0] = i_dev;
3409 cb->args[1] = i_id;
3410
3411 return skb->len;
3412}
3413
3414static const struct ibnl_client_cbs cma_cb_table[] = {
3415 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
3416};
3417
3331static int __init cma_init(void) 3418static int __init cma_init(void)
3332{ 3419{
3333 int ret; 3420 int ret;
@@ -3343,6 +3430,10 @@ static int __init cma_init(void)
3343 ret = ib_register_client(&cma_client); 3430 ret = ib_register_client(&cma_client);
3344 if (ret) 3431 if (ret)
3345 goto err; 3432 goto err;
3433
3434 if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
3435 printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
3436
3346 return 0; 3437 return 0;
3347 3438
3348err: 3439err:
@@ -3355,6 +3446,7 @@ err:
3355 3446
3356static void __exit cma_cleanup(void) 3447static void __exit cma_cleanup(void)
3357{ 3448{
3449 ibnl_remove_client(RDMA_NL_RDMA_CM);
3358 ib_unregister_client(&cma_client); 3450 ib_unregister_client(&cma_client);
3359 unregister_netdevice_notifier(&cma_nb); 3451 unregister_netdevice_notifier(&cma_nb);
3360 rdma_addr_unregister_client(&addr_client); 3452 rdma_addr_unregister_client(&addr_client);
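The cma_get_id_stats() hunk above exports every RDMA CM ID as its own NLM_F_MULTI netlink message on the new NETLINK_RDMA family. A rough userspace sketch (not part of this commit) of how such a dump might be requested is shown below; it assumes the RDMA_NL_* constants and struct rdma_cm_id_stats from the kernel's rdma headers are visible to userspace, and the NETLINK_RDMA fallback value is likewise an assumption.

/*
 * Rough userspace sketch: request a dump of RDMA CM ID stats.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>
#include <rdma/rdma_cm.h>

#ifndef NETLINK_RDMA
#define NETLINK_RDMA 20			/* assumed to match the kernel definition */
#endif

int main(void)
{
	struct sockaddr_nl src = { .nl_family = AF_NETLINK };
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
	struct nlmsghdr req;
	char buf[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
	if (fd < 0 || bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlmsg_len = NLMSG_LENGTH(0);
	req.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
					  RDMA_NL_RDMA_CM_ID_STATS);
	req.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;	/* starts netlink_dump_start() */
	req.nlmsg_seq = 1;

	if (sendto(fd, &req, req.nlmsg_len, 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		return 1;

	/* Each ID arrives as its own NLM_F_MULTI message built by ibnl_put_msg(). */
	len = recv(fd, buf, sizeof(buf), 0);
	for (struct nlmsghdr *nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
	     nlh = NLMSG_NEXT(nlh, len)) {
		struct rdma_cm_id_stats *s;

		if (nlh->nlmsg_type == NLMSG_DONE)
			break;
		s = NLMSG_DATA(nlh);
		printf("pid %u ps %u state %u qpn %u qp_type %u\n",
		       (unsigned)s->pid, (unsigned)s->port_space,
		       (unsigned)s->cm_state, (unsigned)s->qp_num,
		       (unsigned)s->qp_type);
	}
	close(fd);
	return 0;
}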
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index f793bf2f5da7..4007f721d25d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <rdma/rdma_netlink.h>
41 42
42#include "core_priv.h" 43#include "core_priv.h"
43 44
@@ -725,22 +726,40 @@ static int __init ib_core_init(void)
725 return -ENOMEM; 726 return -ENOMEM;
726 727
727 ret = ib_sysfs_setup(); 728 ret = ib_sysfs_setup();
728 if (ret) 729 if (ret) {
729 printk(KERN_WARNING "Couldn't create InfiniBand device class\n"); 730 printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
731 goto err;
732 }
733
734 ret = ibnl_init();
735 if (ret) {
736 printk(KERN_WARNING "Couldn't init IB netlink interface\n");
737 goto err_sysfs;
738 }
730 739
731 ret = ib_cache_setup(); 740 ret = ib_cache_setup();
732 if (ret) { 741 if (ret) {
733 printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n"); 742 printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
734 ib_sysfs_cleanup(); 743 goto err_nl;
735 destroy_workqueue(ib_wq);
736 } 744 }
737 745
746 return 0;
747
748err_nl:
749 ibnl_cleanup();
750
751err_sysfs:
752 ib_sysfs_cleanup();
753
754err:
755 destroy_workqueue(ib_wq);
738 return ret; 756 return ret;
739} 757}
740 758
741static void __exit ib_core_cleanup(void) 759static void __exit ib_core_cleanup(void)
742{ 760{
743 ib_cache_cleanup(); 761 ib_cache_cleanup();
762 ibnl_cleanup();
744 ib_sysfs_cleanup(); 763 ib_sysfs_cleanup();
745 /* Make sure that any pending umem accounting work is done. */ 764 /* Make sure that any pending umem accounting work is done. */
746 destroy_workqueue(ib_wq); 765 destroy_workqueue(ib_wq);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 822cfdcd9f78..b4d8672a3e4e 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -276,6 +276,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
276 goto error1; 276 goto error1;
277 } 277 }
278 278
279 /* Verify the QP requested is supported. For example, Ethernet devices
280 * will not have QP0 */
281 if (!port_priv->qp_info[qpn].qp) {
282 ret = ERR_PTR(-EPROTONOSUPPORT);
283 goto error1;
284 }
285
279 /* Allocate structures */ 286 /* Allocate structures */
280 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); 287 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
281 if (!mad_agent_priv) { 288 if (!mad_agent_priv) {
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
new file mode 100644
index 000000000000..4a5abaf0a25c
--- /dev/null
+++ b/drivers/infiniband/core/netlink.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright (c) 2010 Voltaire Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
34
35#include <net/netlink.h>
36#include <net/net_namespace.h>
37#include <net/sock.h>
38#include <rdma/rdma_netlink.h>
39
40struct ibnl_client {
41 struct list_head list;
42 int index;
43 int nops;
44 const struct ibnl_client_cbs *cb_table;
45};
46
47static DEFINE_MUTEX(ibnl_mutex);
48static struct sock *nls;
49static LIST_HEAD(client_list);
50
51int ibnl_add_client(int index, int nops,
52 const struct ibnl_client_cbs cb_table[])
53{
54 struct ibnl_client *cur;
55 struct ibnl_client *nl_client;
56
57 nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL);
58 if (!nl_client)
59 return -ENOMEM;
60
61 nl_client->index = index;
62 nl_client->nops = nops;
63 nl_client->cb_table = cb_table;
64
65 mutex_lock(&ibnl_mutex);
66
67 list_for_each_entry(cur, &client_list, list) {
68 if (cur->index == index) {
69 pr_warn("Client for %d already exists\n", index);
70 mutex_unlock(&ibnl_mutex);
71 kfree(nl_client);
72 return -EINVAL;
73 }
74 }
75
76 list_add_tail(&nl_client->list, &client_list);
77
78 mutex_unlock(&ibnl_mutex);
79
80 return 0;
81}
82EXPORT_SYMBOL(ibnl_add_client);
83
84int ibnl_remove_client(int index)
85{
86 struct ibnl_client *cur, *next;
87
88 mutex_lock(&ibnl_mutex);
89 list_for_each_entry_safe(cur, next, &client_list, list) {
90 if (cur->index == index) {
91 list_del(&(cur->list));
92 mutex_unlock(&ibnl_mutex);
93 kfree(cur);
94 return 0;
95 }
96 }
97 pr_warn("Can't remove callback for client idx %d. Not found\n", index);
98 mutex_unlock(&ibnl_mutex);
99
100 return -EINVAL;
101}
102EXPORT_SYMBOL(ibnl_remove_client);
103
104void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
105 int len, int client, int op)
106{
107 unsigned char *prev_tail;
108
109 prev_tail = skb_tail_pointer(skb);
110 *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
111 len, NLM_F_MULTI);
112 (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
113 return NLMSG_DATA(*nlh);
114
115nlmsg_failure:
116 nlmsg_trim(skb, prev_tail);
117 return NULL;
118}
119EXPORT_SYMBOL(ibnl_put_msg);
120
121int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
122 int len, void *data, int type)
123{
124 unsigned char *prev_tail;
125
126 prev_tail = skb_tail_pointer(skb);
127 NLA_PUT(skb, type, len, data);
128 nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
129 return 0;
130
131nla_put_failure:
132 nlmsg_trim(skb, prev_tail - nlh->nlmsg_len);
133 return -EMSGSIZE;
134}
135EXPORT_SYMBOL(ibnl_put_attr);
136
137static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
138{
139 struct ibnl_client *client;
140 int type = nlh->nlmsg_type;
141 int index = RDMA_NL_GET_CLIENT(type);
142 int op = RDMA_NL_GET_OP(type);
143
144 list_for_each_entry(client, &client_list, list) {
145 if (client->index == index) {
146 if (op < 0 || op >= client->nops ||
147 !client->cb_table[RDMA_NL_GET_OP(op)].dump)
148 return -EINVAL;
149 return netlink_dump_start(nls, skb, nlh,
150 client->cb_table[op].dump,
151 NULL);
152 }
153 }
154
155 pr_info("Index %d wasn't found in client list\n", index);
156 return -EINVAL;
157}
158
159static void ibnl_rcv(struct sk_buff *skb)
160{
161 mutex_lock(&ibnl_mutex);
162 netlink_rcv_skb(skb, &ibnl_rcv_msg);
163 mutex_unlock(&ibnl_mutex);
164}
165
166int __init ibnl_init(void)
167{
168 nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
169 NULL, THIS_MODULE);
170 if (!nls) {
171 pr_warn("Failed to create netlink socket\n");
172 return -ENOMEM;
173 }
174
175 return 0;
176}
177
178void ibnl_cleanup(void)
179{
180 struct ibnl_client *cur, *next;
181
182 mutex_lock(&ibnl_mutex);
183 list_for_each_entry_safe(cur, next, &client_list, list) {
184 list_del(&(cur->list));
185 kfree(cur);
186 }
187 mutex_unlock(&ibnl_mutex);
188
189 netlink_kernel_release(nls);
190}
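The new netlink.c above provides the generic client registration and message-building helpers; cma.c is their first in-tree user. A minimal kernel-side sketch (not part of this commit) of how another module might register a dump callback could look as follows; MY_NL_CLIENT, MY_NL_OP_DUMP and the u32 payload are hypothetical, and a real client would use an index reserved in <rdma/rdma_netlink.h>.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

#define MY_NL_CLIENT	15	/* hypothetical client index */
#define MY_NL_OP_DUMP	0	/* hypothetical op number */

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	u32 *val;

	if (cb->args[0])		/* our single item was already dumped */
		return 0;

	/* One NLM_F_MULTI message per item, as in cma_get_id_stats(). */
	val = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq, sizeof(*val),
			   MY_NL_CLIENT, MY_NL_OP_DUMP);
	if (!val)
		return 0;
	*val = 42;			/* hypothetical payload */

	cb->args[0] = 1;
	return skb->len;
}

static const struct ibnl_client_cbs my_cb_table[] = {
	[MY_NL_OP_DUMP] = { .dump = my_dump },
};

static int __init my_nl_init(void)
{
	return ibnl_add_client(MY_NL_CLIENT, ARRAY_SIZE(my_cb_table),
			       my_cb_table);
}

static void __exit my_nl_exit(void)
{
	ibnl_remove_client(MY_NL_CLIENT);
}

module_init(my_nl_init);
module_exit(my_nl_exit);
MODULE_LICENSE("GPL");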
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b3fa798525b2..71be5eebd683 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -367,13 +367,28 @@ done:
367 return ret; 367 return ret;
368} 368}
369 369
370static ssize_t ucma_create_id(struct ucma_file *file, 370static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
371 const char __user *inbuf, 371{
372 int in_len, int out_len) 372 switch (cmd->ps) {
373 case RDMA_PS_TCP:
374 *qp_type = IB_QPT_RC;
375 return 0;
376 case RDMA_PS_UDP:
377 case RDMA_PS_IPOIB:
378 *qp_type = IB_QPT_UD;
379 return 0;
380 default:
381 return -EINVAL;
382 }
383}
384
385static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
386 int in_len, int out_len)
373{ 387{
374 struct rdma_ucm_create_id cmd; 388 struct rdma_ucm_create_id cmd;
375 struct rdma_ucm_create_id_resp resp; 389 struct rdma_ucm_create_id_resp resp;
376 struct ucma_context *ctx; 390 struct ucma_context *ctx;
391 enum ib_qp_type qp_type;
377 int ret; 392 int ret;
378 393
379 if (out_len < sizeof(resp)) 394 if (out_len < sizeof(resp))
@@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file,
382 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 397 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
383 return -EFAULT; 398 return -EFAULT;
384 399
400 ret = ucma_get_qp_type(&cmd, &qp_type);
401 if (ret)
402 return ret;
403
385 mutex_lock(&file->mut); 404 mutex_lock(&file->mut);
386 ctx = ucma_alloc_ctx(file); 405 ctx = ucma_alloc_ctx(file);
387 mutex_unlock(&file->mut); 406 mutex_unlock(&file->mut);
@@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file,
389 return -ENOMEM; 408 return -ENOMEM;
390 409
391 ctx->uid = cmd.uid; 410 ctx->uid = cmd.uid;
392 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); 411 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
393 if (IS_ERR(ctx->cm_id)) { 412 if (IS_ERR(ctx->cm_id)) {
394 ret = PTR_ERR(ctx->cm_id); 413 ret = PTR_ERR(ctx->cm_id);
395 goto err1; 414 goto err1;
@@ -1338,9 +1357,11 @@ static const struct file_operations ucma_fops = {
1338}; 1357};
1339 1358
1340static struct miscdevice ucma_misc = { 1359static struct miscdevice ucma_misc = {
1341 .minor = MISC_DYNAMIC_MINOR, 1360 .minor = MISC_DYNAMIC_MINOR,
1342 .name = "rdma_cm", 1361 .name = "rdma_cm",
1343 .fops = &ucma_fops, 1362 .nodename = "infiniband/rdma_cm",
1363 .mode = 0666,
1364 .fops = &ucma_fops,
1344}; 1365};
1345 1366
1346static ssize_t show_abi_version(struct device *dev, 1367static ssize_t show_abi_version(struct device *dev,
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index cd1996d0ad08..8d261b6ea5fe 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1176,6 +1176,11 @@ static void ib_umad_remove_one(struct ib_device *device)
1176 kref_put(&umad_dev->ref, ib_umad_release_dev); 1176 kref_put(&umad_dev->ref, ib_umad_release_dev);
1177} 1177}
1178 1178
1179static char *umad_devnode(struct device *dev, mode_t *mode)
1180{
1181 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
1182}
1183
1179static int __init ib_umad_init(void) 1184static int __init ib_umad_init(void)
1180{ 1185{
1181 int ret; 1186 int ret;
@@ -1194,6 +1199,8 @@ static int __init ib_umad_init(void)
1194 goto out_chrdev; 1199 goto out_chrdev;
1195 } 1200 }
1196 1201
1202 umad_class->devnode = umad_devnode;
1203
1197 ret = class_create_file(umad_class, &class_attr_abi_version.attr); 1204 ret = class_create_file(umad_class, &class_attr_abi_version.attr);
1198 if (ret) { 1205 if (ret) {
1199 printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); 1206 printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index ec83e9fe387b..e49a85f8a44d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -824,6 +824,12 @@ static void ib_uverbs_remove_one(struct ib_device *device)
824 kfree(uverbs_dev); 824 kfree(uverbs_dev);
825} 825}
826 826
827static char *uverbs_devnode(struct device *dev, mode_t *mode)
828{
829 *mode = 0666;
830 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
831}
832
827static int __init ib_uverbs_init(void) 833static int __init ib_uverbs_init(void)
828{ 834{
829 int ret; 835 int ret;
@@ -842,6 +848,8 @@ static int __init ib_uverbs_init(void)
842 goto out_chrdev; 848 goto out_chrdev;
843 } 849 }
844 850
851 uverbs_class->devnode = uverbs_devnode;
852
845 ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); 853 ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
846 if (ret) { 854 if (ret) {
847 printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); 855 printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 239184138994..0a5008fbebac 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
914 goto err; 914 goto err;
915 915
916 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { 916 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
917 iwch_post_zb_read(ep->com.qp); 917 iwch_post_zb_read(ep);
918 } 918 }
919 919
920 goto out; 920 goto out;
@@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1078 struct iwch_ep *ep = ctx; 1078 struct iwch_ep *ep = ctx;
1079 struct cpl_wr_ack *hdr = cplhdr(skb); 1079 struct cpl_wr_ack *hdr = cplhdr(skb);
1080 unsigned int credits = ntohs(hdr->credits); 1080 unsigned int credits = ntohs(hdr->credits);
1081 unsigned long flags;
1082 int post_zb = 0;
1081 1083
1082 PDBG("%s ep %p credits %u\n", __func__, ep, credits); 1084 PDBG("%s ep %p credits %u\n", __func__, ep, credits);
1083 1085
@@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1087 return CPL_RET_BUF_DONE; 1089 return CPL_RET_BUF_DONE;
1088 } 1090 }
1089 1091
1092 spin_lock_irqsave(&ep->com.lock, flags);
1090 BUG_ON(credits != 1); 1093 BUG_ON(credits != 1);
1091 dst_confirm(ep->dst); 1094 dst_confirm(ep->dst);
1092 if (!ep->mpa_skb) { 1095 if (!ep->mpa_skb) {
1093 PDBG("%s rdma_init wr_ack ep %p state %u\n", 1096 PDBG("%s rdma_init wr_ack ep %p state %u\n",
1094 __func__, ep, state_read(&ep->com)); 1097 __func__, ep, ep->com.state);
1095 if (ep->mpa_attr.initiator) { 1098 if (ep->mpa_attr.initiator) {
1096 PDBG("%s initiator ep %p state %u\n", 1099 PDBG("%s initiator ep %p state %u\n",
1097 __func__, ep, state_read(&ep->com)); 1100 __func__, ep, ep->com.state);
1098 if (peer2peer) 1101 if (peer2peer && ep->com.state == FPDU_MODE)
1099 iwch_post_zb_read(ep->com.qp); 1102 post_zb = 1;
1100 } else { 1103 } else {
1101 PDBG("%s responder ep %p state %u\n", 1104 PDBG("%s responder ep %p state %u\n",
1102 __func__, ep, state_read(&ep->com)); 1105 __func__, ep, ep->com.state);
1103 ep->com.rpl_done = 1; 1106 if (ep->com.state == MPA_REQ_RCVD) {
1104 wake_up(&ep->com.waitq); 1107 ep->com.rpl_done = 1;
1108 wake_up(&ep->com.waitq);
1109 }
1105 } 1110 }
1106 } else { 1111 } else {
1107 PDBG("%s lsm ack ep %p state %u freeing skb\n", 1112 PDBG("%s lsm ack ep %p state %u freeing skb\n",
1108 __func__, ep, state_read(&ep->com)); 1113 __func__, ep, ep->com.state);
1109 kfree_skb(ep->mpa_skb); 1114 kfree_skb(ep->mpa_skb);
1110 ep->mpa_skb = NULL; 1115 ep->mpa_skb = NULL;
1111 } 1116 }
1117 spin_unlock_irqrestore(&ep->com.lock, flags);
1118 if (post_zb)
1119 iwch_post_zb_read(ep);
1112 return CPL_RET_BUF_DONE; 1120 return CPL_RET_BUF_DONE;
1113} 1121}
1114 1122
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index c5406da3f4cd..9a342c9b220d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -332,7 +332,7 @@ int iwch_bind_mw(struct ib_qp *qp,
332 struct ib_mw_bind *mw_bind); 332 struct ib_mw_bind *mw_bind);
333int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); 333int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
334int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); 334int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
335int iwch_post_zb_read(struct iwch_qp *qhp); 335int iwch_post_zb_read(struct iwch_ep *ep);
336int iwch_register_device(struct iwch_dev *dev); 336int iwch_register_device(struct iwch_dev *dev);
337void iwch_unregister_device(struct iwch_dev *dev); 337void iwch_unregister_device(struct iwch_dev *dev);
338void stop_read_rep_timer(struct iwch_qp *qhp); 338void stop_read_rep_timer(struct iwch_qp *qhp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 1b4cd09f74dc..ecd313f359a4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
738 } 738 }
739} 739}
740 740
741int iwch_post_zb_read(struct iwch_qp *qhp) 741int iwch_post_zb_read(struct iwch_ep *ep)
742{ 742{
743 union t3_wr *wqe; 743 union t3_wr *wqe;
744 struct sk_buff *skb; 744 struct sk_buff *skb;
@@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
761 wqe->read.local_len = cpu_to_be32(0); 761 wqe->read.local_len = cpu_to_be32(0);
762 wqe->read.local_to = cpu_to_be64(1); 762 wqe->read.local_to = cpu_to_be64(1);
763 wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ)); 763 wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
764 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| 764 wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
765 V_FW_RIWR_LEN(flit_cnt)); 765 V_FW_RIWR_LEN(flit_cnt));
766 skb->priority = CPL_PRIORITY_DATA; 766 skb->priority = CPL_PRIORITY_DATA;
767 return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); 767 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
768} 768}
769 769
770/* 770/*
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 35d2a5dd9bb4..4f045375c8e2 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -35,7 +35,7 @@
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/spinlock.h> 36#include <linux/spinlock.h>
37#include <linux/idr.h> 37#include <linux/idr.h>
38#include <linux/workqueue.h> 38#include <linux/completion.h>
39#include <linux/netdevice.h> 39#include <linux/netdevice.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/pci.h> 41#include <linux/pci.h>
@@ -131,28 +131,21 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
131 131
132#define C4IW_WR_TO (10*HZ) 132#define C4IW_WR_TO (10*HZ)
133 133
134enum {
135 REPLY_READY = 0,
136};
137
138struct c4iw_wr_wait { 134struct c4iw_wr_wait {
139 wait_queue_head_t wait; 135 struct completion completion;
140 unsigned long status;
141 int ret; 136 int ret;
142}; 137};
143 138
144static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) 139static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
145{ 140{
146 wr_waitp->ret = 0; 141 wr_waitp->ret = 0;
147 wr_waitp->status = 0; 142 init_completion(&wr_waitp->completion);
148 init_waitqueue_head(&wr_waitp->wait);
149} 143}
150 144
151static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) 145static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
152{ 146{
153 wr_waitp->ret = ret; 147 wr_waitp->ret = ret;
154 set_bit(REPLY_READY, &wr_waitp->status); 148 complete(&wr_waitp->completion);
155 wake_up(&wr_waitp->wait);
156} 149}
157 150
158static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, 151static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -164,8 +157,7 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
164 int ret; 157 int ret;
165 158
166 do { 159 do {
167 ret = wait_event_timeout(wr_waitp->wait, 160 ret = wait_for_completion_timeout(&wr_waitp->completion, to);
168 test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
169 if (!ret) { 161 if (!ret) {
170 printk(KERN_ERR MOD "%s - Device %s not responding - " 162 printk(KERN_ERR MOD "%s - Device %s not responding - "
171 "tid %u qpid %u\n", func, 163 "tid %u qpid %u\n", func,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 13de1192927c..2d668c69f6d9 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1138,7 +1138,9 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
1138 u32 i = 0; 1138 u32 i = 0;
1139 struct nes_device *nesdev; 1139 struct nes_device *nesdev;
1140 1140
1141 strict_strtoul(buf, 0, &wqm_quanta_value); 1141 if (kstrtoul(buf, 0, &wqm_quanta_value) < 0)
1142 return -EINVAL;
1143
1142 list_for_each_entry(nesdev, &nes_dev_list, list) { 1144 list_for_each_entry(nesdev, &nes_dev_list, list) {
1143 if (i == ee_flsh_adapter) { 1145 if (i == ee_flsh_adapter) {
1144 nesdev->nesadapter->wqm_quanta = wqm_quanta_value; 1146 nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 7c03a70c55a2..8349f9c5064c 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,6 +1,6 @@
1config INFINIBAND_QIB 1config INFINIBAND_QIB
2 tristate "QLogic PCIe HCA support" 2 tristate "QLogic PCIe HCA support"
3 depends on 64BIT && NET 3 depends on 64BIT
4 ---help--- 4 ---help---
5 This is a low-level driver for QLogic PCIe QLE InfiniBand host 5 This is a low-level driver for QLogic PCIe QLE InfiniBand host
6 channel adapters. This driver does not support the QLogic 6 channel adapters. This driver does not support the QLogic
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 9876865732f7..ede1475bee09 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -548,7 +548,7 @@ int iser_connect(struct iser_conn *ib_conn,
548 iser_conn_get(ib_conn); /* ref ib conn's cma id */ 548 iser_conn_get(ib_conn); /* ref ib conn's cma id */
549 ib_conn->cma_id = rdma_create_id(iser_cma_handler, 549 ib_conn->cma_id = rdma_create_id(iser_cma_handler,
550 (void *)ib_conn, 550 (void *)ib_conn,
551 RDMA_PS_TCP); 551 RDMA_PS_TCP, IB_QPT_RC);
552 if (IS_ERR(ib_conn->cma_id)) { 552 if (IS_ERR(ib_conn->cma_id)) {
553 err = PTR_ERR(ib_conn->cma_id); 553 err = PTR_ERR(ib_conn->cma_id);
554 iser_err("rdma_create_id failed: %d\n", err); 554 iser_err("rdma_create_id failed: %d\n", err);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 376d640487d2..ee165fdcb596 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1147,7 +1147,7 @@ static void srp_process_aer_req(struct srp_target_port *target,
1147static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 1147static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1148{ 1148{
1149 struct ib_device *dev = target->srp_host->srp_dev->dev; 1149 struct ib_device *dev = target->srp_host->srp_dev->dev;
1150 struct srp_iu *iu = (struct srp_iu *) wc->wr_id; 1150 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1151 int res; 1151 int res;
1152 u8 opcode; 1152 u8 opcode;
1153 1153
@@ -1231,7 +1231,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1231 break; 1231 break;
1232 } 1232 }
1233 1233
1234 iu = (struct srp_iu *) wc.wr_id; 1234 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1235 list_add(&iu->list, &target->free_tx); 1235 list_add(&iu->list, &target->free_tx);
1236 } 1236 }
1237} 1237}
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 4d8ea32e8a00..22be27b424de 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -19,7 +19,7 @@
19 19
20/* Note to the author of this code: did it ever occur to 20/* Note to the author of this code: did it ever occur to
21 you why the ifdefs are needed? Think about it again. -AK */ 21 you why the ifdefs are needed? Think about it again. -AK */
22#ifdef CONFIG_X86_64 22#if defined(CONFIG_X86_64) || defined(CONFIG_TILE)
23# define INPUT_COMPAT_TEST is_compat_task() 23# define INPUT_COMPAT_TEST is_compat_task()
24#elif defined(CONFIG_S390) 24#elif defined(CONFIG_S390)
25# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT) 25# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 69badb4e06aa..b4dee9d5a055 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -412,6 +412,17 @@ config KEYBOARD_PXA930_ROTARY
412 To compile this driver as a module, choose M here: the 412 To compile this driver as a module, choose M here: the
413 module will be called pxa930_rotary. 413 module will be called pxa930_rotary.
414 414
415config KEYBOARD_PMIC8XXX
416 tristate "Qualcomm PMIC8XXX keypad support"
417 depends on MFD_PM8XXX
418 help
419 Say Y here if you want to enable the driver for the PMIC8XXX
420 keypad provided as a reference design from Qualcomm. This is intended
 421	  to support up to an 18x8 matrix-based keypad design.
422
423 To compile this driver as a module, choose M here: the module will
424 be called pmic8xxx-keypad.
425
415config KEYBOARD_SAMSUNG 426config KEYBOARD_SAMSUNG
416 tristate "Samsung keypad support" 427 tristate "Samsung keypad support"
417 depends on SAMSUNG_DEV_KEYPAD 428 depends on SAMSUNG_DEV_KEYPAD
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index c49cf8e04cd7..ddde0fd476f7 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o
34obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o 34obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
35obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o 35obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o
36obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o 36obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
37obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o
37obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o 38obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
38obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o 39obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
39obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o 40obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
new file mode 100644
index 000000000000..40b02ae96f86
--- /dev/null
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -0,0 +1,799 @@
1/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/kernel.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/input.h>
19#include <linux/bitops.h>
20#include <linux/delay.h>
21#include <linux/mutex.h>
22
23#include <linux/mfd/pm8xxx/core.h>
24#include <linux/mfd/pm8xxx/gpio.h>
25#include <linux/input/pmic8xxx-keypad.h>
26
27#define PM8XXX_MAX_ROWS 18
28#define PM8XXX_MAX_COLS 8
29#define PM8XXX_ROW_SHIFT 3
30#define PM8XXX_MATRIX_MAX_SIZE (PM8XXX_MAX_ROWS * PM8XXX_MAX_COLS)
31
32#define PM8XXX_MIN_ROWS 5
33#define PM8XXX_MIN_COLS 5
34
35#define MAX_SCAN_DELAY 128
36#define MIN_SCAN_DELAY 1
37
38/* in nanoseconds */
39#define MAX_ROW_HOLD_DELAY 122000
40#define MIN_ROW_HOLD_DELAY 30500
41
42#define MAX_DEBOUNCE_TIME 20
43#define MIN_DEBOUNCE_TIME 5
44
45#define KEYP_CTRL 0x148
46
47#define KEYP_CTRL_EVNTS BIT(0)
48#define KEYP_CTRL_EVNTS_MASK 0x3
49
50#define KEYP_CTRL_SCAN_COLS_SHIFT 5
51#define KEYP_CTRL_SCAN_COLS_MIN 5
52#define KEYP_CTRL_SCAN_COLS_BITS 0x3
53
54#define KEYP_CTRL_SCAN_ROWS_SHIFT 2
55#define KEYP_CTRL_SCAN_ROWS_MIN 5
56#define KEYP_CTRL_SCAN_ROWS_BITS 0x7
57
58#define KEYP_CTRL_KEYP_EN BIT(7)
59
60#define KEYP_SCAN 0x149
61
62#define KEYP_SCAN_READ_STATE BIT(0)
63#define KEYP_SCAN_DBOUNCE_SHIFT 1
64#define KEYP_SCAN_PAUSE_SHIFT 3
65#define KEYP_SCAN_ROW_HOLD_SHIFT 6
66
67#define KEYP_TEST 0x14A
68
69#define KEYP_TEST_CLEAR_RECENT_SCAN BIT(6)
70#define KEYP_TEST_CLEAR_OLD_SCAN BIT(5)
71#define KEYP_TEST_READ_RESET BIT(4)
72#define KEYP_TEST_DTEST_EN BIT(3)
73#define KEYP_TEST_ABORT_READ BIT(0)
74
75#define KEYP_TEST_DBG_SELECT_SHIFT 1
76
77/* bits of these registers represent
78 * '0' for key press
79 * '1' for key release
80 */
81#define KEYP_RECENT_DATA 0x14B
82#define KEYP_OLD_DATA 0x14C
83
84#define KEYP_CLOCK_FREQ 32768
85
86/**
87 * struct pmic8xxx_kp - internal keypad data structure
 88 * @pdata: keypad platform data pointer
 89 * @input: input device pointer for the keypad
 90 * @key_sense_irq: key press/release irq number
 91 * @key_stuck_irq: key stuck notification irq number
 92 * @keycodes: array holding the key codes
 93 * @dev: parent device pointer
 94 * @keystate: current key press/release state
 95 * @stuckstate: key state captured when the key-stuck irq fired
 96 * @ctrl_reg: control register value
97 */
98struct pmic8xxx_kp {
99 const struct pm8xxx_keypad_platform_data *pdata;
100 struct input_dev *input;
101 int key_sense_irq;
102 int key_stuck_irq;
103
104 unsigned short keycodes[PM8XXX_MATRIX_MAX_SIZE];
105
106 struct device *dev;
107 u16 keystate[PM8XXX_MAX_ROWS];
108 u16 stuckstate[PM8XXX_MAX_ROWS];
109
110 u8 ctrl_reg;
111};
112
113static int pmic8xxx_kp_write_u8(struct pmic8xxx_kp *kp,
114 u8 data, u16 reg)
115{
116 int rc;
117
118 rc = pm8xxx_writeb(kp->dev->parent, reg, data);
119 return rc;
120}
121
122static int pmic8xxx_kp_read(struct pmic8xxx_kp *kp,
123 u8 *data, u16 reg, unsigned num_bytes)
124{
125 int rc;
126
127 rc = pm8xxx_read_buf(kp->dev->parent, reg, data, num_bytes);
128 return rc;
129}
130
131static int pmic8xxx_kp_read_u8(struct pmic8xxx_kp *kp,
132 u8 *data, u16 reg)
133{
134 int rc;
135
136 rc = pmic8xxx_kp_read(kp, data, reg, 1);
137 return rc;
138}
139
140static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col)
141{
142 /* all keys pressed on that particular row? */
143 if (col == 0x00)
144 return 1 << kp->pdata->num_cols;
145 else
146 return col & ((1 << kp->pdata->num_cols) - 1);
147}
148
149/*
150 * Synchronous read protocol for RevB0 onwards:
151 *
152 * 1. Write '1' to ReadState bit in KEYP_SCAN register
153 * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode
154 * synchronously
155 * 3. Read rows in old array first if events are more than one
156 * 4. Read rows in recent array
157 * 5. Wait 4*32KHz clocks
158 * 6. Write '0' to ReadState bit of KEYP_SCAN register so that hw can
159 * synchronously exit read mode.
160 */
161static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp)
162{
163 int rc;
164 u8 scan_val;
165
166 rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
167 if (rc < 0) {
168 dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
169 return rc;
170 }
171
172 scan_val |= 0x1;
173
174 rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
175 if (rc < 0) {
176 dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
177 return rc;
178 }
179
180 /* 2 * 32KHz clocks */
181 udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
182
183 return rc;
184}
185
186static int pmic8xxx_kp_read_data(struct pmic8xxx_kp *kp, u16 *state,
187 u16 data_reg, int read_rows)
188{
189 int rc, row;
190 u8 new_data[PM8XXX_MAX_ROWS];
191
192 rc = pmic8xxx_kp_read(kp, new_data, data_reg, read_rows);
193 if (rc)
194 return rc;
195
196 for (row = 0; row < kp->pdata->num_rows; row++) {
197 dev_dbg(kp->dev, "new_data[%d] = %d\n", row,
198 new_data[row]);
199 state[row] = pmic8xxx_col_state(kp, new_data[row]);
200 }
201
202 return rc;
203}
204
205static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
206 u16 *old_state)
207{
208 int rc, read_rows;
209 u8 scan_val;
210
211 if (kp->pdata->num_rows < PM8XXX_MIN_ROWS)
212 read_rows = PM8XXX_MIN_ROWS;
213 else
214 read_rows = kp->pdata->num_rows;
215
216 pmic8xxx_chk_sync_read(kp);
217
218 if (old_state) {
219 rc = pmic8xxx_kp_read_data(kp, old_state, KEYP_OLD_DATA,
220 read_rows);
221 if (rc < 0) {
222 dev_err(kp->dev,
223 "Error reading KEYP_OLD_DATA, rc=%d\n", rc);
224 return rc;
225 }
226 }
227
228 rc = pmic8xxx_kp_read_data(kp, new_state, KEYP_RECENT_DATA,
229 read_rows);
230 if (rc < 0) {
231 dev_err(kp->dev,
232 "Error reading KEYP_RECENT_DATA, rc=%d\n", rc);
233 return rc;
234 }
235
236 /* 4 * 32KHz clocks */
237 udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
238
239 rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
240 if (rc < 0) {
241 dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
242 return rc;
243 }
244
245 scan_val &= 0xFE;
246 rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
247 if (rc < 0)
248 dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
249
250 return rc;
251}
252
253static void __pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
254 u16 *old_state)
255{
256 int row, col, code;
257
258 for (row = 0; row < kp->pdata->num_rows; row++) {
259 int bits_changed = new_state[row] ^ old_state[row];
260
261 if (!bits_changed)
262 continue;
263
264 for (col = 0; col < kp->pdata->num_cols; col++) {
265 if (!(bits_changed & (1 << col)))
266 continue;
267
268 dev_dbg(kp->dev, "key [%d:%d] %s\n", row, col,
269 !(new_state[row] & (1 << col)) ?
270 "pressed" : "released");
271
272 code = MATRIX_SCAN_CODE(row, col, PM8XXX_ROW_SHIFT);
273
274 input_event(kp->input, EV_MSC, MSC_SCAN, code);
275 input_report_key(kp->input,
276 kp->keycodes[code],
277 !(new_state[row] & (1 << col)));
278
279 input_sync(kp->input);
280 }
281 }
282}
283
284static bool pmic8xxx_detect_ghost_keys(struct pmic8xxx_kp *kp, u16 *new_state)
285{
286 int row, found_first = -1;
287 u16 check, row_state;
288
289 check = 0;
290 for (row = 0; row < kp->pdata->num_rows; row++) {
291 row_state = (~new_state[row]) &
292 ((1 << kp->pdata->num_cols) - 1);
293
294 if (hweight16(row_state) > 1) {
295 if (found_first == -1)
296 found_first = row;
297 if (check & row_state) {
298 dev_dbg(kp->dev, "detected ghost key on row[%d]"
299 " and row[%d]\n", found_first, row);
300 return true;
301 }
302 }
303 check |= row_state;
304 }
305 return false;
306}
307
308static int pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, unsigned int events)
309{
310 u16 new_state[PM8XXX_MAX_ROWS];
311 u16 old_state[PM8XXX_MAX_ROWS];
312 int rc;
313
314 switch (events) {
315 case 0x1:
316 rc = pmic8xxx_kp_read_matrix(kp, new_state, NULL);
317 if (rc < 0)
318 return rc;
319
320 /* detecting ghost key is not an error */
321 if (pmic8xxx_detect_ghost_keys(kp, new_state))
322 return 0;
323 __pmic8xxx_kp_scan_matrix(kp, new_state, kp->keystate);
324 memcpy(kp->keystate, new_state, sizeof(new_state));
325 break;
326 case 0x3: /* two events - eventcounter is gray-coded */
327 rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
328 if (rc < 0)
329 return rc;
330
331 __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate);
332 __pmic8xxx_kp_scan_matrix(kp, new_state, old_state);
333 memcpy(kp->keystate, new_state, sizeof(new_state));
334 break;
335 case 0x2:
336 dev_dbg(kp->dev, "Some key events were lost\n");
337 rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
338 if (rc < 0)
339 return rc;
340 __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate);
341 __pmic8xxx_kp_scan_matrix(kp, new_state, old_state);
342 memcpy(kp->keystate, new_state, sizeof(new_state));
343 break;
344 default:
345 rc = -EINVAL;
346 }
347 return rc;
348}
349
 350/*
 351 * NOTE: We read the recent and old data registers blindly
 352 * whenever the key-stuck interrupt fires, because the event counter is
 353 * not updated for this interrupt: a stuck key is not treated as a key
 354 * state change.
 355 *
 356 * We do not use the old data register contents after reading them,
 357 * because they might report a key that was pressed before the key got
 358 * stuck as the stuck key, since its pressed status is still held in the
 359 * old data register.
 360 */
361static irqreturn_t pmic8xxx_kp_stuck_irq(int irq, void *data)
362{
363 u16 new_state[PM8XXX_MAX_ROWS];
364 u16 old_state[PM8XXX_MAX_ROWS];
365 int rc;
366 struct pmic8xxx_kp *kp = data;
367
368 rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
369 if (rc < 0) {
370 dev_err(kp->dev, "failed to read keypad matrix\n");
371 return IRQ_HANDLED;
372 }
373
374 __pmic8xxx_kp_scan_matrix(kp, new_state, kp->stuckstate);
375
376 return IRQ_HANDLED;
377}
378
379static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
380{
381 struct pmic8xxx_kp *kp = data;
382 u8 ctrl_val, events;
383 int rc;
384
385 rc = pmic8xxx_kp_read(kp, &ctrl_val, KEYP_CTRL, 1);
386 if (rc < 0) {
387 dev_err(kp->dev, "failed to read keyp_ctrl register\n");
388 return IRQ_HANDLED;
389 }
390
391 events = ctrl_val & KEYP_CTRL_EVNTS_MASK;
392
393 rc = pmic8xxx_kp_scan_matrix(kp, events);
394 if (rc < 0)
395 dev_err(kp->dev, "failed to scan matrix\n");
396
397 return IRQ_HANDLED;
398}
399
400static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
401{
402 int bits, rc, cycles;
403 u8 scan_val = 0, ctrl_val = 0;
404 static const u8 row_bits[] = {
405 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7,
406 };
407
408 /* Find column bits */
409 if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
410 bits = 0;
411 else
412 bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
413 ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) <<
414 KEYP_CTRL_SCAN_COLS_SHIFT;
415
416 /* Find row bits */
417 if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
418 bits = 0;
419 else
420 bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN];
421
422 ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT);
423
424 rc = pmic8xxx_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
425 if (rc < 0) {
426 dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
427 return rc;
428 }
429
430 bits = (kp->pdata->debounce_ms / 5) - 1;
431
432 scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT);
433
434 bits = fls(kp->pdata->scan_delay_ms) - 1;
435 scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT);
436
437 /* Row hold time is a multiple of 32KHz cycles. */
438 cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
439
440 scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT);
441
442 rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
443 if (rc)
444 dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
445
446 return rc;
447
448}
449
450static int __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
451 struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
452{
453 int rc, i;
454
455 if (gpio_start < 0 || num_gpios < 0)
456 return -EINVAL;
457
458 for (i = 0; i < num_gpios; i++) {
459 rc = pm8xxx_gpio_config(gpio_start + i, gpio_config);
460 if (rc) {
 461			dev_err(kp->dev, "%s: FAIL pm8xxx_gpio_config(): "
462 "for PM GPIO [%d] rc=%d.\n",
463 __func__, gpio_start + i, rc);
464 return rc;
465 }
466 }
467
468 return 0;
469}
470
471static int pmic8xxx_kp_enable(struct pmic8xxx_kp *kp)
472{
473 int rc;
474
475 kp->ctrl_reg |= KEYP_CTRL_KEYP_EN;
476
477 rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
478 if (rc < 0)
479 dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
480
481 return rc;
482}
483
484static int pmic8xxx_kp_disable(struct pmic8xxx_kp *kp)
485{
486 int rc;
487
488 kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN;
489
490 rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
491 if (rc < 0)
492 return rc;
493
494 return rc;
495}
496
497static int pmic8xxx_kp_open(struct input_dev *dev)
498{
499 struct pmic8xxx_kp *kp = input_get_drvdata(dev);
500
501 return pmic8xxx_kp_enable(kp);
502}
503
504static void pmic8xxx_kp_close(struct input_dev *dev)
505{
506 struct pmic8xxx_kp *kp = input_get_drvdata(dev);
507
508 pmic8xxx_kp_disable(kp);
509}
510
 511/*
 512 * The keypad controller must be initialized in exactly the following
 513 * sequence, otherwise its FSM might get stuck:
 514 *
 515 * - initialize the keypad control parameters (number of rows and
 516 *   columns, timing values, etc.),
 517 * - configure the pull-up/pull-down of the row and column GPIOs,
 518 * - set the irq edge type,
 519 * - enable the keypad controller.
 520 */
521static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
522{
523 const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
524 const struct matrix_keymap_data *keymap_data;
525 struct pmic8xxx_kp *kp;
526 int rc;
527 u8 ctrl_val;
528
529 struct pm_gpio kypd_drv = {
530 .direction = PM_GPIO_DIR_OUT,
531 .output_buffer = PM_GPIO_OUT_BUF_OPEN_DRAIN,
532 .output_value = 0,
533 .pull = PM_GPIO_PULL_NO,
534 .vin_sel = PM_GPIO_VIN_S3,
535 .out_strength = PM_GPIO_STRENGTH_LOW,
536 .function = PM_GPIO_FUNC_1,
537 .inv_int_pol = 1,
538 };
539
540 struct pm_gpio kypd_sns = {
541 .direction = PM_GPIO_DIR_IN,
542 .pull = PM_GPIO_PULL_UP_31P5,
543 .vin_sel = PM_GPIO_VIN_S3,
544 .out_strength = PM_GPIO_STRENGTH_NO,
545 .function = PM_GPIO_FUNC_NORMAL,
546 .inv_int_pol = 1,
547 };
548
549
550 if (!pdata || !pdata->num_cols || !pdata->num_rows ||
551 pdata->num_cols > PM8XXX_MAX_COLS ||
552 pdata->num_rows > PM8XXX_MAX_ROWS ||
553 pdata->num_cols < PM8XXX_MIN_COLS) {
554 dev_err(&pdev->dev, "invalid platform data\n");
555 return -EINVAL;
556 }
557
558 if (!pdata->scan_delay_ms ||
559 pdata->scan_delay_ms > MAX_SCAN_DELAY ||
560 pdata->scan_delay_ms < MIN_SCAN_DELAY ||
561 !is_power_of_2(pdata->scan_delay_ms)) {
562 dev_err(&pdev->dev, "invalid keypad scan time supplied\n");
563 return -EINVAL;
564 }
565
566 if (!pdata->row_hold_ns ||
567 pdata->row_hold_ns > MAX_ROW_HOLD_DELAY ||
568 pdata->row_hold_ns < MIN_ROW_HOLD_DELAY ||
569 ((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) {
570 dev_err(&pdev->dev, "invalid keypad row hold time supplied\n");
571 return -EINVAL;
572 }
573
574 if (!pdata->debounce_ms ||
575 ((pdata->debounce_ms % 5) != 0) ||
576 pdata->debounce_ms > MAX_DEBOUNCE_TIME ||
577 pdata->debounce_ms < MIN_DEBOUNCE_TIME) {
578 dev_err(&pdev->dev, "invalid debounce time supplied\n");
579 return -EINVAL;
580 }
581
582 keymap_data = pdata->keymap_data;
583 if (!keymap_data) {
584 dev_err(&pdev->dev, "no keymap data supplied\n");
585 return -EINVAL;
586 }
587
588 kp = kzalloc(sizeof(*kp), GFP_KERNEL);
589 if (!kp)
590 return -ENOMEM;
591
592 platform_set_drvdata(pdev, kp);
593
594 kp->pdata = pdata;
595 kp->dev = &pdev->dev;
596
597 kp->input = input_allocate_device();
598 if (!kp->input) {
599 dev_err(&pdev->dev, "unable to allocate input device\n");
600 rc = -ENOMEM;
601 goto err_alloc_device;
602 }
603
604 kp->key_sense_irq = platform_get_irq(pdev, 0);
605 if (kp->key_sense_irq < 0) {
606 dev_err(&pdev->dev, "unable to get keypad sense irq\n");
607 rc = -ENXIO;
608 goto err_get_irq;
609 }
610
611 kp->key_stuck_irq = platform_get_irq(pdev, 1);
612 if (kp->key_stuck_irq < 0) {
613 dev_err(&pdev->dev, "unable to get keypad stuck irq\n");
614 rc = -ENXIO;
615 goto err_get_irq;
616 }
617
618 kp->input->name = pdata->input_name ? : "PMIC8XXX keypad";
619 kp->input->phys = pdata->input_phys_device ? : "pmic8xxx_keypad/input0";
620
621 kp->input->dev.parent = &pdev->dev;
622
623 kp->input->id.bustype = BUS_I2C;
624 kp->input->id.version = 0x0001;
625 kp->input->id.product = 0x0001;
626 kp->input->id.vendor = 0x0001;
627
628 kp->input->evbit[0] = BIT_MASK(EV_KEY);
629
630 if (pdata->rep)
631 __set_bit(EV_REP, kp->input->evbit);
632
633 kp->input->keycode = kp->keycodes;
634 kp->input->keycodemax = PM8XXX_MATRIX_MAX_SIZE;
635 kp->input->keycodesize = sizeof(kp->keycodes);
636 kp->input->open = pmic8xxx_kp_open;
637 kp->input->close = pmic8xxx_kp_close;
638
639 matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT,
640 kp->input->keycode, kp->input->keybit);
641
642 input_set_capability(kp->input, EV_MSC, MSC_SCAN);
643 input_set_drvdata(kp->input, kp);
644
645 /* initialize keypad state */
646 memset(kp->keystate, 0xff, sizeof(kp->keystate));
647 memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));
648
649 rc = pmic8xxx_kpd_init(kp);
650 if (rc < 0) {
651 dev_err(&pdev->dev, "unable to initialize keypad controller\n");
652 goto err_get_irq;
653 }
654
655 rc = pmic8xxx_kp_config_gpio(pdata->cols_gpio_start,
656 pdata->num_cols, kp, &kypd_sns);
657 if (rc < 0) {
658 dev_err(&pdev->dev, "unable to configure keypad sense lines\n");
659 goto err_gpio_config;
660 }
661
662 rc = pmic8xxx_kp_config_gpio(pdata->rows_gpio_start,
663 pdata->num_rows, kp, &kypd_drv);
664 if (rc < 0) {
665 dev_err(&pdev->dev, "unable to configure keypad drive lines\n");
666 goto err_gpio_config;
667 }
668
669 rc = request_any_context_irq(kp->key_sense_irq, pmic8xxx_kp_irq,
670 IRQF_TRIGGER_RISING, "pmic-keypad", kp);
671 if (rc < 0) {
672 dev_err(&pdev->dev, "failed to request keypad sense irq\n");
673 goto err_get_irq;
674 }
675
676 rc = request_any_context_irq(kp->key_stuck_irq, pmic8xxx_kp_stuck_irq,
677 IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp);
678 if (rc < 0) {
679 dev_err(&pdev->dev, "failed to request keypad stuck irq\n");
680 goto err_req_stuck_irq;
681 }
682
683 rc = pmic8xxx_kp_read_u8(kp, &ctrl_val, KEYP_CTRL);
684 if (rc < 0) {
685 dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n");
686 goto err_pmic_reg_read;
687 }
688
689 kp->ctrl_reg = ctrl_val;
690
691 rc = input_register_device(kp->input);
692 if (rc < 0) {
693 dev_err(&pdev->dev, "unable to register keypad input device\n");
694 goto err_pmic_reg_read;
695 }
696
697 device_init_wakeup(&pdev->dev, pdata->wakeup);
698
699 return 0;
700
701err_pmic_reg_read:
702 free_irq(kp->key_stuck_irq, NULL);
703err_req_stuck_irq:
704 free_irq(kp->key_sense_irq, NULL);
705err_gpio_config:
706err_get_irq:
707 input_free_device(kp->input);
708err_alloc_device:
709 platform_set_drvdata(pdev, NULL);
710 kfree(kp);
711 return rc;
712}
713
714static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
715{
716 struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
717
718 device_init_wakeup(&pdev->dev, 0);
719 free_irq(kp->key_stuck_irq, NULL);
720 free_irq(kp->key_sense_irq, NULL);
721 input_unregister_device(kp->input);
722 kfree(kp);
723
724 platform_set_drvdata(pdev, NULL);
725 return 0;
726}
727
728#ifdef CONFIG_PM_SLEEP
729static int pmic8xxx_kp_suspend(struct device *dev)
730{
731 struct platform_device *pdev = to_platform_device(dev);
732 struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
733 struct input_dev *input_dev = kp->input;
734
735 if (device_may_wakeup(dev)) {
736 enable_irq_wake(kp->key_sense_irq);
737 } else {
738 mutex_lock(&input_dev->mutex);
739
740 if (input_dev->users)
741 pmic8xxx_kp_disable(kp);
742
743 mutex_unlock(&input_dev->mutex);
744 }
745
746 return 0;
747}
748
749static int pmic8xxx_kp_resume(struct device *dev)
750{
751 struct platform_device *pdev = to_platform_device(dev);
752 struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
753 struct input_dev *input_dev = kp->input;
754
755 if (device_may_wakeup(dev)) {
756 disable_irq_wake(kp->key_sense_irq);
757 } else {
758 mutex_lock(&input_dev->mutex);
759
760 if (input_dev->users)
761 pmic8xxx_kp_enable(kp);
762
763 mutex_unlock(&input_dev->mutex);
764 }
765
766 return 0;
767}
768#endif
769
770static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops,
771 pmic8xxx_kp_suspend, pmic8xxx_kp_resume);
772
773static struct platform_driver pmic8xxx_kp_driver = {
774 .probe = pmic8xxx_kp_probe,
775 .remove = __devexit_p(pmic8xxx_kp_remove),
776 .driver = {
777 .name = PM8XXX_KEYPAD_DEV_NAME,
778 .owner = THIS_MODULE,
779 .pm = &pm8xxx_kp_pm_ops,
780 },
781};
782
783static int __init pmic8xxx_kp_init(void)
784{
785 return platform_driver_register(&pmic8xxx_kp_driver);
786}
787module_init(pmic8xxx_kp_init);
788
789static void __exit pmic8xxx_kp_exit(void)
790{
791 platform_driver_unregister(&pmic8xxx_kp_driver);
792}
793module_exit(pmic8xxx_kp_exit);
794
795MODULE_LICENSE("GPL v2");
796MODULE_DESCRIPTION("PMIC8XXX keypad driver");
797MODULE_VERSION("1.0");
798MODULE_ALIAS("platform:pmic8xxx_keypad");
799MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
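A minimal board-data sketch (not part of this commit) shows platform data that satisfies the probe-time checks above: scan_delay_ms must be a power of two in 1..128 ms, row_hold_ns a multiple of 30500 ns in 30500..122000 ns, and debounce_ms a multiple of 5 in 5..20 ms. The keymap, names and GPIO numbers below are hypothetical; in practice the data is handed to the driver through the PM8xxx MFD core rather than registered directly.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/input/pmic8xxx-keypad.h>

static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_VOLUMEUP),
	KEY(0, 1, KEY_VOLUMEDOWN),
	KEY(1, 0, KEY_CAMERA),
};

static struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static struct pm8xxx_keypad_platform_data board_keypad_data = {
	.input_name		= "board-keypad",	/* hypothetical */
	.keymap_data		= &board_keymap_data,
	.num_rows		= 5,
	.num_cols		= 5,
	.rows_gpio_start	= 8,			/* hypothetical PM GPIO */
	.cols_gpio_start	= 0,			/* hypothetical PM GPIO */
	.scan_delay_ms		= 32,
	.row_hold_ns		= 91500,
	.debounce_ms		= 15,
	.rep			= 1,
	.wakeup			= 1,
};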
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index f9cf0881b0e3..45dc6aa62ba4 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -330,6 +330,17 @@ config INPUT_PWM_BEEPER
330 To compile this driver as a module, choose M here: the module will be 330 To compile this driver as a module, choose M here: the module will be
331 called pwm-beeper. 331 called pwm-beeper.
332 332
333config INPUT_PMIC8XXX_PWRKEY
334 tristate "PMIC8XXX power key support"
335 depends on MFD_PM8XXX
336 help
337 Say Y here if you want support for the PMIC8XXX power key.
338
339 If unsure, say N.
340
341 To compile this driver as a module, choose M here: the
342 module will be called pmic8xxx-pwrkey.
343
333config INPUT_GPIO_ROTARY_ENCODER 344config INPUT_GPIO_ROTARY_ENCODER
334 tristate "Rotary encoders connected to GPIO pins" 345 tristate "Rotary encoders connected to GPIO pins"
335 depends on GPIOLIB && GENERIC_GPIO 346 depends on GPIOLIB && GENERIC_GPIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index e3f7984e6274..38efb2cb182b 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
33obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o 33obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
34obj-$(CONFIG_INPUT_POWERMATE) += powermate.o 34obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
35obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o 35obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o
36obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
36obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o 37obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o
37obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o 38obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
38obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o 39obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
new file mode 100644
index 000000000000..97e07e786e41
--- /dev/null
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -0,0 +1,231 @@
1/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/input.h>
19#include <linux/interrupt.h>
20#include <linux/platform_device.h>
21#include <linux/log2.h>
22
23#include <linux/mfd/pm8xxx/core.h>
24#include <linux/input/pmic8xxx-pwrkey.h>
25
26#define PON_CNTL_1 0x1C
27#define PON_CNTL_PULL_UP BIT(7)
28#define PON_CNTL_TRIG_DELAY_MASK (0x7)
29
30/**
31 * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
32 * @key_press_irq: key press irq number
33 */
34struct pmic8xxx_pwrkey {
35 struct input_dev *pwr;
36 int key_press_irq;
37};
38
39static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey)
40{
41 struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
42
43 input_report_key(pwrkey->pwr, KEY_POWER, 1);
44 input_sync(pwrkey->pwr);
45
46 return IRQ_HANDLED;
47}
48
49static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey)
50{
51 struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
52
53 input_report_key(pwrkey->pwr, KEY_POWER, 0);
54 input_sync(pwrkey->pwr);
55
56 return IRQ_HANDLED;
57}
58
59#ifdef CONFIG_PM_SLEEP
60static int pmic8xxx_pwrkey_suspend(struct device *dev)
61{
62 struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
63
64 if (device_may_wakeup(dev))
65 enable_irq_wake(pwrkey->key_press_irq);
66
67 return 0;
68}
69
70static int pmic8xxx_pwrkey_resume(struct device *dev)
71{
72 struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
73
74 if (device_may_wakeup(dev))
75 disable_irq_wake(pwrkey->key_press_irq);
76
77 return 0;
78}
79#endif
80
81static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
82 pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
83
84static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
85{
86 struct input_dev *pwr;
87 int key_release_irq = platform_get_irq(pdev, 0);
88 int key_press_irq = platform_get_irq(pdev, 1);
89 int err;
90 unsigned int delay;
91 u8 pon_cntl;
92 struct pmic8xxx_pwrkey *pwrkey;
93 const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
94
95 if (!pdata) {
96 dev_err(&pdev->dev, "power key platform data not supplied\n");
97 return -EINVAL;
98 }
99
100 if (pdata->kpd_trigger_delay_us > 62500) {
101 dev_err(&pdev->dev, "invalid power key trigger delay\n");
102 return -EINVAL;
103 }
104
105 pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL);
106 if (!pwrkey)
107 return -ENOMEM;
108
109 pwr = input_allocate_device();
110 if (!pwr) {
111 dev_dbg(&pdev->dev, "Can't allocate power button\n");
112 err = -ENOMEM;
113 goto free_pwrkey;
114 }
115
116 input_set_capability(pwr, EV_KEY, KEY_POWER);
117
118 pwr->name = "pmic8xxx_pwrkey";
119 pwr->phys = "pmic8xxx_pwrkey/input0";
120 pwr->dev.parent = &pdev->dev;
121
122 delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC;
123 delay = 1 + ilog2(delay);
124
125 err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl);
126 if (err < 0) {
127 dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err);
128 goto free_input_dev;
129 }
130
131 pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
132 pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK);
133 if (pdata->pull_up)
134 pon_cntl |= PON_CNTL_PULL_UP;
135 else
136 pon_cntl &= ~PON_CNTL_PULL_UP;
137
138 err = pm8xxx_writeb(pdev->dev.parent, PON_CNTL_1, pon_cntl);
139 if (err < 0) {
140 dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err);
141 goto free_input_dev;
142 }
143
144 err = input_register_device(pwr);
145 if (err) {
146 dev_dbg(&pdev->dev, "Can't register power key: %d\n", err);
147 goto free_input_dev;
148 }
149
150 pwrkey->key_press_irq = key_press_irq;
151 pwrkey->pwr = pwr;
152
153 platform_set_drvdata(pdev, pwrkey);
154
155 err = request_irq(key_press_irq, pwrkey_press_irq,
156 IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey);
157 if (err < 0) {
158 dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
159 key_press_irq, err);
160 goto unreg_input_dev;
161 }
162
163 err = request_irq(key_release_irq, pwrkey_release_irq,
164 IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey);
165 if (err < 0) {
166 dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
167 key_release_irq, err);
168
169 goto free_press_irq;
170 }
171
172 device_init_wakeup(&pdev->dev, pdata->wakeup);
173
174 return 0;
175
176free_press_irq:
 177	free_irq(key_press_irq, pwrkey);
178unreg_input_dev:
179 platform_set_drvdata(pdev, NULL);
180 input_unregister_device(pwr);
181 pwr = NULL;
182free_input_dev:
183 input_free_device(pwr);
184free_pwrkey:
185 kfree(pwrkey);
186 return err;
187}
188
189static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
190{
191 struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
192 int key_release_irq = platform_get_irq(pdev, 0);
193 int key_press_irq = platform_get_irq(pdev, 1);
194
195 device_init_wakeup(&pdev->dev, 0);
196
197 free_irq(key_press_irq, pwrkey);
198 free_irq(key_release_irq, pwrkey);
199 input_unregister_device(pwrkey->pwr);
200 platform_set_drvdata(pdev, NULL);
201 kfree(pwrkey);
202
203 return 0;
204}
205
206static struct platform_driver pmic8xxx_pwrkey_driver = {
207 .probe = pmic8xxx_pwrkey_probe,
208 .remove = __devexit_p(pmic8xxx_pwrkey_remove),
209 .driver = {
210 .name = PM8XXX_PWRKEY_DEV_NAME,
211 .owner = THIS_MODULE,
212 .pm = &pm8xxx_pwr_key_pm_ops,
213 },
214};
215
216static int __init pmic8xxx_pwrkey_init(void)
217{
218 return platform_driver_register(&pmic8xxx_pwrkey_driver);
219}
220module_init(pmic8xxx_pwrkey_init);
221
222static void __exit pmic8xxx_pwrkey_exit(void)
223{
224 platform_driver_unregister(&pmic8xxx_pwrkey_driver);
225}
226module_exit(pmic8xxx_pwrkey_exit);
227
228MODULE_ALIAS("platform:pmic8xxx_pwrkey");
229MODULE_DESCRIPTION("PMIC8XXX Power Key driver");
230MODULE_LICENSE("GPL v2");
231MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
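The probe above packs pdata->kpd_trigger_delay_us into the 3-bit trigger-delay field of PON_CNTL_1 with a shift-and-ilog2 step. The stand-alone check below reproduces that arithmetic outside the kernel; the 15625 us input is only an example value, and ilog2 is re-implemented here just so the snippet compiles on its own.

/*
 * Stand-alone check of the trigger-delay conversion used in
 * pmic8xxx_pwrkey_probe(); the 15625 us value is illustrative.
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned int kpd_trigger_delay_us = 15625;	/* example pdata value */
	unsigned int delay;

	/* same arithmetic as the driver: scale by 1024/USEC_PER_SEC (about 1/976.6) */
	delay = (kpd_trigger_delay_us << 10) / 1000000;		/* -> 16 */
	delay = 1 + ilog2_u32(delay);				/* -> 5  */

	printf("PON_CNTL_1 delay field = %u (fits the 0x7 mask)\n", delay);
	return 0;
}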
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 6a11694e3fc7..014dd4ad0d4f 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -29,7 +29,6 @@
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/i2c/twl.h> 30#include <linux/i2c/twl.h>
31#include <linux/mfd/twl4030-codec.h> 31#include <linux/mfd/twl4030-codec.h>
32#include <linux/mfd/core.h>
33#include <linux/input.h> 32#include <linux/input.h>
34#include <linux/slab.h> 33#include <linux/slab.h>
35 34
@@ -197,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
197 196
198static int __devinit twl4030_vibra_probe(struct platform_device *pdev) 197static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
199{ 198{
200 struct twl4030_codec_vibra_data *pdata = mfd_get_data(pdev); 199 struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data;
201 struct vibra_info *info; 200 struct vibra_info *info;
202 int ret; 201 int ret;
203 202
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
index d36a4c09e25d..0bbee7824d78 100644
--- a/drivers/isdn/hardware/eicon/divasfunc.c
+++ b/drivers/isdn/hardware/eicon/divasfunc.c
@@ -113,9 +113,8 @@ void diva_xdi_didd_remove_adapter(int card)
113static void start_dbg(void) 113static void start_dbg(void)
114{ 114{
115 DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT); 115 DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT);
116 DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s]-%s-%s)", 116 DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s])",
117 DIVA_BUILD, diva_xdi_common_code_build, __DATE__, 117 DIVA_BUILD, diva_xdi_common_code_build))
118 __TIME__))
119} 118}
120 119
121/* 120/*
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 54ae71a907f9..db25b6b2ae39 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1072,6 +1072,12 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1072 return -ENODEV; 1072 return -ENODEV;
1073 } 1073 }
1074 1074
1075 if (pdev->subsystem_vendor == 0xb100 &&
1076 pdev->subsystem_device == 0x0003 ) {
1077 pr_notice("Netjet: Digium TDM400P not handled yet\n");
1078 return -ENODEV;
1079 }
1080
1075 card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC); 1081 card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
1076 if (!card) { 1082 if (!card) {
1077 pr_info("No kmem for Netjet\n"); 1083 pr_info("No kmem for Netjet\n");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9bec8699b8a3..23f0d5e99f35 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -14,6 +14,13 @@ config LEDS_CLASS
14 This option enables the led sysfs class in /sys/class/leds. You'll 14 This option enables the led sysfs class in /sys/class/leds. You'll
15 need this to do anything useful with LEDs. If unsure, say N. 15 need this to do anything useful with LEDs. If unsure, say N.
16 16
17config LEDS_GPIO_REGISTER
18 bool
19 help
20 This option provides the function gpio_led_register_device.
21 As this function is used by arch code it must not be compiled as a
22 module.
23
17if NEW_LEDS 24if NEW_LEDS
18 25
19comment "LED drivers" 26comment "LED drivers"
@@ -115,13 +122,6 @@ config LEDS_ALIX2
115 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. 122 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
116 You have to set leds-alix2.force=1 for boards with Award BIOS. 123 You have to set leds-alix2.force=1 for boards with Award BIOS.
117 124
118config LEDS_H1940
119 tristate "LED Support for iPAQ H1940 device"
120 depends on LEDS_CLASS
121 depends on ARCH_H1940
122 help
123 This option enables support for the LEDs on the h1940.
124
125config LEDS_COBALT_QUBE 125config LEDS_COBALT_QUBE
126 tristate "LED Support for the Cobalt Qube series front LED" 126 tristate "LED Support for the Cobalt Qube series front LED"
127 depends on LEDS_CLASS 127 depends on LEDS_CLASS
@@ -162,6 +162,16 @@ config LEDS_PCA9532
162 LED controller. It is generally only useful 162 LED controller. It is generally only useful
163 as a platform driver 163 as a platform driver
164 164
165config LEDS_PCA9532_GPIO
166 bool "Enable GPIO support for PCA9532"
167 depends on LEDS_PCA9532
168 depends on GPIOLIB
169 help
170 Allow unused pins on PCA9532 to be used as gpio.
171
 172	To use a pin as a GPIO, pca9532_type in pca9532_platform_data needs to
 173	be set to PCA9532_TYPE_GPIO.
174
165config LEDS_GPIO 175config LEDS_GPIO
166 tristate "LED Support for GPIO connected LEDs" 176 tristate "LED Support for GPIO connected LEDs"
167 depends on LEDS_CLASS 177 depends on LEDS_CLASS
@@ -379,6 +389,16 @@ config LEDS_NETXBIG
379 and 5Big Network v2 boards. The LEDs are wired to a CPLD and are 389 and 5Big Network v2 boards. The LEDs are wired to a CPLD and are
380 controlled through a GPIO extension bus. 390 controlled through a GPIO extension bus.
381 391
392config LEDS_ASIC3
393 bool "LED support for the HTC ASIC3"
394 depends on MFD_ASIC3
395 default y
396 help
397 This option enables support for the LEDs on the HTC ASIC3. The HTC
398 ASIC3 LED GPIOs are inputs, not outputs, thus the leds-gpio driver
399 cannot be used. This driver supports hardware blinking with an on+off
400 period from 62ms to 125s. Say Y to enable LEDs on the HP iPAQ hx4700.
401
382config LEDS_TRIGGERS 402config LEDS_TRIGGERS
383 bool "LED Trigger support" 403 bool "LED Trigger support"
384 depends on LEDS_CLASS 404 depends on LEDS_CLASS
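The LEDS_PCA9532_GPIO help text above refers to pca9532_type in the platform data. A minimal board-code sketch of what that looks like follows; the LED name and pin assignment are invented, and only the PCA9532_TYPE_* values and the gpio_base field come from the leds-pca9532 changes further down in this series.

#include <linux/leds-pca9532.h>

static struct pca9532_platform_data board_pca9532_pdata = {
	.leds = {
		[0] = {			/* pin 0 driven as a regular LED */
			.name	= "board:green:status",
			.type	= PCA9532_TYPE_LED,
			.state	= PCA9532_OFF,
		},
		[1] = {			/* pin 1 handed over to gpiolib */
			.type	= PCA9532_TYPE_GPIO,
		},
		/* remaining pins default to PCA9532_TYPE_NONE */
	},
	.gpio_base	= -1,		/* let gpiolib assign a base number */
	.pwm		= { 0, 0 },
	.psc		= { 0, 0 },
};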
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 39c80fca84d2..bbfd2e367dc0 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -17,11 +17,11 @@ obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
17obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o 17obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o
18obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o 18obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
19obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o 19obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o
20obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
21obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o 20obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
22obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o 21obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
23obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o 22obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
24obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o 23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
24obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
25obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 25obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
26obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o 26obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
27obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o 27obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
@@ -42,6 +42,7 @@ obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o
42obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o 42obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
43obj-$(CONFIG_LEDS_NS2) += leds-ns2.o 43obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
44obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o 44obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
45obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
45 46
46# LED SPI Drivers 47# LED SPI Drivers
47obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o 48obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index d5a4ade88991..dc3d3d83191a 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -131,7 +131,8 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
131 if (!led_cdev->blink_brightness) 131 if (!led_cdev->blink_brightness)
132 led_cdev->blink_brightness = led_cdev->max_brightness; 132 led_cdev->blink_brightness = led_cdev->max_brightness;
133 133
134 if (delay_on == led_cdev->blink_delay_on && 134 if (led_get_trigger_data(led_cdev) &&
135 delay_on == led_cdev->blink_delay_on &&
135 delay_off == led_cdev->blink_delay_off) 136 delay_off == led_cdev->blink_delay_off)
136 return; 137 return;
137 138
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 416def84d045..0d4c16678ace 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -17,7 +17,6 @@
17#include <linux/leds.h> 17#include <linux/leds.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/mfd/core.h>
21#include <linux/mfd/88pm860x.h> 20#include <linux/mfd/88pm860x.h>
22 21
23#define LED_PWM_SHIFT (3) 22#define LED_PWM_SHIFT (3)
@@ -171,7 +170,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
171 struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); 170 struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
172 struct pm860x_led_pdata *pdata; 171 struct pm860x_led_pdata *pdata;
173 struct pm860x_led *data; 172 struct pm860x_led *data;
174 struct mfd_cell *cell;
175 struct resource *res; 173 struct resource *res;
176 int ret; 174 int ret;
177 175
@@ -181,10 +179,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
181 return -EINVAL; 179 return -EINVAL;
182 } 180 }
183 181
184 cell = pdev->dev.platform_data; 182 pdata = pdev->dev.platform_data;
185 if (cell == NULL)
186 return -ENODEV;
187 pdata = cell->mfd_data;
188 if (pdata == NULL) { 183 if (pdata == NULL) {
189 dev_err(&pdev->dev, "No platform data!\n"); 184 dev_err(&pdev->dev, "No platform data!\n");
190 return -EINVAL; 185 return -EINVAL;
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
new file mode 100644
index 000000000000..22f847c890c9
--- /dev/null
+++ b/drivers/leds/leds-asic3.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright (C) 2011 Paul Parsons <lost.distance@yahoo.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/leds.h>
13#include <linux/slab.h>
14
15#include <linux/mfd/asic3.h>
16#include <linux/mfd/core.h>
17
18/*
19 * The HTC ASIC3 LED GPIOs are inputs, not outputs.
20 * Hence we turn the LEDs on/off via the TimeBase register.
21 */
22
23/*
24 * When TimeBase is 4 the clock resolution is about 32Hz.
25 * This driver supports hardware blinking with an on+off
26 * period from 62ms (2 clocks) to 125s (4000 clocks).
27 */
28#define MS_TO_CLK(ms) DIV_ROUND_CLOSEST(((ms)*1024), 32000)
29#define CLK_TO_MS(clk) (((clk)*32000)/1024)
30#define MAX_CLK 4000 /* Fits into 12-bit Time registers */
31#define MAX_MS CLK_TO_MS(MAX_CLK)
32
33static const unsigned int led_n_base[ASIC3_NUM_LEDS] = {
34 [0] = ASIC3_LED_0_Base,
35 [1] = ASIC3_LED_1_Base,
36 [2] = ASIC3_LED_2_Base,
37};
38
39static void brightness_set(struct led_classdev *cdev,
40 enum led_brightness value)
41{
42 struct platform_device *pdev = to_platform_device(cdev->dev->parent);
43 const struct mfd_cell *cell = mfd_get_cell(pdev);
44 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
45 u32 timebase;
46 unsigned int base;
47
48 timebase = (value == LED_OFF) ? 0 : (LED_EN|0x4);
49
50 base = led_n_base[cell->id];
51 asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), 32);
52 asic3_write_register(asic, (base + ASIC3_LED_DutyTime), 32);
53 asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0);
54 asic3_write_register(asic, (base + ASIC3_LED_TimeBase), timebase);
55}
56
57static int blink_set(struct led_classdev *cdev,
58 unsigned long *delay_on,
59 unsigned long *delay_off)
60{
61 struct platform_device *pdev = to_platform_device(cdev->dev->parent);
62 const struct mfd_cell *cell = mfd_get_cell(pdev);
63 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
64 u32 on;
65 u32 off;
66 unsigned int base;
67
68 if (*delay_on > MAX_MS || *delay_off > MAX_MS)
69 return -EINVAL;
70
71 if (*delay_on == 0 && *delay_off == 0) {
72 /* If both are zero then a sensible default should be chosen */
73 on = MS_TO_CLK(500);
74 off = MS_TO_CLK(500);
75 } else {
76 on = MS_TO_CLK(*delay_on);
77 off = MS_TO_CLK(*delay_off);
78 if ((on + off) > MAX_CLK)
79 return -EINVAL;
80 }
81
82 base = led_n_base[cell->id];
83 asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), (on + off));
84 asic3_write_register(asic, (base + ASIC3_LED_DutyTime), on);
85 asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0);
86 asic3_write_register(asic, (base + ASIC3_LED_TimeBase), (LED_EN|0x4));
87
88 *delay_on = CLK_TO_MS(on);
89 *delay_off = CLK_TO_MS(off);
90
91 return 0;
92}
93
94static int __devinit asic3_led_probe(struct platform_device *pdev)
95{
96 struct asic3_led *led = pdev->dev.platform_data;
97 int ret;
98
99 ret = mfd_cell_enable(pdev);
100 if (ret < 0)
101 goto ret0;
102
103 led->cdev = kzalloc(sizeof(struct led_classdev), GFP_KERNEL);
104 if (!led->cdev) {
105 ret = -ENOMEM;
106 goto ret1;
107 }
108
109 led->cdev->name = led->name;
110 led->cdev->default_trigger = led->default_trigger;
111 led->cdev->brightness_set = brightness_set;
112 led->cdev->blink_set = blink_set;
113
114 ret = led_classdev_register(&pdev->dev, led->cdev);
115 if (ret < 0)
116 goto ret2;
117
118 return 0;
119
120ret2:
121 kfree(led->cdev);
122ret1:
123 (void) mfd_cell_disable(pdev);
124ret0:
125 return ret;
126}
127
128static int __devexit asic3_led_remove(struct platform_device *pdev)
129{
130 struct asic3_led *led = pdev->dev.platform_data;
131
132 led_classdev_unregister(led->cdev);
133
134 kfree(led->cdev);
135
136 return mfd_cell_disable(pdev);
137}
138
139static struct platform_driver asic3_led_driver = {
140 .probe = asic3_led_probe,
141 .remove = __devexit_p(asic3_led_remove),
142 .driver = {
143 .name = "leds-asic3",
144 .owner = THIS_MODULE,
145 },
146};
147
148MODULE_ALIAS("platform:leds-asic3");
149
150static int __init asic3_led_init(void)
151{
152 return platform_driver_register(&asic3_led_driver);
153}
154
155static void __exit asic3_led_exit(void)
156{
157 platform_driver_unregister(&asic3_led_driver);
158}
159
160module_init(asic3_led_init);
161module_exit(asic3_led_exit);
162
163MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
164MODULE_DESCRIPTION("HTC ASIC3 LED driver");
165MODULE_LICENSE("GPL");
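The MS_TO_CLK()/CLK_TO_MS() macros above do all the mapping between millisecond delays and the roughly 32 Hz TimeBase clock. The following stand-alone check confirms the numbers quoted in the comment (62 ms minimum, 125 s maximum, and a 500+500 ms default); the DIV_ROUND_CLOSEST used here is a simplified positive-only stand-in for the kernel macro.

#include <stdio.h>

/* simplified positive-only stand-in for the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))
#define MS_TO_CLK(ms)		DIV_ROUND_CLOSEST(((ms) * 1024), 32000)
#define CLK_TO_MS(clk)		(((clk) * 32000) / 1024)

int main(void)
{
	/* the default 500 ms on + 500 ms off becomes 16 + 16 clocks */
	printf("500 ms -> %d clocks -> %d ms\n",
	       MS_TO_CLK(500), CLK_TO_MS(MS_TO_CLK(500)));
	/* shortest supported on+off period: 2 clocks, about 62 ms */
	printf("31+31 ms -> %d clocks\n", MS_TO_CLK(31) + MS_TO_CLK(31));
	/* longest supported on+off period: 4000 clocks, 125 s */
	printf("62500+62500 ms -> %d clocks\n",
	       MS_TO_CLK(62500) + MS_TO_CLK(62500));
	return 0;
}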
diff --git a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c
new file mode 100644
index 000000000000..1c4ed5510f35
--- /dev/null
+++ b/drivers/leds/leds-gpio-register.c
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2011 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <linux/err.h>
10#include <linux/platform_device.h>
11#include <linux/slab.h>
12#include <linux/leds.h>
13
14/**
15 * gpio_led_register_device - register a gpio-led device
16 * @pdata: the platform data used for the new device
17 *
18 * Makes a copy of pdata and pdata->leds and registers a new leds-gpio device
 19 * with the result. This allows pdata and pdata->leds to be placed in .init.rodata
20 * and so saves some bytes compared to a static struct platform_device with
21 * static platform data.
22 *
23 * Returns the registered device or an error pointer.
24 */
25struct platform_device *__init gpio_led_register_device(
26 int id, const struct gpio_led_platform_data *pdata)
27{
28 struct platform_device *ret;
29 struct gpio_led_platform_data _pdata = *pdata;
30
31 _pdata.leds = kmemdup(pdata->leds,
32 pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL);
33 if (!_pdata.leds)
34 return ERR_PTR(-ENOMEM);
35
36 ret = platform_device_register_resndata(NULL, "leds-gpio", id,
37 NULL, 0, &_pdata, sizeof(_pdata));
38 if (IS_ERR(ret))
39 kfree(_pdata.leds);
40
41 return ret;
42}
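gpio_led_register_device() is intended to be called from board or arch init code with tables that can stay in .init.rodata, as the kernel-doc above explains. A sketch of such a caller follows; the LED name and GPIO number are invented, and it assumes the helper's declaration is reachable from <linux/leds.h> along with the usual gpio_led structures.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

static const struct gpio_led board_leds[] __initconst = {
	{
		.name		= "board:red:fault",	/* hypothetical name */
		.gpio		= 42,			/* hypothetical GPIO */
		.active_low	= 1,
	},
};

static const struct gpio_led_platform_data board_leds_pdata __initconst = {
	.num_leds	= ARRAY_SIZE(board_leds),
	.leds		= board_leds,
};

static int __init board_leds_init(void)
{
	struct platform_device *pdev;

	/* the helper copies both tables, so .init.rodata placement is fine */
	pdev = gpio_led_register_device(-1, &board_leds_pdata);

	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
device_initcall(board_leds_init);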
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
deleted file mode 100644
index 173d104d9ff2..000000000000
--- a/drivers/leds/leds-h1940.c
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * drivers/leds/leds-h1940.c
3 * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org>
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file COPYING in the main directory of this archive for
7 * more details.
8 *
9 * H1940 leds driver
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/ctype.h>
18#include <linux/leds.h>
19#include <linux/gpio.h>
20
21#include <mach/regs-gpio.h>
22#include <mach/hardware.h>
23#include <mach/h1940-latch.h>
24
25/*
26 * Green led.
27 */
28static void h1940_greenled_set(struct led_classdev *led_dev,
29 enum led_brightness value)
30{
31 switch (value) {
32 case LED_HALF:
33 h1940_latch_control(0, H1940_LATCH_LED_FLASH);
34 s3c2410_gpio_setpin(S3C2410_GPA7, 1);
35 break;
36 case LED_FULL:
37 h1940_latch_control(0, H1940_LATCH_LED_GREEN);
38 s3c2410_gpio_setpin(S3C2410_GPA7, 1);
39 break;
40 default:
41 case LED_OFF:
42 h1940_latch_control(H1940_LATCH_LED_FLASH, 0);
43 h1940_latch_control(H1940_LATCH_LED_GREEN, 0);
44 s3c2410_gpio_setpin(S3C2410_GPA7, 0);
45 break;
46 }
47}
48
49static struct led_classdev h1940_greenled = {
50 .name = "h1940:green",
51 .brightness_set = h1940_greenled_set,
52 .default_trigger = "h1940-charger",
53};
54
55/*
56 * Red led.
57 */
58static void h1940_redled_set(struct led_classdev *led_dev,
59 enum led_brightness value)
60{
61 switch (value) {
62 case LED_HALF:
63 h1940_latch_control(0, H1940_LATCH_LED_FLASH);
64 s3c2410_gpio_setpin(S3C2410_GPA1, 1);
65 break;
66 case LED_FULL:
67 h1940_latch_control(0, H1940_LATCH_LED_RED);
68 s3c2410_gpio_setpin(S3C2410_GPA1, 1);
69 break;
70 default:
71 case LED_OFF:
72 h1940_latch_control(H1940_LATCH_LED_FLASH, 0);
73 h1940_latch_control(H1940_LATCH_LED_RED, 0);
74 s3c2410_gpio_setpin(S3C2410_GPA1, 0);
75 break;
76 }
77}
78
79static struct led_classdev h1940_redled = {
80 .name = "h1940:red",
81 .brightness_set = h1940_redled_set,
82 .default_trigger = "h1940-charger",
83};
84
85/*
86 * Blue led.
87 * (it can only be blue flashing led)
88 */
89static void h1940_blueled_set(struct led_classdev *led_dev,
90 enum led_brightness value)
91{
92 if (value) {
93 /* flashing Blue */
94 h1940_latch_control(0, H1940_LATCH_LED_FLASH);
95 s3c2410_gpio_setpin(S3C2410_GPA3, 1);
96 } else {
97 h1940_latch_control(H1940_LATCH_LED_FLASH, 0);
98 s3c2410_gpio_setpin(S3C2410_GPA3, 0);
99 }
100
101}
102
103static struct led_classdev h1940_blueled = {
104 .name = "h1940:blue",
105 .brightness_set = h1940_blueled_set,
106 .default_trigger = "h1940-bluetooth",
107};
108
109static int __devinit h1940leds_probe(struct platform_device *pdev)
110{
111 int ret;
112
113 ret = led_classdev_register(&pdev->dev, &h1940_greenled);
114 if (ret)
115 goto err_green;
116
117 ret = led_classdev_register(&pdev->dev, &h1940_redled);
118 if (ret)
119 goto err_red;
120
121 ret = led_classdev_register(&pdev->dev, &h1940_blueled);
122 if (ret)
123 goto err_blue;
124
125 return 0;
126
127err_blue:
128 led_classdev_unregister(&h1940_redled);
129err_red:
130 led_classdev_unregister(&h1940_greenled);
131err_green:
132 return ret;
133}
134
135static int h1940leds_remove(struct platform_device *pdev)
136{
137 led_classdev_unregister(&h1940_greenled);
138 led_classdev_unregister(&h1940_redled);
139 led_classdev_unregister(&h1940_blueled);
140 return 0;
141}
142
143
144static struct platform_driver h1940leds_driver = {
145 .driver = {
146 .name = "h1940-leds",
147 .owner = THIS_MODULE,
148 },
149 .probe = h1940leds_probe,
150 .remove = h1940leds_remove,
151};
152
153
154static int __init h1940leds_init(void)
155{
156 return platform_driver_register(&h1940leds_driver);
157}
158
159static void __exit h1940leds_exit(void)
160{
161 platform_driver_unregister(&h1940leds_driver);
162}
163
164module_init(h1940leds_init);
165module_exit(h1940leds_exit);
166
167MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
168MODULE_DESCRIPTION("LED driver for the iPAQ H1940");
169MODULE_LICENSE("GPL");
170MODULE_ALIAS("platform:h1940-leds");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index b37e6186d0fa..4d7ce7631acf 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -17,6 +17,7 @@
17#include <linux/input.h> 17#include <linux/input.h>
18#include <linux/led-lm3530.h> 18#include <linux/led-lm3530.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/regulator/consumer.h>
20 21
21#define LM3530_LED_DEV "lcd-backlight" 22#define LM3530_LED_DEV "lcd-backlight"
22#define LM3530_NAME "lm3530-led" 23#define LM3530_NAME "lm3530-led"
@@ -96,12 +97,18 @@ static struct lm3530_mode_map mode_map[] = {
96 * @client: i2c client 97 * @client: i2c client
97 * @pdata: LM3530 platform data 98 * @pdata: LM3530 platform data
98 * @mode: mode of operation - manual, ALS, PWM 99 * @mode: mode of operation - manual, ALS, PWM
 100 * @regulator: regulator used to supply power to the chip
 101 * @brightness: previous brightness value
102 * @enable: regulator is enabled
99 */ 103 */
100struct lm3530_data { 104struct lm3530_data {
101 struct led_classdev led_dev; 105 struct led_classdev led_dev;
102 struct i2c_client *client; 106 struct i2c_client *client;
103 struct lm3530_platform_data *pdata; 107 struct lm3530_platform_data *pdata;
104 enum lm3530_mode mode; 108 enum lm3530_mode mode;
109 struct regulator *regulator;
110 enum led_brightness brightness;
111 bool enable;
105}; 112};
106 113
107static const u8 lm3530_reg[LM3530_REG_MAX] = { 114static const u8 lm3530_reg[LM3530_REG_MAX] = {
@@ -172,7 +179,10 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
172 brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) | 179 brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
173 (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT); 180 (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
174 181
175 brightness = pltfm->brt_val; 182 if (drvdata->brightness)
183 brightness = drvdata->brightness;
184 else
185 brightness = drvdata->brightness = pltfm->brt_val;
176 186
177 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */ 187 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
178 reg_val[1] = als_config; /* LM3530_ALS_CONFIG */ 188 reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
@@ -190,6 +200,16 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
190 reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */ 200 reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
191 reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */ 201 reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
192 202
203 if (!drvdata->enable) {
204 ret = regulator_enable(drvdata->regulator);
205 if (ret) {
206 dev_err(&drvdata->client->dev,
207 "Enable regulator failed\n");
208 return ret;
209 }
210 drvdata->enable = true;
211 }
212
193 for (i = 0; i < LM3530_REG_MAX; i++) { 213 for (i = 0; i < LM3530_REG_MAX; i++) {
194 ret = i2c_smbus_write_byte_data(client, 214 ret = i2c_smbus_write_byte_data(client,
195 lm3530_reg[i], reg_val[i]); 215 lm3530_reg[i], reg_val[i]);
@@ -210,12 +230,31 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
210 switch (drvdata->mode) { 230 switch (drvdata->mode) {
211 case LM3530_BL_MODE_MANUAL: 231 case LM3530_BL_MODE_MANUAL:
212 232
233 if (!drvdata->enable) {
234 err = lm3530_init_registers(drvdata);
235 if (err) {
236 dev_err(&drvdata->client->dev,
237 "Register Init failed: %d\n", err);
238 break;
239 }
240 }
241
213 /* set the brightness in brightness control register*/ 242 /* set the brightness in brightness control register*/
214 err = i2c_smbus_write_byte_data(drvdata->client, 243 err = i2c_smbus_write_byte_data(drvdata->client,
215 LM3530_BRT_CTRL_REG, brt_val / 2); 244 LM3530_BRT_CTRL_REG, brt_val / 2);
216 if (err) 245 if (err)
217 dev_err(&drvdata->client->dev, 246 dev_err(&drvdata->client->dev,
218 "Unable to set brightness: %d\n", err); 247 "Unable to set brightness: %d\n", err);
248 else
249 drvdata->brightness = brt_val / 2;
250
251 if (brt_val == 0) {
252 err = regulator_disable(drvdata->regulator);
253 if (err)
254 dev_err(&drvdata->client->dev,
255 "Disable regulator failed\n");
256 drvdata->enable = false;
257 }
219 break; 258 break;
220 case LM3530_BL_MODE_ALS: 259 case LM3530_BL_MODE_ALS:
221 break; 260 break;
@@ -297,20 +336,31 @@ static int __devinit lm3530_probe(struct i2c_client *client,
297 drvdata->mode = pdata->mode; 336 drvdata->mode = pdata->mode;
298 drvdata->client = client; 337 drvdata->client = client;
299 drvdata->pdata = pdata; 338 drvdata->pdata = pdata;
339 drvdata->brightness = LED_OFF;
340 drvdata->enable = false;
300 drvdata->led_dev.name = LM3530_LED_DEV; 341 drvdata->led_dev.name = LM3530_LED_DEV;
301 drvdata->led_dev.brightness_set = lm3530_brightness_set; 342 drvdata->led_dev.brightness_set = lm3530_brightness_set;
302 343
303 i2c_set_clientdata(client, drvdata); 344 i2c_set_clientdata(client, drvdata);
304 345
305 err = lm3530_init_registers(drvdata); 346 drvdata->regulator = regulator_get(&client->dev, "vin");
306 if (err < 0) { 347 if (IS_ERR(drvdata->regulator)) {
307 dev_err(&client->dev, "Register Init failed: %d\n", err); 348 dev_err(&client->dev, "regulator get failed\n");
308 err = -ENODEV; 349 err = PTR_ERR(drvdata->regulator);
309 goto err_reg_init; 350 drvdata->regulator = NULL;
351 goto err_regulator_get;
310 } 352 }
311 353
312 err = led_classdev_register((struct device *) 354 if (drvdata->pdata->brt_val) {
313 &client->dev, &drvdata->led_dev); 355 err = lm3530_init_registers(drvdata);
356 if (err < 0) {
357 dev_err(&client->dev,
358 "Register Init failed: %d\n", err);
359 err = -ENODEV;
360 goto err_reg_init;
361 }
362 }
363 err = led_classdev_register(&client->dev, &drvdata->led_dev);
314 if (err < 0) { 364 if (err < 0) {
315 dev_err(&client->dev, "Register led class failed: %d\n", err); 365 dev_err(&client->dev, "Register led class failed: %d\n", err);
316 err = -ENODEV; 366 err = -ENODEV;
@@ -330,6 +380,9 @@ err_create_file:
330 led_classdev_unregister(&drvdata->led_dev); 380 led_classdev_unregister(&drvdata->led_dev);
331err_class_register: 381err_class_register:
332err_reg_init: 382err_reg_init:
383 regulator_put(drvdata->regulator);
384err_regulator_get:
385 i2c_set_clientdata(client, NULL);
333 kfree(drvdata); 386 kfree(drvdata);
334err_out: 387err_out:
335 return err; 388 return err;
@@ -340,6 +393,10 @@ static int __devexit lm3530_remove(struct i2c_client *client)
340 struct lm3530_data *drvdata = i2c_get_clientdata(client); 393 struct lm3530_data *drvdata = i2c_get_clientdata(client);
341 394
342 device_remove_file(drvdata->led_dev.dev, &dev_attr_mode); 395 device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
396
397 if (drvdata->enable)
398 regulator_disable(drvdata->regulator);
399 regulator_put(drvdata->regulator);
343 led_classdev_unregister(&drvdata->led_dev); 400 led_classdev_unregister(&drvdata->led_dev);
344 kfree(drvdata); 401 kfree(drvdata);
345 return 0; 402 return 0;
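Since the probe now calls regulator_get(&client->dev, "vin"), the board code has to provide a consumer supply named "vin" for the LM3530 device, otherwise probe will fail at regulator_get(). A sketch of the machine-side mapping is shown below; the i2c bus/address in the device name and the constraints are placeholders, only the "vin" supply name comes from the driver change.

#include <linux/regulator/machine.h>

/* "2-0038" (i2c bus 2, address 0x38) is an assumed device name */
static struct regulator_consumer_supply board_lm3530_supply =
	REGULATOR_SUPPLY("vin", "2-0038");

static struct regulator_init_data board_lm3530_reg_initdata = {
	.constraints = {
		.valid_ops_mask		= REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= 1,
	.consumer_supplies	= &board_lm3530_supply,
};

This init data would then be attached to whichever fixed or PMIC regulator actually feeds the backlight rail on the board.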
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 126ca7955f6e..f369e56d6547 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -22,7 +22,6 @@
22#include <linux/leds.h> 22#include <linux/leds.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/mfd/mc13783.h> 24#include <linux/mfd/mc13783.h>
25#include <linux/mfd/core.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27 26
28struct mc13783_led { 27struct mc13783_led {
@@ -184,7 +183,7 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
184 183
185static int __devinit mc13783_leds_prepare(struct platform_device *pdev) 184static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
186{ 185{
187 struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev); 186 struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
188 struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); 187 struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
189 int ret = 0; 188 int ret = 0;
190 int reg = 0; 189 int reg = 0;
@@ -265,7 +264,7 @@ out:
265 264
266static int __devinit mc13783_led_probe(struct platform_device *pdev) 265static int __devinit mc13783_led_probe(struct platform_device *pdev)
267{ 266{
268 struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev); 267 struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
269 struct mc13783_led_platform_data *led_cur; 268 struct mc13783_led_platform_data *led_cur;
270 struct mc13783_led *led, *led_dat; 269 struct mc13783_led *led, *led_dat;
271 int ret, i; 270 int ret, i;
@@ -352,7 +351,7 @@ err_free:
352 351
353static int __devexit mc13783_led_remove(struct platform_device *pdev) 352static int __devexit mc13783_led_remove(struct platform_device *pdev)
354{ 353{
355 struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev); 354 struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
356 struct mc13783_led *led = platform_get_drvdata(pdev); 355 struct mc13783_led *led = platform_get_drvdata(pdev);
357 struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); 356 struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
358 int i; 357 int i;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 5bf63af09ddf..d8d3a1e910a1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * pca9532.c - 16-bit Led dimmer 2 * pca9532.c - 16-bit Led dimmer
3 * 3 *
4 * Copyright (C) 2011 Jan Weitzel
4 * Copyright (C) 2008 Riku Voipio 5 * Copyright (C) 2008 Riku Voipio
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License. 9 * the Free Software Foundation; version 2 of the License.
9 * 10 *
10 * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf 11 * Datasheet: http://www.nxp.com/documents/data_sheet/PCA9532.pdf
11 * 12 *
12 */ 13 */
13 14
@@ -19,21 +20,32 @@
19#include <linux/mutex.h> 20#include <linux/mutex.h>
20#include <linux/workqueue.h> 21#include <linux/workqueue.h>
21#include <linux/leds-pca9532.h> 22#include <linux/leds-pca9532.h>
23#include <linux/gpio.h>
22 24
23#define PCA9532_REG_PSC(i) (0x2+(i)*2) 25/* m = num_leds*/
24#define PCA9532_REG_PWM(i) (0x3+(i)*2) 26#define PCA9532_REG_INPUT(i) ((i) >> 3)
25#define PCA9532_REG_LS0 0x6 27#define PCA9532_REG_OFFSET(m) ((m) >> 4)
26#define LED_REG(led) ((led>>2)+PCA9532_REG_LS0) 28#define PCA9532_REG_PSC(m, i) (PCA9532_REG_OFFSET(m) + 0x1 + (i) * 2)
27#define LED_NUM(led) (led & 0x3) 29#define PCA9532_REG_PWM(m, i) (PCA9532_REG_OFFSET(m) + 0x2 + (i) * 2)
30#define LED_REG(m, led) (PCA9532_REG_OFFSET(m) + 0x5 + (led >> 2))
31#define LED_NUM(led) (led & 0x3)
28 32
29#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev) 33#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev)
30 34
35struct pca9532_chip_info {
36 u8 num_leds;
37};
38
31struct pca9532_data { 39struct pca9532_data {
32 struct i2c_client *client; 40 struct i2c_client *client;
33 struct pca9532_led leds[16]; 41 struct pca9532_led leds[16];
34 struct mutex update_lock; 42 struct mutex update_lock;
35 struct input_dev *idev; 43 struct input_dev *idev;
36 struct work_struct work; 44 struct work_struct work;
45#ifdef CONFIG_LEDS_PCA9532_GPIO
46 struct gpio_chip gpio;
47#endif
48 const struct pca9532_chip_info *chip_info;
37 u8 pwm[2]; 49 u8 pwm[2];
38 u8 psc[2]; 50 u8 psc[2];
39}; 51};
@@ -42,16 +54,41 @@ static int pca9532_probe(struct i2c_client *client,
42 const struct i2c_device_id *id); 54 const struct i2c_device_id *id);
43static int pca9532_remove(struct i2c_client *client); 55static int pca9532_remove(struct i2c_client *client);
44 56
57enum {
58 pca9530,
59 pca9531,
60 pca9532,
61 pca9533,
62};
63
45static const struct i2c_device_id pca9532_id[] = { 64static const struct i2c_device_id pca9532_id[] = {
46 { "pca9532", 0 }, 65 { "pca9530", pca9530 },
66 { "pca9531", pca9531 },
67 { "pca9532", pca9532 },
68 { "pca9533", pca9533 },
47 { } 69 { }
48}; 70};
49 71
50MODULE_DEVICE_TABLE(i2c, pca9532_id); 72MODULE_DEVICE_TABLE(i2c, pca9532_id);
51 73
74static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
75 [pca9530] = {
76 .num_leds = 2,
77 },
78 [pca9531] = {
79 .num_leds = 8,
80 },
81 [pca9532] = {
82 .num_leds = 16,
83 },
84 [pca9533] = {
85 .num_leds = 4,
86 },
87};
88
52static struct i2c_driver pca9532_driver = { 89static struct i2c_driver pca9532_driver = {
53 .driver = { 90 .driver = {
54 .name = "pca9532", 91 .name = "pca953x",
55 }, 92 },
56 .probe = pca9532_probe, 93 .probe = pca9532_probe,
57 .remove = pca9532_remove, 94 .remove = pca9532_remove,
@@ -68,7 +105,7 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
68{ 105{
69 int a = 0, b = 0, i = 0; 106 int a = 0, b = 0, i = 0;
70 struct pca9532_data *data = i2c_get_clientdata(client); 107 struct pca9532_data *data = i2c_get_clientdata(client);
71 for (i = 0; i < 16; i++) { 108 for (i = 0; i < data->chip_info->num_leds; i++) {
72 if (data->leds[i].type == PCA9532_TYPE_LED && 109 if (data->leds[i].type == PCA9532_TYPE_LED &&
73 data->leds[i].state == PCA9532_PWM0+pwm) { 110 data->leds[i].state == PCA9532_PWM0+pwm) {
74 a++; 111 a++;
@@ -92,10 +129,12 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
92static int pca9532_setpwm(struct i2c_client *client, int pwm) 129static int pca9532_setpwm(struct i2c_client *client, int pwm)
93{ 130{
94 struct pca9532_data *data = i2c_get_clientdata(client); 131 struct pca9532_data *data = i2c_get_clientdata(client);
132 u8 maxleds = data->chip_info->num_leds;
133
95 mutex_lock(&data->update_lock); 134 mutex_lock(&data->update_lock);
96 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), 135 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(maxleds, pwm),
97 data->pwm[pwm]); 136 data->pwm[pwm]);
98 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), 137 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(maxleds, pwm),
99 data->psc[pwm]); 138 data->psc[pwm]);
100 mutex_unlock(&data->update_lock); 139 mutex_unlock(&data->update_lock);
101 return 0; 140 return 0;
@@ -106,15 +145,16 @@ static void pca9532_setled(struct pca9532_led *led)
106{ 145{
107 struct i2c_client *client = led->client; 146 struct i2c_client *client = led->client;
108 struct pca9532_data *data = i2c_get_clientdata(client); 147 struct pca9532_data *data = i2c_get_clientdata(client);
148 u8 maxleds = data->chip_info->num_leds;
109 char reg; 149 char reg;
110 150
111 mutex_lock(&data->update_lock); 151 mutex_lock(&data->update_lock);
112 reg = i2c_smbus_read_byte_data(client, LED_REG(led->id)); 152 reg = i2c_smbus_read_byte_data(client, LED_REG(maxleds, led->id));
113 /* zero led bits */ 153 /* zero led bits */
114 reg = reg & ~(0x3<<LED_NUM(led->id)*2); 154 reg = reg & ~(0x3<<LED_NUM(led->id)*2);
115 /* set the new value */ 155 /* set the new value */
116 reg = reg | (led->state << LED_NUM(led->id)*2); 156 reg = reg | (led->state << LED_NUM(led->id)*2);
117 i2c_smbus_write_byte_data(client, LED_REG(led->id), reg); 157 i2c_smbus_write_byte_data(client, LED_REG(maxleds, led->id), reg);
118 mutex_unlock(&data->update_lock); 158 mutex_unlock(&data->update_lock);
119} 159}
120 160
@@ -183,10 +223,12 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
183 223
184static void pca9532_input_work(struct work_struct *work) 224static void pca9532_input_work(struct work_struct *work)
185{ 225{
186 struct pca9532_data *data; 226 struct pca9532_data *data =
187 data = container_of(work, struct pca9532_data, work); 227 container_of(work, struct pca9532_data, work);
228 u8 maxleds = data->chip_info->num_leds;
229
188 mutex_lock(&data->update_lock); 230 mutex_lock(&data->update_lock);
189 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), 231 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(maxleds, 1),
190 data->pwm[1]); 232 data->pwm[1]);
191 mutex_unlock(&data->update_lock); 233 mutex_unlock(&data->update_lock);
192} 234}
@@ -200,16 +242,68 @@ static void pca9532_led_work(struct work_struct *work)
200 pca9532_setled(led); 242 pca9532_setled(led);
201} 243}
202 244
203static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs) 245#ifdef CONFIG_LEDS_PCA9532_GPIO
246static int pca9532_gpio_request_pin(struct gpio_chip *gc, unsigned offset)
247{
248 struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio);
249 struct pca9532_led *led = &data->leds[offset];
250
251 if (led->type == PCA9532_TYPE_GPIO)
252 return 0;
253
254 return -EBUSY;
255}
256
257static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int val)
258{
259 struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio);
260 struct pca9532_led *led = &data->leds[offset];
261
262 if (val)
263 led->state = PCA9532_ON;
264 else
265 led->state = PCA9532_OFF;
266
267 pca9532_setled(led);
268}
269
270static int pca9532_gpio_get_value(struct gpio_chip *gc, unsigned offset)
271{
272 struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio);
273 unsigned char reg;
274
275 reg = i2c_smbus_read_byte_data(data->client, PCA9532_REG_INPUT(offset));
276
277 return !!(reg & (1 << (offset % 8)));
278}
279
280static int pca9532_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
281{
282 /* To use as input ensure pin is not driven */
283 pca9532_gpio_set_value(gc, offset, 0);
284
285 return 0;
286}
287
288static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int val)
289{
290 pca9532_gpio_set_value(gc, offset, val);
291
292 return 0;
293}
294#endif /* CONFIG_LEDS_PCA9532_GPIO */
295
296static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
204{ 297{
205 int i = n_devs; 298 int i = n_devs;
206 299
207 if (!data) 300 if (!data)
208 return; 301 return -EINVAL;
209 302
210 while (--i >= 0) { 303 while (--i >= 0) {
211 switch (data->leds[i].type) { 304 switch (data->leds[i].type) {
212 case PCA9532_TYPE_NONE: 305 case PCA9532_TYPE_NONE:
306 case PCA9532_TYPE_GPIO:
213 break; 307 break;
214 case PCA9532_TYPE_LED: 308 case PCA9532_TYPE_LED:
215 led_classdev_unregister(&data->leds[i].ldev); 309 led_classdev_unregister(&data->leds[i].ldev);
@@ -224,23 +318,38 @@ static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
224 break; 318 break;
225 } 319 }
226 } 320 }
321
322#ifdef CONFIG_LEDS_PCA9532_GPIO
323 if (data->gpio.dev) {
324 int err = gpiochip_remove(&data->gpio);
325 if (err) {
326 dev_err(&data->client->dev, "%s failed, %d\n",
327 "gpiochip_remove()", err);
328 return err;
329 }
330 }
331#endif
332
333 return 0;
227} 334}
228 335
229static int pca9532_configure(struct i2c_client *client, 336static int pca9532_configure(struct i2c_client *client,
230 struct pca9532_data *data, struct pca9532_platform_data *pdata) 337 struct pca9532_data *data, struct pca9532_platform_data *pdata)
231{ 338{
232 int i, err = 0; 339 int i, err = 0;
340 int gpios = 0;
341 u8 maxleds = data->chip_info->num_leds;
233 342
234 for (i = 0; i < 2; i++) { 343 for (i = 0; i < 2; i++) {
235 data->pwm[i] = pdata->pwm[i]; 344 data->pwm[i] = pdata->pwm[i];
236 data->psc[i] = pdata->psc[i]; 345 data->psc[i] = pdata->psc[i];
237 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(i), 346 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(maxleds, i),
238 data->pwm[i]); 347 data->pwm[i]);
239 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(i), 348 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(maxleds, i),
240 data->psc[i]); 349 data->psc[i]);
241 } 350 }
242 351
243 for (i = 0; i < 16; i++) { 352 for (i = 0; i < data->chip_info->num_leds; i++) {
244 struct pca9532_led *led = &data->leds[i]; 353 struct pca9532_led *led = &data->leds[i];
245 struct pca9532_led *pled = &pdata->leds[i]; 354 struct pca9532_led *pled = &pdata->leds[i];
246 led->client = client; 355 led->client = client;
@@ -249,6 +358,9 @@ static int pca9532_configure(struct i2c_client *client,
249 switch (led->type) { 358 switch (led->type) {
250 case PCA9532_TYPE_NONE: 359 case PCA9532_TYPE_NONE:
251 break; 360 break;
361 case PCA9532_TYPE_GPIO:
362 gpios++;
363 break;
252 case PCA9532_TYPE_LED: 364 case PCA9532_TYPE_LED:
253 led->state = pled->state; 365 led->state = pled->state;
254 led->name = pled->name; 366 led->name = pled->name;
@@ -297,6 +409,34 @@ static int pca9532_configure(struct i2c_client *client,
297 break; 409 break;
298 } 410 }
299 } 411 }
412
413#ifdef CONFIG_LEDS_PCA9532_GPIO
414 if (gpios) {
415 data->gpio.label = "gpio-pca9532";
416 data->gpio.direction_input = pca9532_gpio_direction_input;
417 data->gpio.direction_output = pca9532_gpio_direction_output;
418 data->gpio.set = pca9532_gpio_set_value;
419 data->gpio.get = pca9532_gpio_get_value;
420 data->gpio.request = pca9532_gpio_request_pin;
421 data->gpio.can_sleep = 1;
422 data->gpio.base = pdata->gpio_base;
423 data->gpio.ngpio = data->chip_info->num_leds;
424 data->gpio.dev = &client->dev;
425 data->gpio.owner = THIS_MODULE;
426
427 err = gpiochip_add(&data->gpio);
428 if (err) {
429 /* Use data->gpio.dev as a flag for freeing gpiochip */
430 data->gpio.dev = NULL;
431 dev_warn(&client->dev, "could not add gpiochip\n");
432 } else {
433 dev_info(&client->dev, "gpios %i...%i\n",
434 data->gpio.base, data->gpio.base +
435 data->gpio.ngpio - 1);
436 }
437 }
438#endif
439
300 return 0; 440 return 0;
301 441
302exit: 442exit:
@@ -322,6 +462,8 @@ static int pca9532_probe(struct i2c_client *client,
322 if (!data) 462 if (!data)
323 return -ENOMEM; 463 return -ENOMEM;
324 464
465 data->chip_info = &pca9532_chip_info_tbl[id->driver_data];
466
325 dev_info(&client->dev, "setting platform data\n"); 467 dev_info(&client->dev, "setting platform data\n");
326 i2c_set_clientdata(client, data); 468 i2c_set_clientdata(client, data);
327 data->client = client; 469 data->client = client;
@@ -337,7 +479,12 @@ static int pca9532_probe(struct i2c_client *client,
337static int pca9532_remove(struct i2c_client *client) 479static int pca9532_remove(struct i2c_client *client)
338{ 480{
339 struct pca9532_data *data = i2c_get_clientdata(client); 481 struct pca9532_data *data = i2c_get_clientdata(client);
340 pca9532_destroy_devices(data, 16); 482 int err;
483
484 err = pca9532_destroy_devices(data, data->chip_info->num_leds);
485 if (err)
486 return err;
487
341 kfree(data); 488 kfree(data);
342 return 0; 489 return 0;
343} 490}
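The new register macros fold the chip-dependent offset into every access: the 16-LED PCA9532 has two INPUT registers, which shifts PSC/PWM/LS up by one, while the smaller parts start right after a single INPUT register. The stand-alone check below (macros adapted from the driver, printed values purely for illustration) confirms that the PCA9532 case still lands on the old hard-coded constants 0x2/0x3/0x6, while the PCA9530 starts at 0x1/0x2/0x5.

#include <stdio.h>

/* m is the number of LEDs on the chip variant */
#define PCA9532_REG_OFFSET(m)	((m) >> 4)
#define PCA9532_REG_PSC(m, i)	(PCA9532_REG_OFFSET(m) + 0x1 + (i) * 2)
#define PCA9532_REG_PWM(m, i)	(PCA9532_REG_OFFSET(m) + 0x2 + (i) * 2)
#define LED_REG(m, led)		(PCA9532_REG_OFFSET(m) + 0x5 + ((led) >> 2))

int main(void)
{
	/* pca9530: 2 LEDs, single INPUT register, no offset */
	printf("pca9530: PSC0=0x%x PWM0=0x%x LS0=0x%x\n",
	       PCA9532_REG_PSC(2, 0), PCA9532_REG_PWM(2, 0), LED_REG(2, 0));
	/* pca9532: 16 LEDs, two INPUT registers, everything shifted by one */
	printf("pca9532: PSC0=0x%x PWM0=0x%x LS0=0x%x\n",
	       PCA9532_REG_PSC(16, 0), PCA9532_REG_PWM(16, 0), LED_REG(16, 0));
	return 0;
}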
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 2dd8ecbfdc31..e77c7f8dcdd4 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -40,10 +40,17 @@ void led_trigger_set_default(struct led_classdev *led_cdev);
40void led_trigger_set(struct led_classdev *led_cdev, 40void led_trigger_set(struct led_classdev *led_cdev,
41 struct led_trigger *trigger); 41 struct led_trigger *trigger);
42void led_trigger_remove(struct led_classdev *led_cdev); 42void led_trigger_remove(struct led_classdev *led_cdev);
43
44static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
45{
46 return led_cdev->trigger_data;
47}
48
43#else 49#else
44#define led_trigger_set_default(x) do {} while (0) 50#define led_trigger_set_default(x) do {} while (0)
45#define led_trigger_set(x, y) do {} while (0) 51#define led_trigger_set(x, y) do {} while (0)
46#define led_trigger_remove(x) do {} while (0) 52#define led_trigger_remove(x) do {} while (0)
53#define led_get_trigger_data(x) (NULL)
47#endif 54#endif
48 55
49ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, 56ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index b09bcbeade9c..d87c9d02f786 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -91,6 +91,9 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
91 if (rc) 91 if (rc)
92 goto err_out_delayon; 92 goto err_out_delayon;
93 93
94 led_blink_set(led_cdev, &led_cdev->blink_delay_on,
95 &led_cdev->blink_delay_off);
96
94 led_cdev->trigger_data = (void *)1; 97 led_cdev->trigger_data = (void *)1;
95 98
96 return; 99 return;
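The three LED core changes above work together: leds.h gains led_get_trigger_data(), the timer trigger now calls led_blink_set() during activation and then marks the classdev by setting trigger_data, and led_set_software_blink() only treats unchanged delays as a no-op for classdevs marked this way. A hypothetical trigger following the same pattern might look like the sketch below; apart from led_get_trigger_data(), nothing in it is introduced by this patch, and it would be registered with led_trigger_register(&example_trigger).

#include <linux/leds.h>
#include "leds.h"	/* private drivers/leds/leds.h for led_get_trigger_data() */

static void example_trig_activate(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 100, delay_off = 900;	/* illustrative delays */

	led_blink_set(led_cdev, &delay_on, &delay_off);

	/* a non-NULL trigger_data marks this classdev as activated, so the
	 * core skips re-arming the software blink for unchanged delays */
	led_cdev->trigger_data = (void *)1;
}

static void example_trig_deactivate(struct led_classdev *led_cdev)
{
	if (led_get_trigger_data(led_cdev))
		led_cdev->trigger_data = NULL;
}

static struct led_trigger example_trigger = {
	.name		= "example",
	.activate	= example_trig_activate,
	.deactivate	= example_trig_deactivate,
};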
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 2d8b4044be36..b2b0c45f32a9 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include <linux/i2c.h> 22#include <linux/i2c.h>
23#include <linux/i2c-algo-bit.h>
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/module.h> 26#include <linux/module.h>
@@ -49,11 +50,12 @@
49 50
50#define UNSET (-1U) 51#define UNSET (-1U)
51 52
52#define DM1105_BOARD_NOAUTO UNSET 53#define DM1105_BOARD_NOAUTO UNSET
53#define DM1105_BOARD_UNKNOWN 0 54#define DM1105_BOARD_UNKNOWN 0
54#define DM1105_BOARD_DVBWORLD_2002 1 55#define DM1105_BOARD_DVBWORLD_2002 1
55#define DM1105_BOARD_DVBWORLD_2004 2 56#define DM1105_BOARD_DVBWORLD_2004 2
56#define DM1105_BOARD_AXESS_DM05 3 57#define DM1105_BOARD_AXESS_DM05 3
58#define DM1105_BOARD_UNBRANDED_I2C_ON_GPIO 4
57 59
58/* ----------------------------------------------- */ 60/* ----------------------------------------------- */
59/* 61/*
@@ -157,22 +159,38 @@
157#define DM1105_MAX 0x04 159#define DM1105_MAX 0x04
158 160
159#define DRIVER_NAME "dm1105" 161#define DRIVER_NAME "dm1105"
162#define DM1105_I2C_GPIO_NAME "dm1105-gpio"
160 163
161#define DM1105_DMA_PACKETS 47 164#define DM1105_DMA_PACKETS 47
162#define DM1105_DMA_PACKET_LENGTH (128*4) 165#define DM1105_DMA_PACKET_LENGTH (128*4)
163#define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS) 166#define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS)
164 167
168/* */
169#define GPIO08 (1 << 8)
170#define GPIO13 (1 << 13)
171#define GPIO14 (1 << 14)
172#define GPIO15 (1 << 15)
173#define GPIO16 (1 << 16)
174#define GPIO17 (1 << 17)
175#define GPIO_ALL 0x03ffff
176
165/* GPIO's for LNB power control */ 177/* GPIO's for LNB power control */
166#define DM1105_LNB_MASK 0x00000000 178#define DM1105_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
167#define DM1105_LNB_OFF 0x00020000 179#define DM1105_LNB_OFF GPIO17
168#define DM1105_LNB_13V 0x00010100 180#define DM1105_LNB_13V (GPIO16 | GPIO08)
169#define DM1105_LNB_18V 0x00000100 181#define DM1105_LNB_18V GPIO08
170 182
171/* GPIO's for LNB power control for Axess DM05 */ 183/* GPIO's for LNB power control for Axess DM05 */
172#define DM05_LNB_MASK 0x00000000 184#define DM05_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
173#define DM05_LNB_OFF 0x00020000/* actually 13v */ 185#define DM05_LNB_OFF GPIO17/* actually 13v */
174#define DM05_LNB_13V 0x00020000 186#define DM05_LNB_13V GPIO17
175#define DM05_LNB_18V 0x00030000 187#define DM05_LNB_18V (GPIO17 | GPIO16)
188
189/* GPIO's for LNB power control for unbranded with I2C on GPIO */
190#define UNBR_LNB_MASK (GPIO17 | GPIO16)
191#define UNBR_LNB_OFF 0
192#define UNBR_LNB_13V GPIO17
193#define UNBR_LNB_18V (GPIO17 | GPIO16)
176 194
177static unsigned int card[] = {[0 ... 3] = UNSET }; 195static unsigned int card[] = {[0 ... 3] = UNSET };
178module_param_array(card, int, NULL, 0444); 196module_param_array(card, int, NULL, 0444);
@@ -187,7 +205,11 @@ static unsigned int dm1105_devcount;
187DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 205DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
188 206
189struct dm1105_board { 207struct dm1105_board {
190 char *name; 208 char *name;
209 struct {
210 u32 mask, off, v13, v18;
211 } lnb;
212 u32 gpio_scl, gpio_sda;
191}; 213};
192 214
193struct dm1105_subid { 215struct dm1105_subid {
@@ -199,15 +221,50 @@ struct dm1105_subid {
199static const struct dm1105_board dm1105_boards[] = { 221static const struct dm1105_board dm1105_boards[] = {
200 [DM1105_BOARD_UNKNOWN] = { 222 [DM1105_BOARD_UNKNOWN] = {
201 .name = "UNKNOWN/GENERIC", 223 .name = "UNKNOWN/GENERIC",
224 .lnb = {
225 .mask = DM1105_LNB_MASK,
226 .off = DM1105_LNB_OFF,
227 .v13 = DM1105_LNB_13V,
228 .v18 = DM1105_LNB_18V,
229 },
202 }, 230 },
203 [DM1105_BOARD_DVBWORLD_2002] = { 231 [DM1105_BOARD_DVBWORLD_2002] = {
204 .name = "DVBWorld PCI 2002", 232 .name = "DVBWorld PCI 2002",
233 .lnb = {
234 .mask = DM1105_LNB_MASK,
235 .off = DM1105_LNB_OFF,
236 .v13 = DM1105_LNB_13V,
237 .v18 = DM1105_LNB_18V,
238 },
205 }, 239 },
206 [DM1105_BOARD_DVBWORLD_2004] = { 240 [DM1105_BOARD_DVBWORLD_2004] = {
207 .name = "DVBWorld PCI 2004", 241 .name = "DVBWorld PCI 2004",
242 .lnb = {
243 .mask = DM1105_LNB_MASK,
244 .off = DM1105_LNB_OFF,
245 .v13 = DM1105_LNB_13V,
246 .v18 = DM1105_LNB_18V,
247 },
208 }, 248 },
209 [DM1105_BOARD_AXESS_DM05] = { 249 [DM1105_BOARD_AXESS_DM05] = {
210 .name = "Axess/EasyTv DM05", 250 .name = "Axess/EasyTv DM05",
251 .lnb = {
252 .mask = DM05_LNB_MASK,
253 .off = DM05_LNB_OFF,
254 .v13 = DM05_LNB_13V,
255 .v18 = DM05_LNB_18V,
256 },
257 },
258 [DM1105_BOARD_UNBRANDED_I2C_ON_GPIO] = {
259 .name = "Unbranded DM1105 with i2c on GPIOs",
260 .lnb = {
261 .mask = UNBR_LNB_MASK,
262 .off = UNBR_LNB_OFF,
263 .v13 = UNBR_LNB_13V,
264 .v18 = UNBR_LNB_18V,
265 },
266 .gpio_scl = GPIO14,
267 .gpio_sda = GPIO13,
211 }, 268 },
212}; 269};
213 270
@@ -293,6 +350,8 @@ struct dm1105_dev {
293 350
294 /* i2c */ 351 /* i2c */
295 struct i2c_adapter i2c_adap; 352 struct i2c_adapter i2c_adap;
353 struct i2c_adapter i2c_bb_adap;
354 struct i2c_algo_bit_data i2c_bit;
296 355
297 /* irq */ 356 /* irq */
298 struct work_struct work; 357 struct work_struct work;
@@ -328,6 +387,103 @@ struct dm1105_dev {
328#define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit)) 387#define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit))
329#define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0) 388#define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0)
330 389
390/* The chip has 18 GPIOs. In HOST mode the GPIOs are used as 15-bit address lines,
391 so we can use only 3 GPIOs, from GPIO15 to GPIO17.
392 Here we do not check whether HOST mode is enabled, as it is not implemented yet.
393 */
394static void dm1105_gpio_set(struct dm1105_dev *dev, u32 mask)
395{
396 if (mask & 0xfffc0000)
397 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
398
399 if (mask & 0x0003ffff)
400 dm_setl(DM1105_GPIOVAL, mask & 0x0003ffff);
401
402}
403
404static void dm1105_gpio_clear(struct dm1105_dev *dev, u32 mask)
405{
406 if (mask & 0xfffc0000)
407 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
408
409 if (mask & 0x0003ffff)
410 dm_clearl(DM1105_GPIOVAL, mask & 0x0003ffff);
411
412}
413
414static void dm1105_gpio_andor(struct dm1105_dev *dev, u32 mask, u32 val)
415{
416 if (mask & 0xfffc0000)
417 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
418
419 if (mask & 0x0003ffff)
420 dm_andorl(DM1105_GPIOVAL, mask & 0x0003ffff, val);
421
422}
423
424static u32 dm1105_gpio_get(struct dm1105_dev *dev, u32 mask)
425{
426 if (mask & 0xfffc0000)
427 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
428
429 if (mask & 0x0003ffff)
430 return dm_readl(DM1105_GPIOVAL) & mask & 0x0003ffff;
431
432 return 0;
433}
434
435static void dm1105_gpio_enable(struct dm1105_dev *dev, u32 mask, int asoutput)
436{
437 if (mask & 0xfffc0000)
438 printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
439
440 if ((mask & 0x0003ffff) && asoutput)
441 dm_clearl(DM1105_GPIOCTR, mask & 0x0003ffff);
442 else if ((mask & 0x0003ffff) && !asoutput)
443 dm_setl(DM1105_GPIOCTR, mask & 0x0003ffff);
444
445}
446
447static void dm1105_setline(struct dm1105_dev *dev, u32 line, int state)
448{
449 if (state)
450 dm1105_gpio_enable(dev, line, 0);
451 else {
452 dm1105_gpio_enable(dev, line, 1);
453 dm1105_gpio_clear(dev, line);
454 }
455}
456
457static void dm1105_setsda(void *data, int state)
458{
459 struct dm1105_dev *dev = data;
460
461 dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_sda, state);
462}
463
464static void dm1105_setscl(void *data, int state)
465{
466 struct dm1105_dev *dev = data;
467
468 dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_scl, state);
469}
470
471static int dm1105_getsda(void *data)
472{
473 struct dm1105_dev *dev = data;
474
475 return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_sda)
476 ? 1 : 0;
477}
478
479static int dm1105_getscl(void *data)
480{
481 struct dm1105_dev *dev = data;
482
483 return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_scl)
484 ? 1 : 0;
485}
486
331static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap, 487static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap,
332 struct i2c_msg *msgs, int num) 488 struct i2c_msg *msgs, int num)
333{ 489{
@@ -436,31 +592,20 @@ static inline struct dm1105_dev *frontend_to_dm1105_dev(struct dvb_frontend *fe)
436static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) 592static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
437{ 593{
438 struct dm1105_dev *dev = frontend_to_dm1105_dev(fe); 594 struct dm1105_dev *dev = frontend_to_dm1105_dev(fe);
439 u32 lnb_mask, lnb_13v, lnb_18v, lnb_off;
440 595
441 switch (dev->boardnr) { 596 dm1105_gpio_enable(dev, dm1105_boards[dev->boardnr].lnb.mask, 1);
442 case DM1105_BOARD_AXESS_DM05:
443 lnb_mask = DM05_LNB_MASK;
444 lnb_off = DM05_LNB_OFF;
445 lnb_13v = DM05_LNB_13V;
446 lnb_18v = DM05_LNB_18V;
447 break;
448 case DM1105_BOARD_DVBWORLD_2002:
449 case DM1105_BOARD_DVBWORLD_2004:
450 default:
451 lnb_mask = DM1105_LNB_MASK;
452 lnb_off = DM1105_LNB_OFF;
453 lnb_13v = DM1105_LNB_13V;
454 lnb_18v = DM1105_LNB_18V;
455 }
456
457 dm_writel(DM1105_GPIOCTR, lnb_mask);
458 if (voltage == SEC_VOLTAGE_18) 597 if (voltage == SEC_VOLTAGE_18)
459 dm_writel(DM1105_GPIOVAL, lnb_18v); 598 dm1105_gpio_andor(dev,
599 dm1105_boards[dev->boardnr].lnb.mask,
600 dm1105_boards[dev->boardnr].lnb.v18);
460 else if (voltage == SEC_VOLTAGE_13) 601 else if (voltage == SEC_VOLTAGE_13)
461 dm_writel(DM1105_GPIOVAL, lnb_13v); 602 dm1105_gpio_andor(dev,
603 dm1105_boards[dev->boardnr].lnb.mask,
604 dm1105_boards[dev->boardnr].lnb.v13);
462 else 605 else
463 dm_writel(DM1105_GPIOVAL, lnb_off); 606 dm1105_gpio_andor(dev,
607 dm1105_boards[dev->boardnr].lnb.mask,
608 dm1105_boards[dev->boardnr].lnb.off);
464 609
465 return 0; 610 return 0;
466} 611}
@@ -708,6 +853,38 @@ static int __devinit frontend_init(struct dm1105_dev *dev)
708 int ret; 853 int ret;
709 854
710 switch (dev->boardnr) { 855 switch (dev->boardnr) {
856 case DM1105_BOARD_UNBRANDED_I2C_ON_GPIO:
857 dm1105_gpio_enable(dev, GPIO15, 1);
858 dm1105_gpio_clear(dev, GPIO15);
859 msleep(100);
860 dm1105_gpio_set(dev, GPIO15);
861 msleep(200);
862 dev->fe = dvb_attach(
863 stv0299_attach, &sharp_z0194a_config,
864 &dev->i2c_bb_adap);
865 if (dev->fe) {
866 dev->fe->ops.set_voltage = dm1105_set_voltage;
867 dvb_attach(dvb_pll_attach, dev->fe, 0x60,
868 &dev->i2c_bb_adap, DVB_PLL_OPERA1);
869 break;
870 }
871
872 dev->fe = dvb_attach(
873 stv0288_attach, &earda_config,
874 &dev->i2c_bb_adap);
875 if (dev->fe) {
876 dev->fe->ops.set_voltage = dm1105_set_voltage;
877 dvb_attach(stb6000_attach, dev->fe, 0x61,
878 &dev->i2c_bb_adap);
879 break;
880 }
881
882 dev->fe = dvb_attach(
883 si21xx_attach, &serit_config,
884 &dev->i2c_bb_adap);
885 if (dev->fe)
886 dev->fe->ops.set_voltage = dm1105_set_voltage;
887 break;
711 case DM1105_BOARD_DVBWORLD_2004: 888 case DM1105_BOARD_DVBWORLD_2004:
712 dev->fe = dvb_attach( 889 dev->fe = dvb_attach(
713 cx24116_attach, &serit_sp2633_config, 890 cx24116_attach, &serit_sp2633_config,
@@ -870,11 +1047,32 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
870 if (ret < 0) 1047 if (ret < 0)
871 goto err_dm1105_hw_exit; 1048 goto err_dm1105_hw_exit;
872 1049
1050 i2c_set_adapdata(&dev->i2c_bb_adap, dev);
1051 strcpy(dev->i2c_bb_adap.name, DM1105_I2C_GPIO_NAME);
1052 dev->i2c_bb_adap.owner = THIS_MODULE;
1053 dev->i2c_bb_adap.dev.parent = &pdev->dev;
1054 dev->i2c_bb_adap.algo_data = &dev->i2c_bit;
1055 dev->i2c_bit.data = dev;
1056 dev->i2c_bit.setsda = dm1105_setsda;
1057 dev->i2c_bit.setscl = dm1105_setscl;
1058 dev->i2c_bit.getsda = dm1105_getsda;
1059 dev->i2c_bit.getscl = dm1105_getscl;
1060 dev->i2c_bit.udelay = 10;
1061 dev->i2c_bit.timeout = 10;
1062
1063 /* Raise SCL and SDA */
1064 dm1105_setsda(dev, 1);
1065 dm1105_setscl(dev, 1);
1066
1067 ret = i2c_bit_add_bus(&dev->i2c_bb_adap);
1068 if (ret < 0)
1069 goto err_i2c_del_adapter;
1070
873 /* dvb */ 1071 /* dvb */
874 ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME, 1072 ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME,
875 THIS_MODULE, &pdev->dev, adapter_nr); 1073 THIS_MODULE, &pdev->dev, adapter_nr);
876 if (ret < 0) 1074 if (ret < 0)
877 goto err_i2c_del_adapter; 1075 goto err_i2c_del_adapters;
878 1076
879 dvb_adapter = &dev->dvb_adapter; 1077 dvb_adapter = &dev->dvb_adapter;
880 1078
@@ -952,6 +1150,8 @@ err_dvb_dmx_release:
952 dvb_dmx_release(dvbdemux); 1150 dvb_dmx_release(dvbdemux);
953err_dvb_unregister_adapter: 1151err_dvb_unregister_adapter:
954 dvb_unregister_adapter(dvb_adapter); 1152 dvb_unregister_adapter(dvb_adapter);
1153err_i2c_del_adapters:
1154 i2c_del_adapter(&dev->i2c_bb_adap);
955err_i2c_del_adapter: 1155err_i2c_del_adapter:
956 i2c_del_adapter(&dev->i2c_adap); 1156 i2c_del_adapter(&dev->i2c_adap);
957err_dm1105_hw_exit: 1157err_dm1105_hw_exit:
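
The LNB switching above now goes through dm1105_gpio_andor() with the per-board
lnb.mask/off/v13/v18 values instead of writing DM1105_GPIOVAL directly. Below is a
minimal stand-alone sketch (plain C, the register is simulated) of the
read-modify-write behaviour that dm_andorl()/dm1105_gpio_andor() are assumed to
perform, and of why the new DM1105_LNB_MASK keeps the bit-banged I2C pins untouched:

    #include <stdio.h>
    #include <stdint.h>

    #define GPIO08   (1 << 8)
    #define GPIO13   (1 << 13)
    #define GPIO14   (1 << 14)
    #define GPIO16   (1 << 16)
    #define GPIO17   (1 << 17)
    #define GPIO_ALL 0x03ffff

    #define DM1105_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13))
    #define DM1105_LNB_13V  (GPIO16 | GPIO08)

    static uint32_t gpioval;  /* stands in for the DM1105_GPIOVAL register */

    /* assumed and-or update: bits inside 'mask' take their value from 'val',
     * bits outside 'mask' are preserved */
    static void gpio_andor(uint32_t mask, uint32_t val)
    {
        gpioval = (gpioval & ~mask) | (val & mask);
    }

    int main(void)
    {
        gpioval = GPIO14 | GPIO13;                 /* I2C SCL/SDA currently high */
        gpio_andor(DM1105_LNB_MASK, DM1105_LNB_13V);
        printf("GPIOVAL = 0x%06x\n", (unsigned)gpioval);  /* 0x016100: LNB bits set, GPIO13/14 kept */
        return 0;
    }

Because GPIO13 and GPIO14 are excluded from DM1105_LNB_MASK, a voltage change can no
longer disturb the GPIO-based I2C bus that the unbranded board carries on those pins.
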
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index f36f471deae2..37b146961ae2 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -207,17 +207,6 @@ static int lme2510_stream_restart(struct dvb_usb_device *d)
207 rbuff, sizeof(rbuff)); 207 rbuff, sizeof(rbuff));
208 return ret; 208 return ret;
209} 209}
210static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u32 keypress)
211{
212 struct dvb_usb_device *d = adap->dev;
213
214 deb_info(1, "INT Key Keypress =%04x", keypress);
215
216 if (keypress > 0)
217 rc_keydown(d->rc_dev, keypress, 0);
218
219 return 0;
220}
221 210
222static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) 211static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
223{ 212{
@@ -256,6 +245,7 @@ static void lme2510_int_response(struct urb *lme_urb)
256 struct lme2510_state *st = adap->dev->priv; 245 struct lme2510_state *st = adap->dev->priv;
257 static u8 *ibuf, *rbuf; 246 static u8 *ibuf, *rbuf;
258 int i = 0, offset; 247 int i = 0, offset;
248 u32 key;
259 249
260 switch (lme_urb->status) { 250 switch (lme_urb->status) {
261 case 0: 251 case 0:
@@ -282,10 +272,16 @@ static void lme2510_int_response(struct urb *lme_urb)
282 272
283 switch (ibuf[0]) { 273 switch (ibuf[0]) {
284 case 0xaa: 274 case 0xaa:
285 debug_data_snipet(1, "INT Remote data snipet in", ibuf); 275 debug_data_snipet(1, "INT Remote data snipet", ibuf);
286 lme2510_remote_keypress(adap, 276 if ((ibuf[4] + ibuf[5]) == 0xff) {
287 (u32)(ibuf[2] << 24) + (ibuf[3] << 16) + 277 key = ibuf[5];
288 (ibuf[4] << 8) + ibuf[5]); 278 key += (ibuf[3] > 0)
279 ? (ibuf[3] ^ 0xff) << 8 : 0;
280 key += (ibuf[2] ^ 0xff) << 16;
281 deb_info(1, "INT Key =%08x", key);
282 if (adap->dev->rc_dev != NULL)
283 rc_keydown(adap->dev->rc_dev, key, 0);
284 }
289 break; 285 break;
290 case 0xbb: 286 case 0xbb:
291 switch (st->tuner_config) { 287 switch (st->tuner_config) {
@@ -691,45 +687,6 @@ static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
691 return (ret < 0) ? -ENODEV : 0; 687 return (ret < 0) ? -ENODEV : 0;
692} 688}
693 689
694static int lme2510_int_service(struct dvb_usb_adapter *adap)
695{
696 struct dvb_usb_device *d = adap->dev;
697 struct rc_dev *rc;
698 int ret;
699
700 info("STA Configuring Remote");
701
702 rc = rc_allocate_device();
703 if (!rc)
704 return -ENOMEM;
705
706 usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
707 strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
708
709 rc->input_name = "LME2510 Remote Control";
710 rc->input_phys = d->rc_phys;
711 rc->map_name = RC_MAP_LME2510;
712 rc->driver_name = "LME 2510";
713 usb_to_input_id(d->udev, &rc->input_id);
714
715 ret = rc_register_device(rc);
716 if (ret) {
717 rc_free_device(rc);
718 return ret;
719 }
720 d->rc_dev = rc;
721
722 /* Start the Interrupt */
723 ret = lme2510_int_read(adap);
724 if (ret < 0) {
725 rc_unregister_device(rc);
726 info("INT Unable to start Interrupt Service");
727 return -ENODEV;
728 }
729
730 return 0;
731}
732
733static u8 check_sum(u8 *p, u8 len) 690static u8 check_sum(u8 *p, u8 len)
734{ 691{
735 u8 sum = 0; 692 u8 sum = 0;
@@ -831,7 +788,7 @@ static int lme_firmware_switch(struct usb_device *udev, int cold)
831 788
832 cold_fw = !cold; 789 cold_fw = !cold;
833 790
834 if (udev->descriptor.idProduct == 0x1122) { 791 if (le16_to_cpu(udev->descriptor.idProduct) == 0x1122) {
835 switch (dvb_usb_lme2510_firmware) { 792 switch (dvb_usb_lme2510_firmware) {
836 default: 793 default:
837 dvb_usb_lme2510_firmware = TUNER_S0194; 794 dvb_usb_lme2510_firmware = TUNER_S0194;
@@ -1053,8 +1010,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
1053 1010
1054 1011
1055end: if (ret) { 1012end: if (ret) {
1056 kfree(adap->fe); 1013 if (adap->fe) {
1057 adap->fe = NULL; 1014 dvb_frontend_detach(adap->fe);
1015 adap->fe = NULL;
1016 }
1017 adap->dev->props.rc.core.rc_codes = NULL;
1058 return -ENODEV; 1018 return -ENODEV;
1059 } 1019 }
1060 1020
@@ -1097,8 +1057,12 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
1097 return -ENODEV; 1057 return -ENODEV;
1098 } 1058 }
1099 1059
1100 /* Start the Interrupt & Remote*/ 1060 /* Start the Interrupt*/
1101 ret = lme2510_int_service(adap); 1061 ret = lme2510_int_read(adap);
1062 if (ret < 0) {
1063 info("INT Unable to start Interrupt Service");
1064 return -ENODEV;
1065 }
1102 1066
1103 return ret; 1067 return ret;
1104} 1068}
@@ -1204,6 +1168,12 @@ static struct dvb_usb_device_properties lme2510_properties = {
1204 } 1168 }
1205 } 1169 }
1206 }, 1170 },
1171 .rc.core = {
1172 .protocol = RC_TYPE_NEC,
1173 .module_name = "LME2510 Remote Control",
1174 .allowed_protos = RC_TYPE_NEC,
1175 .rc_codes = RC_MAP_LME2510,
1176 },
1207 .power_ctrl = lme2510_powerup, 1177 .power_ctrl = lme2510_powerup,
1208 .identify_state = lme2510_identify_state, 1178 .identify_state = lme2510_identify_state,
1209 .i2c_algo = &lme2510_i2c_algo, 1179 .i2c_algo = &lme2510_i2c_algo,
@@ -1246,6 +1216,12 @@ static struct dvb_usb_device_properties lme2510c_properties = {
1246 } 1216 }
1247 } 1217 }
1248 }, 1218 },
1219 .rc.core = {
1220 .protocol = RC_TYPE_NEC,
1221 .module_name = "LME2510 Remote Control",
1222 .allowed_protos = RC_TYPE_NEC,
1223 .rc_codes = RC_MAP_LME2510,
1224 },
1249 .power_ctrl = lme2510_powerup, 1225 .power_ctrl = lme2510_powerup,
1250 .identify_state = lme2510_identify_state, 1226 .identify_state = lme2510_identify_state,
1251 .i2c_algo = &lme2510_i2c_algo, 1227 .i2c_algo = &lme2510_i2c_algo,
@@ -1269,19 +1245,21 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
1269 adap->feedcount = 0; 1245 adap->feedcount = 0;
1270 } 1246 }
1271 1247
1272 if (st->lme_urb != NULL) { 1248 if (st->usb_buffer != NULL) {
1273 st->i2c_talk_onoff = 1; 1249 st->i2c_talk_onoff = 1;
1274 st->signal_lock = 0; 1250 st->signal_lock = 0;
1275 st->signal_level = 0; 1251 st->signal_level = 0;
1276 st->signal_sn = 0; 1252 st->signal_sn = 0;
1277 buffer = st->usb_buffer; 1253 buffer = st->usb_buffer;
1254 }
1255
1256 if (st->lme_urb != NULL) {
1278 usb_kill_urb(st->lme_urb); 1257 usb_kill_urb(st->lme_urb);
1279 usb_free_coherent(d->udev, 5000, st->buffer, 1258 usb_free_coherent(d->udev, 5000, st->buffer,
1280 st->lme_urb->transfer_dma); 1259 st->lme_urb->transfer_dma);
1281 info("Interrupt Service Stopped"); 1260 info("Interrupt Service Stopped");
1282 rc_unregister_device(d->rc_dev);
1283 info("Remote Stopped");
1284 } 1261 }
1262
1285 return buffer; 1263 return buffer;
1286} 1264}
1287 1265
@@ -1293,7 +1271,8 @@ static void lme2510_exit(struct usb_interface *intf)
1293 if (d != NULL) { 1271 if (d != NULL) {
1294 usb_buffer = lme2510_exit_int(d); 1272 usb_buffer = lme2510_exit_int(d);
1295 dvb_usb_device_exit(intf); 1273 dvb_usb_device_exit(intf);
1296 kfree(usb_buffer); 1274 if (usb_buffer != NULL)
1275 kfree(usb_buffer);
1297 } 1276 }
1298} 1277}
1299 1278
@@ -1327,5 +1306,5 @@ module_exit(lme2510_module_exit);
1327 1306
1328MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); 1307MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
1329MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); 1308MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
1330MODULE_VERSION("1.86"); 1309MODULE_VERSION("1.88");
1331MODULE_LICENSE("GPL"); 1310MODULE_LICENSE("GPL");
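
The interrupt handler above now folds the remote bytes into an NEC scancode itself
(checking that ibuf[4] and ibuf[5] are complements, then packing the inverted address
bytes and the command byte) and hands it to rc-core via rc_keydown(). A small
stand-alone sketch of that arithmetic, which also shows how the old 32-bit table
values map onto the trimmed scancodes used in rc-lme2510.c further down (the ibuf[1]
byte here is just a placeholder):

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors the key assembly added to lme2510_int_response() */
    static uint32_t lme2510_key(const uint8_t *ibuf)
    {
        uint32_t key;

        if (ibuf[4] + ibuf[5] != 0xff)
            return 0;                     /* complement check failed */

        key  = ibuf[5];
        key += (ibuf[3] > 0) ? (ibuf[3] ^ 0xff) << 8 : 0;
        key += (ibuf[2] ^ 0xff) << 16;
        return key;
    }

    int main(void)
    {
        /* bytes corresponding to the old table value 0xef12ba45 (KEY_0, type 1) */
        uint8_t type1[6] = { 0xaa, 0x00, 0xef, 0x12, 0xba, 0x45 };
        /* bytes corresponding to the old table value 0xff40ea15 (KEY_0, type 2) */
        uint8_t type2[6] = { 0xaa, 0x00, 0xff, 0x40, 0xea, 0x15 };

        printf("type 1 -> 0x%06x\n", (unsigned)lme2510_key(type1));  /* 0x10ed45 */
        printf("type 2 -> 0x%06x\n", (unsigned)lme2510_key(type2));  /* 0x00bf15 */
        return 0;
    }
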
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c
index 2da55ec20392..d70eee00f33a 100644
--- a/drivers/media/dvb/frontends/stb0899_algo.c
+++ b/drivers/media/dvb/frontends/stb0899_algo.c
@@ -23,7 +23,7 @@
23#include "stb0899_priv.h" 23#include "stb0899_priv.h"
24#include "stb0899_reg.h" 24#include "stb0899_reg.h"
25 25
26inline u32 stb0899_do_div(u64 n, u32 d) 26static inline u32 stb0899_do_div(u64 n, u32 d)
27{ 27{
28 /* wrap do_div() for ease of use */ 28 /* wrap do_div() for ease of use */
29 29
diff --git a/drivers/media/dvb/frontends/tda8261.c b/drivers/media/dvb/frontends/tda8261.c
index 1742056a34e8..53c7d8f1df28 100644
--- a/drivers/media/dvb/frontends/tda8261.c
+++ b/drivers/media/dvb/frontends/tda8261.c
@@ -224,7 +224,6 @@ exit:
224} 224}
225 225
226EXPORT_SYMBOL(tda8261_attach); 226EXPORT_SYMBOL(tda8261_attach);
227MODULE_PARM_DESC(verbose, "Set verbosity level");
228 227
229MODULE_AUTHOR("Manu Abraham"); 228MODULE_AUTHOR("Manu Abraham");
230MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner"); 229MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 5c2a9058c09f..e83e84003025 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -412,8 +412,7 @@ static int __devinit maxiradio_init_one(struct pci_dev *pdev, const struct pci_d
412 goto err_out_free_region; 412 goto err_out_free_region;
413 } 413 }
414 414
415 v4l2_info(v4l2_dev, "version " DRIVER_VERSION 415 v4l2_info(v4l2_dev, "version " DRIVER_VERSION "\n");
416 " time " __TIME__ " " __DATE__ "\n");
417 416
418 v4l2_info(v4l2_dev, "found Guillemot MAXI Radio device (io = 0x%x)\n", 417 v4l2_info(v4l2_dev, "found Guillemot MAXI Radio device (io = 0x%x)\n",
419 dev->io); 418 dev->io);
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index 1e3a8dd820a4..a185610b376b 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -21,7 +21,6 @@
21#include <media/v4l2-ioctl.h> 21#include <media/v4l2-ioctl.h>
22#include <media/v4l2-device.h> 22#include <media/v4l2-device.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24#include <linux/mfd/core.h>
25#include <linux/interrupt.h> 24#include <linux/interrupt.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/i2c.h> 26#include <linux/i2c.h>
@@ -149,7 +148,7 @@ static const struct v4l2_file_operations timbradio_fops = {
149 148
150static int __devinit timbradio_probe(struct platform_device *pdev) 149static int __devinit timbradio_probe(struct platform_device *pdev)
151{ 150{
152 struct timb_radio_platform_data *pdata = mfd_get_data(pdev); 151 struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
153 struct timbradio *tr; 152 struct timbradio *tr;
154 int err; 153 int err;
155 154
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index e2550dc2944f..459f7272d326 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
1382 1382
1383 switch (ctrl->id) { 1383 switch (ctrl->id) {
1384 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: 1384 case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
1385 ctrl->val = wl1273_fm_get_tx_ctune(radio); 1385 ctrl->cur.val = wl1273_fm_get_tx_ctune(radio);
1386 break; 1386 break;
1387 1387
1388 default: 1388 default:
@@ -1990,7 +1990,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev)
1990 1990
1991static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev) 1991static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
1992{ 1992{
1993 struct wl1273_core **core = mfd_get_data(pdev); 1993 struct wl1273_core **core = pdev->dev.platform_data;
1994 struct wl1273_device *radio; 1994 struct wl1273_device *radio;
1995 struct v4l2_ctrl *ctrl; 1995 struct v4l2_ctrl *ctrl;
1996 int r = 0; 1996 int r = 0;
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index d50e5ac75ab6..87010724f914 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -191,7 +191,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
191 191
192 switch (ctrl->id) { 192 switch (ctrl->id) {
193 case V4L2_CID_TUNE_ANTENNA_CAPACITOR: 193 case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
194 ctrl->val = fm_tx_get_tune_cap_val(fmdev); 194 ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev);
195 break; 195 break;
196 default: 196 default:
197 fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id); 197 fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 154c337f00fd..7d4bbc226d06 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -148,6 +148,18 @@ config IR_ITE_CIR
148 To compile this driver as a module, choose M here: the 148 To compile this driver as a module, choose M here: the
149 module will be called ite-cir. 149 module will be called ite-cir.
150 150
151config IR_FINTEK
152 tristate "Fintek Consumer Infrared Transceiver"
153 depends on PNP
154 depends on RC_CORE
155 ---help---
156 Say Y here to enable support for integrated infrared receiver
157 /transciever made by Fintek. This chip is found on assorted
158 Jetway motherboards (and of course, possibly others).
159
160 To compile this driver as a module, choose M here: the
161 module will be called fintek-cir.
162
151config IR_NUVOTON 163config IR_NUVOTON
152 tristate "Nuvoton w836x7hg Consumer Infrared Transceiver" 164 tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
153 depends on PNP 165 depends on PNP
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 1f90a219a162..52830e5f4eaa 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
16obj-$(CONFIG_IR_IMON) += imon.o 16obj-$(CONFIG_IR_IMON) += imon.o
17obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o 17obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
18obj-$(CONFIG_IR_MCEUSB) += mceusb.o 18obj-$(CONFIG_IR_MCEUSB) += mceusb.o
19obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
19obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o 20obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
20obj-$(CONFIG_IR_ENE) += ene_ir.o 21obj-$(CONFIG_IR_ENE) += ene_ir.o
21obj-$(CONFIG_IR_REDRAT3) += redrat3.o 22obj-$(CONFIG_IR_REDRAT3) += redrat3.o
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
new file mode 100644
index 000000000000..8fa539dde1b4
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.c
@@ -0,0 +1,684 @@
1/*
2 * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
3 *
4 * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
5 *
6 * Special thanks to Fintek for providing hardware and spec sheets.
7 * This driver is based upon the nuvoton, ite and ene drivers for
8 * similar hardware.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23 * USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pnp.h>
29#include <linux/io.h>
30#include <linux/interrupt.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <media/rc-core.h>
34#include <linux/pci_ids.h>
35
36#include "fintek-cir.h"
37
38/* write val to config reg */
39static inline void fintek_cr_write(struct fintek_dev *fintek, u8 val, u8 reg)
40{
41 fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
42 __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
43 outb(reg, fintek->cr_ip);
44 outb(val, fintek->cr_dp);
45}
46
47/* read val from config reg */
48static inline u8 fintek_cr_read(struct fintek_dev *fintek, u8 reg)
49{
50 u8 val;
51
52 outb(reg, fintek->cr_ip);
53 val = inb(fintek->cr_dp);
54
55 fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)",
56 __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
57 return val;
58}
59
60/* update config register bit without changing other bits */
61static inline void fintek_set_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
62{
63 u8 tmp = fintek_cr_read(fintek, reg) | val;
64 fintek_cr_write(fintek, tmp, reg);
65}
66
67/* clear config register bit without changing other bits */
68static inline void fintek_clear_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
69{
70 u8 tmp = fintek_cr_read(fintek, reg) & ~val;
71 fintek_cr_write(fintek, tmp, reg);
72}
73
74/* enter config mode */
75static inline void fintek_config_mode_enable(struct fintek_dev *fintek)
76{
77 /* Enabling Config Mode explicitly requires writing 2x */
78 outb(CONFIG_REG_ENABLE, fintek->cr_ip);
79 outb(CONFIG_REG_ENABLE, fintek->cr_ip);
80}
81
82/* exit config mode */
83static inline void fintek_config_mode_disable(struct fintek_dev *fintek)
84{
85 outb(CONFIG_REG_DISABLE, fintek->cr_ip);
86}
87
88/*
89 * When you want to address a specific logical device, write its logical
90 * device number to GCR_LOGICAL_DEV_NO
91 */
92static inline void fintek_select_logical_dev(struct fintek_dev *fintek, u8 ldev)
93{
94 fintek_cr_write(fintek, ldev, GCR_LOGICAL_DEV_NO);
95}
96
97/* write val to cir config register */
98static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 offset)
99{
100 outb(val, fintek->cir_addr + offset);
101}
102
103/* read val from cir config register */
104static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
105{
106 u8 val;
107
108 val = inb(fintek->cir_addr + offset);
109
110 return val;
111}
112
113#define pr_reg(text, ...) \
114 printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
115
116/* dump current cir register contents */
117static void cir_dump_regs(struct fintek_dev *fintek)
118{
119 fintek_config_mode_enable(fintek);
120 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
121
122 pr_reg("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
123 pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
124 (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
125 fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO));
126 pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
127 fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
128
129 fintek_config_mode_disable(fintek);
130
131 pr_reg("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
132 pr_reg(" * STATUS: 0x%x\n", fintek_cir_reg_read(fintek, CIR_STATUS));
133 pr_reg(" * CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_CONTROL));
134 pr_reg(" * RX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_RX_DATA));
135 pr_reg(" * TX_CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
136 pr_reg(" * TX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_DATA));
137}
138
139/* detect hardware features */
140static int fintek_hw_detect(struct fintek_dev *fintek)
141{
142 unsigned long flags;
143 u8 chip_major, chip_minor;
144 u8 vendor_major, vendor_minor;
145 u8 portsel, ir_class;
146 u16 vendor;
147 int ret = 0;
148
149 fintek_config_mode_enable(fintek);
150
151 /* Check if we're using config port 0x4e or 0x2e */
152 portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
153 if (portsel == 0xff) {
154 fit_pr(KERN_INFO, "first portsel read was bunk, trying alt");
155 fintek_config_mode_disable(fintek);
156 fintek->cr_ip = CR_INDEX_PORT2;
157 fintek->cr_dp = CR_DATA_PORT2;
158 fintek_config_mode_enable(fintek);
159 portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
160 }
161 fit_dbg("portsel reg: 0x%02x", portsel);
162
163 ir_class = fintek_cir_reg_read(fintek, CIR_CR_CLASS);
164 fit_dbg("ir_class reg: 0x%02x", ir_class);
165
166 switch (ir_class) {
167 case CLASS_RX_2TX:
168 case CLASS_RX_1TX:
169 fintek->hw_tx_capable = true;
170 break;
171 case CLASS_RX_ONLY:
172 default:
173 fintek->hw_tx_capable = false;
174 break;
175 }
176
177 chip_major = fintek_cr_read(fintek, GCR_CHIP_ID_HI);
178 chip_minor = fintek_cr_read(fintek, GCR_CHIP_ID_LO);
179
180 vendor_major = fintek_cr_read(fintek, GCR_VENDOR_ID_HI);
181 vendor_minor = fintek_cr_read(fintek, GCR_VENDOR_ID_LO);
182 vendor = vendor_major << 8 | vendor_minor;
183
184 if (vendor != VENDOR_ID_FINTEK)
185 fit_pr(KERN_WARNING, "Unknown vendor ID: 0x%04x", vendor);
186 else
187 fit_dbg("Read Fintek vendor ID from chip");
188
189 fintek_config_mode_disable(fintek);
190
191 spin_lock_irqsave(&fintek->fintek_lock, flags);
192 fintek->chip_major = chip_major;
193 fintek->chip_minor = chip_minor;
194 fintek->chip_vendor = vendor;
195 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
196
197 return ret;
198}
199
200static void fintek_cir_ldev_init(struct fintek_dev *fintek)
201{
202 /* Select CIR logical device and enable */
203 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
204 fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
205
206 /* Write allocated CIR address and IRQ information to hardware */
207 fintek_cr_write(fintek, fintek->cir_addr >> 8, CIR_CR_BASE_ADDR_HI);
208 fintek_cr_write(fintek, fintek->cir_addr & 0xff, CIR_CR_BASE_ADDR_LO);
209
210 fintek_cr_write(fintek, fintek->cir_irq, CIR_CR_IRQ_SEL);
211
212 fit_dbg("CIR initialized, base io address: 0x%lx, irq: %d (len: %d)",
213 fintek->cir_addr, fintek->cir_irq, fintek->cir_port_len);
214}
215
216/* enable CIR interrupts */
217static void fintek_enable_cir_irq(struct fintek_dev *fintek)
218{
219 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
220}
221
222static void fintek_cir_regs_init(struct fintek_dev *fintek)
223{
224 /* clear any and all stray interrupts */
225 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
226
227 /* and finally, enable interrupts */
228 fintek_enable_cir_irq(fintek);
229}
230
231static void fintek_enable_wake(struct fintek_dev *fintek)
232{
233 fintek_config_mode_enable(fintek);
234 fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI);
235
236 /* Allow CIR PME's to wake system */
237 fintek_set_reg_bit(fintek, ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG);
238 /* Enable CIR PME's */
239 fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG);
240 /* Clear CIR PME status register */
241 fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_CLR_REG);
242 /* Save state */
243 fintek_set_reg_bit(fintek, ACPI_STATE_CIR_BIT, LDEV_ACPI_STATE_REG);
244
245 fintek_config_mode_disable(fintek);
246}
247
248static int fintek_cmdsize(u8 cmd, u8 subcmd)
249{
250 int datasize = 0;
251
252 switch (cmd) {
253 case BUF_COMMAND_NULL:
254 if (subcmd == BUF_HW_CMD_HEADER)
255 datasize = 1;
256 break;
257 case BUF_HW_CMD_HEADER:
258 if (subcmd == BUF_CMD_G_REVISION)
259 datasize = 2;
260 break;
261 case BUF_COMMAND_HEADER:
262 switch (subcmd) {
263 case BUF_CMD_S_CARRIER:
264 case BUF_CMD_S_TIMEOUT:
265 case BUF_RSP_PULSE_COUNT:
266 datasize = 2;
267 break;
268 case BUF_CMD_SIG_END:
269 case BUF_CMD_S_TXMASK:
270 case BUF_CMD_S_RXSENSOR:
271 datasize = 1;
272 break;
273 }
274 }
275
276 return datasize;
277}
278
279/* process ir data stored in driver buffer */
280static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
281{
282 DEFINE_IR_RAW_EVENT(rawir);
283 u8 sample;
284 int i;
285
286 for (i = 0; i < fintek->pkts; i++) {
287 sample = fintek->buf[i];
288 switch (fintek->parser_state) {
289 case CMD_HEADER:
290 fintek->cmd = sample;
291 if ((fintek->cmd == BUF_COMMAND_HEADER) ||
292 ((fintek->cmd & BUF_COMMAND_MASK) !=
293 BUF_PULSE_BIT)) {
294 fintek->parser_state = SUBCMD;
295 continue;
296 }
297 fintek->rem = (fintek->cmd & BUF_LEN_MASK);
298 fit_dbg("%s: rem: 0x%02x", __func__, fintek->rem);
299 if (fintek->rem)
300 fintek->parser_state = PARSE_IRDATA;
301 else
302 ir_raw_event_reset(fintek->rdev);
303 break;
304 case SUBCMD:
305 fintek->rem = fintek_cmdsize(fintek->cmd, sample);
306 fintek->parser_state = CMD_DATA;
307 break;
308 case CMD_DATA:
309 fintek->rem--;
310 break;
311 case PARSE_IRDATA:
312 fintek->rem--;
313 init_ir_raw_event(&rawir);
314 rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
315 rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK)
316 * CIR_SAMPLE_PERIOD);
317
318 fit_dbg("Storing %s with duration %d",
319 rawir.pulse ? "pulse" : "space",
320 rawir.duration);
321 ir_raw_event_store_with_filter(fintek->rdev, &rawir);
322 break;
323 }
324
325 if ((fintek->parser_state != CMD_HEADER) && !fintek->rem)
326 fintek->parser_state = CMD_HEADER;
327 }
328
329 fintek->pkts = 0;
330
331 fit_dbg("Calling ir_raw_event_handle");
332 ir_raw_event_handle(fintek->rdev);
333}
334
335/* copy data from hardware rx register into driver buffer */
336static void fintek_get_rx_ir_data(struct fintek_dev *fintek, u8 rx_irqs)
337{
338 unsigned long flags;
339 u8 sample, status;
340
341 spin_lock_irqsave(&fintek->fintek_lock, flags);
342
343 /*
344 * We must read data from CIR_RX_DATA until the hardware IR buffer
345 * is empty and clears the RX_TIMEOUT and/or RX_RECEIVE flags in
346 * the CIR_STATUS register
347 */
348 do {
349 sample = fintek_cir_reg_read(fintek, CIR_RX_DATA);
350 fit_dbg("%s: sample: 0x%02x", __func__, sample);
351
352 fintek->buf[fintek->pkts] = sample;
353 fintek->pkts++;
354
355 status = fintek_cir_reg_read(fintek, CIR_STATUS);
356 if (!(status & CIR_STATUS_IRQ_EN))
357 break;
358 } while (status & rx_irqs);
359
360 fintek_process_rx_ir_data(fintek);
361
362 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
363}
364
365static void fintek_cir_log_irqs(u8 status)
366{
367 fit_pr(KERN_INFO, "IRQ 0x%02x:%s%s%s%s%s", status,
368 status & CIR_STATUS_IRQ_EN ? " IRQEN" : "",
369 status & CIR_STATUS_TX_FINISH ? " TXF" : "",
370 status & CIR_STATUS_TX_UNDERRUN ? " TXU" : "",
371 status & CIR_STATUS_RX_TIMEOUT ? " RXTO" : "",
372 status & CIR_STATUS_RX_RECEIVE ? " RXOK" : "");
373}
374
375/* interrupt service routine for incoming and outgoing CIR data */
376static irqreturn_t fintek_cir_isr(int irq, void *data)
377{
378 struct fintek_dev *fintek = data;
379 u8 status, rx_irqs;
380
381 fit_dbg_verbose("%s firing", __func__);
382
383 fintek_config_mode_enable(fintek);
384 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
385 fintek_config_mode_disable(fintek);
386
387 /*
388 * Get IR Status register contents. Write 1 to ack/clear
389 *
390 * bit: reg name - description
391 * 3: TX_FINISH - TX is finished
392 * 2: TX_UNDERRUN - TX underrun
393 * 1: RX_TIMEOUT - RX data timeout
394 * 0: RX_RECEIVE - RX data received
395 */
396 status = fintek_cir_reg_read(fintek, CIR_STATUS);
397 if (!(status & CIR_STATUS_IRQ_MASK) || status == 0xff) {
398 fit_dbg_verbose("%s exiting, IRSTS 0x%02x", __func__, status);
399 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
400 return IRQ_RETVAL(IRQ_NONE);
401 }
402
403 if (debug)
404 fintek_cir_log_irqs(status);
405
406 rx_irqs = status & (CIR_STATUS_RX_RECEIVE | CIR_STATUS_RX_TIMEOUT);
407 if (rx_irqs)
408 fintek_get_rx_ir_data(fintek, rx_irqs);
409
410 /* ack/clear all irq flags we've got */
411 fintek_cir_reg_write(fintek, status, CIR_STATUS);
412
413 fit_dbg_verbose("%s done", __func__);
414 return IRQ_RETVAL(IRQ_HANDLED);
415}
416
417static void fintek_enable_cir(struct fintek_dev *fintek)
418{
419 /* set IRQ enabled */
420 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
421
422 fintek_config_mode_enable(fintek);
423
424 /* enable the CIR logical device */
425 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
426 fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
427
428 fintek_config_mode_disable(fintek);
429
430 /* clear all pending interrupts */
431 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
432
433 /* enable interrupts */
434 fintek_enable_cir_irq(fintek);
435}
436
437static void fintek_disable_cir(struct fintek_dev *fintek)
438{
439 fintek_config_mode_enable(fintek);
440
441 /* disable the CIR logical device */
442 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
443 fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
444
445 fintek_config_mode_disable(fintek);
446}
447
448static int fintek_open(struct rc_dev *dev)
449{
450 struct fintek_dev *fintek = dev->priv;
451 unsigned long flags;
452
453 spin_lock_irqsave(&fintek->fintek_lock, flags);
454 fintek_enable_cir(fintek);
455 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
456
457 return 0;
458}
459
460static void fintek_close(struct rc_dev *dev)
461{
462 struct fintek_dev *fintek = dev->priv;
463 unsigned long flags;
464
465 spin_lock_irqsave(&fintek->fintek_lock, flags);
466 fintek_disable_cir(fintek);
467 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
468}
469
470/* Allocate memory, probe hardware, and initialize everything */
471static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
472{
473 struct fintek_dev *fintek;
474 struct rc_dev *rdev;
475 int ret = -ENOMEM;
476
477 fintek = kzalloc(sizeof(struct fintek_dev), GFP_KERNEL);
478 if (!fintek)
479 return ret;
480
481 /* input device for IR remote (and tx) */
482 rdev = rc_allocate_device();
483 if (!rdev)
484 goto failure;
485
486 ret = -ENODEV;
487 /* validate pnp resources */
488 if (!pnp_port_valid(pdev, 0)) {
489 dev_err(&pdev->dev, "IR PNP Port not valid!\n");
490 goto failure;
491 }
492
493 if (!pnp_irq_valid(pdev, 0)) {
494 dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
495 goto failure;
496 }
497
498 fintek->cir_addr = pnp_port_start(pdev, 0);
499 fintek->cir_irq = pnp_irq(pdev, 0);
500 fintek->cir_port_len = pnp_port_len(pdev, 0);
501
502 fintek->cr_ip = CR_INDEX_PORT;
503 fintek->cr_dp = CR_DATA_PORT;
504
505 spin_lock_init(&fintek->fintek_lock);
506
507 ret = -EBUSY;
508 /* now claim resources */
509 if (!request_region(fintek->cir_addr,
510 fintek->cir_port_len, FINTEK_DRIVER_NAME))
511 goto failure;
512
513 if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
514 FINTEK_DRIVER_NAME, (void *)fintek))
515 goto failure;
516
517 pnp_set_drvdata(pdev, fintek);
518 fintek->pdev = pdev;
519
520 ret = fintek_hw_detect(fintek);
521 if (ret)
522 goto failure;
523
524 /* Initialize CIR & CIR Wake Logical Devices */
525 fintek_config_mode_enable(fintek);
526 fintek_cir_ldev_init(fintek);
527 fintek_config_mode_disable(fintek);
528
529 /* Initialize CIR & CIR Wake Config Registers */
530 fintek_cir_regs_init(fintek);
531
532 /* Set up the rc device */
533 rdev->priv = fintek;
534 rdev->driver_type = RC_DRIVER_IR_RAW;
535 rdev->allowed_protos = RC_TYPE_ALL;
536 rdev->open = fintek_open;
537 rdev->close = fintek_close;
538 rdev->input_name = FINTEK_DESCRIPTION;
539 rdev->input_phys = "fintek/cir0";
540 rdev->input_id.bustype = BUS_HOST;
541 rdev->input_id.vendor = VENDOR_ID_FINTEK;
542 rdev->input_id.product = fintek->chip_major;
543 rdev->input_id.version = fintek->chip_minor;
544 rdev->dev.parent = &pdev->dev;
545 rdev->driver_name = FINTEK_DRIVER_NAME;
546 rdev->map_name = RC_MAP_RC6_MCE;
547 rdev->timeout = US_TO_NS(1000);
548 /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
549 rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
550
551 ret = rc_register_device(rdev);
552 if (ret)
553 goto failure;
554
555 device_init_wakeup(&pdev->dev, true);
556 fintek->rdev = rdev;
557 fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
558 if (debug)
559 cir_dump_regs(fintek);
560
561 return 0;
562
563failure:
564 if (fintek->cir_irq)
565 free_irq(fintek->cir_irq, fintek);
566 if (fintek->cir_addr)
567 release_region(fintek->cir_addr, fintek->cir_port_len);
568
569 rc_free_device(rdev);
570 kfree(fintek);
571
572 return ret;
573}
574
575static void __devexit fintek_remove(struct pnp_dev *pdev)
576{
577 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
578 unsigned long flags;
579
580 spin_lock_irqsave(&fintek->fintek_lock, flags);
581 /* disable CIR */
582 fintek_disable_cir(fintek);
583 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
584 /* enable CIR Wake (for IR power-on) */
585 fintek_enable_wake(fintek);
586 spin_unlock_irqrestore(&fintek->fintek_lock, flags);
587
588 /* free resources */
589 free_irq(fintek->cir_irq, fintek);
590 release_region(fintek->cir_addr, fintek->cir_port_len);
591
592 rc_unregister_device(fintek->rdev);
593
594 kfree(fintek);
595}
596
597static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
598{
599 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
600
601 fit_dbg("%s called", __func__);
602
603 /* disable all CIR interrupts */
604 fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
605
606 fintek_config_mode_enable(fintek);
607
608 /* disable cir logical dev */
609 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
610 fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
611
612 fintek_config_mode_disable(fintek);
613
614 /* make sure wake is enabled */
615 fintek_enable_wake(fintek);
616
617 return 0;
618}
619
620static int fintek_resume(struct pnp_dev *pdev)
621{
622 int ret = 0;
623 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
624
625 fit_dbg("%s called", __func__);
626
627 /* open interrupt */
628 fintek_enable_cir_irq(fintek);
629
630 /* Enable CIR logical device */
631 fintek_config_mode_enable(fintek);
632 fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
633 fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
634
635 fintek_config_mode_disable(fintek);
636
637 fintek_cir_regs_init(fintek);
638
639 return ret;
640}
641
642static void fintek_shutdown(struct pnp_dev *pdev)
643{
644 struct fintek_dev *fintek = pnp_get_drvdata(pdev);
645 fintek_enable_wake(fintek);
646}
647
648static const struct pnp_device_id fintek_ids[] = {
649 { "FIT0002", 0 }, /* CIR */
650 { "", 0 },
651};
652
653static struct pnp_driver fintek_driver = {
654 .name = FINTEK_DRIVER_NAME,
655 .id_table = fintek_ids,
656 .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
657 .probe = fintek_probe,
658 .remove = __devexit_p(fintek_remove),
659 .suspend = fintek_suspend,
660 .resume = fintek_resume,
661 .shutdown = fintek_shutdown,
662};
663
664int fintek_init(void)
665{
666 return pnp_register_driver(&fintek_driver);
667}
668
669void fintek_exit(void)
670{
671 pnp_unregister_driver(&fintek_driver);
672}
673
674module_param(debug, int, S_IRUGO | S_IWUSR);
675MODULE_PARM_DESC(debug, "Enable debugging output");
676
677MODULE_DEVICE_TABLE(pnp, fintek_ids);
678MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver");
679
680MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
681MODULE_LICENSE("GPL");
682
683module_init(fintek_init);
684module_exit(fintek_exit);
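
All access to the SuperIO goes through the index/data port pair: write the enable key
0x87 twice to the index port to enter configuration mode, select a logical device by
writing its number to register 0x07 (GCR_LOGICAL_DEV_NO), write per-device registers
through the same pair, and leave with 0xaa. That is the sequence performed above by
fintek_config_mode_enable(), fintek_select_logical_dev() and fintek_cir_ldev_init().
A stand-alone sketch of that sequence with stubbed port I/O (the I/O base and IRQ
values are hypothetical; the driver gets the real ones from its PNP resources):

    #include <stdio.h>
    #include <stdint.h>

    /* constants as defined in fintek-cir.h */
    #define CR_INDEX_PORT       0x2e
    #define CR_DATA_PORT        0x2f
    #define CONFIG_REG_ENABLE   0x87
    #define CONFIG_REG_DISABLE  0xaa
    #define GCR_LOGICAL_DEV_NO  0x07
    #define LOGICAL_DEV_CIR     0x05
    #define CIR_CR_BASE_ADDR_HI 0x60
    #define CIR_CR_BASE_ADDR_LO 0x61
    #define CIR_CR_IRQ_SEL      0x70

    /* stubbed port write; the driver itself uses outb() on cr_ip/cr_dp */
    static void port_write(uint8_t val, uint16_t port)
    {
        printf("outb 0x%02x -> 0x%02x\n", val, port);
    }

    /* write one configuration register: index first, then its value */
    static void cr_write(uint8_t val, uint8_t reg)
    {
        port_write(reg, CR_INDEX_PORT);
        port_write(val, CR_DATA_PORT);
    }

    int main(void)
    {
        uint16_t cir_addr = 0x230;   /* hypothetical CIR I/O base */
        uint8_t  cir_irq  = 3;       /* hypothetical CIR IRQ */

        /* enter config mode: the enable key must be written twice */
        port_write(CONFIG_REG_ENABLE, CR_INDEX_PORT);
        port_write(CONFIG_REG_ENABLE, CR_INDEX_PORT);

        /* point subsequent writes at the CIR logical device */
        cr_write(LOGICAL_DEV_CIR, GCR_LOGICAL_DEV_NO);

        /* program I/O base and IRQ, as fintek_cir_ldev_init() does */
        cr_write(cir_addr >> 8, CIR_CR_BASE_ADDR_HI);
        cr_write(cir_addr & 0xff, CIR_CR_BASE_ADDR_LO);
        cr_write(cir_irq, CIR_CR_IRQ_SEL);

        /* leave config mode */
        port_write(CONFIG_REG_DISABLE, CR_INDEX_PORT);
        return 0;
    }
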
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
new file mode 100644
index 000000000000..1b10b2011f5e
--- /dev/null
+++ b/drivers/media/rc/fintek-cir.h
@@ -0,0 +1,243 @@
1/*
2 * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
3 *
4 * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
5 *
6 * Special thanks to Fintek for providing hardware and spec sheets.
7 * This driver is based upon the nuvoton, ite and ene drivers for
8 * similar hardware.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23 * USA
24 */
25
26#include <linux/spinlock.h>
27#include <linux/ioctl.h>
28
29/* platform driver name to register */
30#define FINTEK_DRIVER_NAME "fintek-cir"
31#define FINTEK_DESCRIPTION "Fintek LPC SuperIO Consumer IR Transceiver"
32#define VENDOR_ID_FINTEK 0x1934
33
34
35/* debugging module parameter */
36static int debug;
37
38#define fit_pr(level, text, ...) \
39 printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__)
40
41#define fit_dbg(text, ...) \
42 if (debug) \
43 printk(KERN_DEBUG \
44 KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
45
46#define fit_dbg_verbose(text, ...) \
47 if (debug > 1) \
48 printk(KERN_DEBUG \
49 KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
50
51#define fit_dbg_wake(text, ...) \
52 if (debug > 2) \
53 printk(KERN_DEBUG \
54 KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
55
56
57#define TX_BUF_LEN 256
58#define RX_BUF_LEN 32
59
60struct fintek_dev {
61 struct pnp_dev *pdev;
62 struct rc_dev *rdev;
63
64 spinlock_t fintek_lock;
65
66 /* for rx */
67 u8 buf[RX_BUF_LEN];
68 unsigned int pkts;
69
70 struct {
71 spinlock_t lock;
72 u8 buf[TX_BUF_LEN];
73 unsigned int buf_count;
74 unsigned int cur_buf_num;
75 wait_queue_head_t queue;
76 } tx;
77
78 /* Config register index/data port pair */
79 u8 cr_ip;
80 u8 cr_dp;
81
82 /* hardware I/O settings */
83 unsigned long cir_addr;
84 int cir_irq;
85 int cir_port_len;
86
87 /* hardware id */
88 u8 chip_major;
89 u8 chip_minor;
90 u16 chip_vendor;
91
92 /* hardware features */
93 bool hw_learning_capable;
94 bool hw_tx_capable;
95
96 /* rx settings */
97 bool learning_enabled;
98 bool carrier_detect_enabled;
99
100 enum {
101 CMD_HEADER = 0,
102 SUBCMD,
103 CMD_DATA,
104 PARSE_IRDATA,
105 } parser_state;
106
107 u8 cmd, rem;
108
109 /* carrier period = 1 / frequency */
110 u32 carrier;
111};
112
113/* buffer packet constants, largely identical to mceusb.c */
114#define BUF_PULSE_BIT 0x80
115#define BUF_LEN_MASK 0x1f
116#define BUF_SAMPLE_MASK 0x7f
117
118#define BUF_COMMAND_HEADER 0x9f
119#define BUF_COMMAND_MASK 0xe0
120#define BUF_COMMAND_NULL 0x00
121#define BUF_HW_CMD_HEADER 0xff
122#define BUF_CMD_G_REVISION 0x0b
123#define BUF_CMD_S_CARRIER 0x06
124#define BUF_CMD_S_TIMEOUT 0x0c
125#define BUF_CMD_SIG_END 0x01
126#define BUF_CMD_S_TXMASK 0x08
127#define BUF_CMD_S_RXSENSOR 0x14
128#define BUF_RSP_PULSE_COUNT 0x15
129
130#define CIR_SAMPLE_PERIOD 50
131
132/*
133 * Configuration Register:
134 * Index Port
135 * Data Port
136 */
137#define CR_INDEX_PORT 0x2e
138#define CR_DATA_PORT 0x2f
139
140/* Possible alternate values, depends on how the chip is wired */
141#define CR_INDEX_PORT2 0x4e
142#define CR_DATA_PORT2 0x4f
143
144/*
145 * GCR_CONFIG_PORT_SEL bit 4 specifies which Index Port value is
146 * active. 1 = 0x4e, 0 = 0x2e
147 */
148#define PORT_SEL_PORT_4E_EN 0x10
149
150/* Extended Function Mode enable/disable magic values */
151#define CONFIG_REG_ENABLE 0x87
152#define CONFIG_REG_DISABLE 0xaa
153
154/* Chip IDs found in CR_CHIP_ID_{HI,LO} */
155#define CHIP_ID_HIGH_F71809U 0x04
156#define CHIP_ID_LOW_F71809U 0x08
157
158/*
159 * Global control regs we need to care about:
160 * Global Control def.
161 * Register name addr val. */
162#define GCR_SOFTWARE_RESET 0x02 /* 0x00 */
163#define GCR_LOGICAL_DEV_NO 0x07 /* 0x00 */
164#define GCR_CHIP_ID_HI 0x20 /* 0x04 */
165#define GCR_CHIP_ID_LO 0x21 /* 0x08 */
166#define GCR_VENDOR_ID_HI 0x23 /* 0x19 */
167#define GCR_VENDOR_ID_LO 0x24 /* 0x34 */
168#define GCR_CONFIG_PORT_SEL 0x25 /* 0x01 */
169#define GCR_KBMOUSE_WAKEUP 0x27
170
171#define LOGICAL_DEV_DISABLE 0x00
172#define LOGICAL_DEV_ENABLE 0x01
173
174/* Logical device number of the CIR function */
175#define LOGICAL_DEV_CIR 0x05
176
177/* CIR Logical Device (LDN 0x08) config registers */
178#define CIR_CR_COMMAND_INDEX 0x04
179#define CIR_CR_IRCS 0x05 /* Before host writes command to IR, host
180 must set it to 1. When the host finishes writing the
181 command to IR, it must clear it to 0. */
182#define CIR_CR_COMMAND_DATA 0x06 /* Host read or write command data */
183#define CIR_CR_CLASS 0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx,
184 0x33 = rx + 1 tx */
185#define CIR_CR_DEV_EN 0x30 /* bit0 = 1 enables CIR */
186#define CIR_CR_BASE_ADDR_HI 0x60 /* MSB of CIR IO base addr */
187#define CIR_CR_BASE_ADDR_LO 0x61 /* LSB of CIR IO base addr */
188#define CIR_CR_IRQ_SEL 0x70 /* bits3-0 store CIR IRQ */
189#define CIR_CR_PSOUT_STATUS 0xf1
190#define CIR_CR_WAKE_KEY3_ADDR 0xf8
191#define CIR_CR_WAKE_KEY3_CODE 0xf9
192#define CIR_CR_WAKE_KEY3_DC 0xfa
193#define CIR_CR_WAKE_CONTROL 0xfb
194#define CIR_CR_WAKE_KEY12_ADDR 0xfc
195#define CIR_CR_WAKE_KEY4_ADDR 0xfd
196#define CIR_CR_WAKE_KEY5_ADDR 0xfe
197
198#define CLASS_RX_ONLY 0xff
199#define CLASS_RX_2TX 0x66
200#define CLASS_RX_1TX 0x33
201
202/* CIR device registers */
203#define CIR_STATUS 0x00
204#define CIR_RX_DATA 0x01
205#define CIR_TX_CONTROL 0x02
206#define CIR_TX_DATA 0x03
207#define CIR_CONTROL 0x04
208
209/* Bits to enable CIR wake */
210#define LOGICAL_DEV_ACPI 0x01
211#define LDEV_ACPI_WAKE_EN_REG 0xe8
212#define ACPI_WAKE_EN_CIR_BIT 0x04
213
214#define LDEV_ACPI_PME_EN_REG 0xf0
215#define LDEV_ACPI_PME_CLR_REG 0xf1
216#define ACPI_PME_CIR_BIT 0x02
217
218#define LDEV_ACPI_STATE_REG 0xf4
219#define ACPI_STATE_CIR_BIT 0x20
220
221/*
222 * CIR status register (0x00):
223 * 7 - CIR_IRQ_EN (1 = enable CIR IRQ, 0 = disable)
224 * 3 - TX_FINISH (1 when TX finished, write 1 to clear)
225 * 2 - TX_UNDERRUN (1 on TX underrun, write 1 to clear)
226 * 1 - RX_TIMEOUT (1 on RX timeout, write 1 to clear)
227 * 0 - RX_RECEIVE (1 on RX receive, write 1 to clear)
228 */
229#define CIR_STATUS_IRQ_EN 0x80
230#define CIR_STATUS_TX_FINISH 0x08
231#define CIR_STATUS_TX_UNDERRUN 0x04
232#define CIR_STATUS_RX_TIMEOUT 0x02
233#define CIR_STATUS_RX_RECEIVE 0x01
234#define CIR_STATUS_IRQ_MASK 0x0f
235
236/*
237 * CIR TX control register (0x02):
238 * 7 - TX_START (1 to indicate TX start, auto-cleared when done)
239 * 6 - TX_END (1 to indicate TX data written to TX fifo)
240 */
241#define CIR_TX_CONTROL_TX_START 0x80
242#define CIR_TX_CONTROL_TX_END 0x40
243
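
The BUF_* and CIR_SAMPLE_PERIOD defines above describe how the hardware encodes raw
IR data: bit 7 of each sample marks pulse versus space and the low 7 bits count 50 us
periods, which is what fintek_process_rx_ir_data() unpacks in its PARSE_IRDATA state
before handing the result to ir_raw_event_store_with_filter(). A stand-alone sketch
of that decoding (the sample bytes are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define BUF_PULSE_BIT     0x80
    #define BUF_SAMPLE_MASK   0x7f
    #define CIR_SAMPLE_PERIOD 50    /* microseconds per sample count */

    int main(void)
    {
        /* hypothetical bytes as they would sit in fintek->buf[] */
        uint8_t samples[] = { 0x92, 0x11, 0x89, 0x08 };
        unsigned int i;

        for (i = 0; i < sizeof(samples); i++) {
            int pulse = (samples[i] & BUF_PULSE_BIT) != 0;
            unsigned int usec = (samples[i] & BUF_SAMPLE_MASK) * CIR_SAMPLE_PERIOD;

            /* the driver stores the same pulse/duration pair, after US_TO_NS() */
            printf("%s %u us\n", pulse ? "pulse" : "space", usec);
        }
        return 0;
    }
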
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c
index afae14fd152e..129d3f9a461d 100644
--- a/drivers/media/rc/keymaps/rc-lme2510.c
+++ b/drivers/media/rc/keymaps/rc-lme2510.c
@@ -14,81 +14,81 @@
14 14
15static struct rc_map_table lme2510_rc[] = { 15static struct rc_map_table lme2510_rc[] = {
16 /* Type 1 - 26 buttons */ 16 /* Type 1 - 26 buttons */
17 { 0xef12ba45, KEY_0 }, 17 { 0x10ed45, KEY_0 },
18 { 0xef12a05f, KEY_1 }, 18 { 0x10ed5f, KEY_1 },
19 { 0xef12af50, KEY_2 }, 19 { 0x10ed50, KEY_2 },
20 { 0xef12a25d, KEY_3 }, 20 { 0x10ed5d, KEY_3 },
21 { 0xef12be41, KEY_4 }, 21 { 0x10ed41, KEY_4 },
22 { 0xef12f50a, KEY_5 }, 22 { 0x10ed0a, KEY_5 },
23 { 0xef12bd42, KEY_6 }, 23 { 0x10ed42, KEY_6 },
24 { 0xef12b847, KEY_7 }, 24 { 0x10ed47, KEY_7 },
25 { 0xef12b649, KEY_8 }, 25 { 0x10ed49, KEY_8 },
26 { 0xef12fa05, KEY_9 }, 26 { 0x10ed05, KEY_9 },
27 { 0xef12bc43, KEY_POWER }, 27 { 0x10ed43, KEY_POWER },
28 { 0xef12b946, KEY_SUBTITLE }, 28 { 0x10ed46, KEY_SUBTITLE },
29 { 0xef12f906, KEY_PAUSE }, 29 { 0x10ed06, KEY_PAUSE },
30 { 0xef12fc03, KEY_MEDIA_REPEAT}, 30 { 0x10ed03, KEY_MEDIA_REPEAT},
31 { 0xef12fd02, KEY_PAUSE }, 31 { 0x10ed02, KEY_PAUSE },
32 { 0xef12a15e, KEY_VOLUMEUP }, 32 { 0x10ed5e, KEY_VOLUMEUP },
33 { 0xef12a35c, KEY_VOLUMEDOWN }, 33 { 0x10ed5c, KEY_VOLUMEDOWN },
34 { 0xef12f609, KEY_CHANNELUP }, 34 { 0x10ed09, KEY_CHANNELUP },
35 { 0xef12e51a, KEY_CHANNELDOWN }, 35 { 0x10ed1a, KEY_CHANNELDOWN },
36 { 0xef12e11e, KEY_PLAY }, 36 { 0x10ed1e, KEY_PLAY },
37 { 0xef12e41b, KEY_ZOOM }, 37 { 0x10ed1b, KEY_ZOOM },
38 { 0xef12a659, KEY_MUTE }, 38 { 0x10ed59, KEY_MUTE },
39 { 0xef12a55a, KEY_TV }, 39 { 0x10ed5a, KEY_TV },
40 { 0xef12e718, KEY_RECORD }, 40 { 0x10ed18, KEY_RECORD },
41 { 0xef12f807, KEY_EPG }, 41 { 0x10ed07, KEY_EPG },
42 { 0xef12fe01, KEY_STOP }, 42 { 0x10ed01, KEY_STOP },
43 /* Type 2 - 20 buttons */ 43 /* Type 2 - 20 buttons */
44 { 0xff40ea15, KEY_0 }, 44 { 0xbf15, KEY_0 },
45 { 0xff40f708, KEY_1 }, 45 { 0xbf08, KEY_1 },
46 { 0xff40f609, KEY_2 }, 46 { 0xbf09, KEY_2 },
47 { 0xff40f50a, KEY_3 }, 47 { 0xbf0a, KEY_3 },
48 { 0xff40f30c, KEY_4 }, 48 { 0xbf0c, KEY_4 },
49 { 0xff40f20d, KEY_5 }, 49 { 0xbf0d, KEY_5 },
50 { 0xff40f10e, KEY_6 }, 50 { 0xbf0e, KEY_6 },
51 { 0xff40ef10, KEY_7 }, 51 { 0xbf10, KEY_7 },
52 { 0xff40ee11, KEY_8 }, 52 { 0xbf11, KEY_8 },
53 { 0xff40ed12, KEY_9 }, 53 { 0xbf12, KEY_9 },
54 { 0xff40ff00, KEY_POWER }, 54 { 0xbf00, KEY_POWER },
55 { 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */ 55 { 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
56 { 0xff40e51a, KEY_PAUSE }, /* Timeshift */ 56 { 0xbf1a, KEY_PAUSE }, /* Timeshift */
57 { 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ 57 { 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
58 { 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ 58 { 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
59 { 0xff40fe01, KEY_CHANNELUP }, 59 { 0xbf01, KEY_CHANNELUP },
60 { 0xff40fa05, KEY_CHANNELDOWN }, 60 { 0xbf05, KEY_CHANNELDOWN },
61 { 0xff40eb14, KEY_ZOOM }, 61 { 0xbf14, KEY_ZOOM },
62 { 0xff40e718, KEY_RECORD }, 62 { 0xbf18, KEY_RECORD },
63 { 0xff40e916, KEY_STOP }, 63 { 0xbf16, KEY_STOP },
64 /* Type 3 - 20 buttons */ 64 /* Type 3 - 20 buttons */
65 { 0xff00e31c, KEY_0 }, 65 { 0x1c, KEY_0 },
66 { 0xff00f807, KEY_1 }, 66 { 0x07, KEY_1 },
67 { 0xff00ea15, KEY_2 }, 67 { 0x15, KEY_2 },
68 { 0xff00f609, KEY_3 }, 68 { 0x09, KEY_3 },
69 { 0xff00e916, KEY_4 }, 69 { 0x16, KEY_4 },
70 { 0xff00e619, KEY_5 }, 70 { 0x19, KEY_5 },
71 { 0xff00f20d, KEY_6 }, 71 { 0x0d, KEY_6 },
72 { 0xff00f30c, KEY_7 }, 72 { 0x0c, KEY_7 },
73 { 0xff00e718, KEY_8 }, 73 { 0x18, KEY_8 },
74 { 0xff00a15e, KEY_9 }, 74 { 0x5e, KEY_9 },
75 { 0xff00ba45, KEY_POWER }, 75 { 0x45, KEY_POWER },
76 { 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */ 76 { 0x44, KEY_MEDIA_REPEAT}, /* Recall */
77 { 0xff00b54a, KEY_PAUSE }, /* Timeshift */ 77 { 0x4a, KEY_PAUSE }, /* Timeshift */
78 { 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ 78 { 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
79 { 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ 79 { 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
80 { 0xff00b946, KEY_CHANNELUP }, 80 { 0x46, KEY_CHANNELUP },
81 { 0xff00bf40, KEY_CHANNELDOWN }, 81 { 0x40, KEY_CHANNELDOWN },
82 { 0xff00f708, KEY_ZOOM }, 82 { 0x08, KEY_ZOOM },
83 { 0xff00bd42, KEY_RECORD }, 83 { 0x42, KEY_RECORD },
84 { 0xff00a55a, KEY_STOP }, 84 { 0x5a, KEY_STOP },
85}; 85};
86 86
87static struct rc_map_list lme2510_map = { 87static struct rc_map_list lme2510_map = {
88 .map = { 88 .map = {
89 .scan = lme2510_rc, 89 .scan = lme2510_rc,
90 .size = ARRAY_SIZE(lme2510_rc), 90 .size = ARRAY_SIZE(lme2510_rc),
91 .rc_type = RC_TYPE_UNKNOWN, 91 .rc_type = RC_TYPE_NEC,
92 .name = RC_MAP_LME2510, 92 .name = RC_MAP_LME2510,
93 } 93 }
94}; 94};
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 3be180b3ba27..bb53de7fe408 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -687,7 +687,7 @@ config VIDEO_HEXIUM_GEMINI
687 687
688config VIDEO_TIMBERDALE 688config VIDEO_TIMBERDALE
689 tristate "Support for timberdale Video In/LogiWIN" 689 tristate "Support for timberdale Video In/LogiWIN"
690 depends on VIDEO_V4L2 && I2C 690 depends on VIDEO_V4L2 && I2C && DMADEVICES
691 select DMA_ENGINE 691 select DMA_ENGINE
692 select TIMB_DMA 692 select TIMB_DMA
693 select VIDEO_ADV7180 693 select VIDEO_ADV7180
@@ -757,6 +757,8 @@ config VIDEO_NOON010PC30
757 ---help--- 757 ---help---
758 This driver supports NOON010PC30 CIF camera from Siliconfile 758 This driver supports NOON010PC30 CIF camera from Siliconfile
759 759
760source "drivers/media/video/m5mols/Kconfig"
761
760config VIDEO_OMAP3 762config VIDEO_OMAP3
761 tristate "OMAP 3 Camera support (EXPERIMENTAL)" 763 tristate "OMAP 3 Camera support (EXPERIMENTAL)"
762 select OMAP_IOMMU 764 select OMAP_IOMMU
@@ -952,7 +954,7 @@ config VIDEO_SAMSUNG_S5P_FIMC
952 954
953config VIDEO_S5P_MIPI_CSIS 955config VIDEO_S5P_MIPI_CSIS
954 tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver" 956 tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
955 depends on VIDEO_V4L2 && PM_RUNTIME && VIDEO_V4L2_SUBDEV_API 957 depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
956 ---help--- 958 ---help---
957 This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver. 959 This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver.
958 960
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 9519160c2e01..f0fecd6f6a33 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
69obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o 69obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
70obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o 70obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
71obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o 71obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
72obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
72 73
73obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o 74obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
74obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o 75obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 0073a8c55336..40eb6326e48a 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -438,7 +438,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
438 strcat(vc->card, " (676/"); 438 strcat(vc->card, " (676/");
439 break; 439 break;
440 default: 440 default:
441 strcat(vc->card, " (???/"); 441 strcat(vc->card, " (XXX/");
442 break; 442 break;
443 } 443 }
444 switch (cam->params.version.sensor_flags) { 444 switch (cam->params.version.sensor_flags) {
@@ -458,7 +458,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
458 strcat(vc->card, "500)"); 458 strcat(vc->card, "500)");
459 break; 459 break;
460 default: 460 default:
461 strcat(vc->card, "???)"); 461 strcat(vc->card, "XXX)");
462 break; 462 break;
463 } 463 }
464 464
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 280df43ca446..8d7813415760 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -1354,7 +1354,7 @@ void cx231xx_dump_SC_reg(struct cx231xx *dev)
1354{ 1354{
1355 u8 value[4] = { 0, 0, 0, 0 }; 1355 u8 value[4] = { 0, 0, 0, 0 };
1356 int status = 0; 1356 int status = 0;
1357 cx231xx_info("cx231xx_dump_SC_reg %s!\n", __TIME__); 1357 cx231xx_info("cx231xx_dump_SC_reg!\n");
1358 1358
1359 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, 1359 status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
1360 value, 4); 1360 value, 4);
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 66671a4092e4..26fc206f095e 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -34,7 +34,7 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
34MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver"); 34MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver");
35MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36 36
37#ifdef DEBUG 37#ifdef GSPCA_DEBUG
38int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK | 38int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK |
39 D_USBI | D_USBO | D_V4L2; 39 D_USBI | D_USBO | D_V4L2;
40#endif 40#endif
diff --git a/drivers/media/video/m5mols/Kconfig b/drivers/media/video/m5mols/Kconfig
new file mode 100644
index 000000000000..302dc3d70193
--- /dev/null
+++ b/drivers/media/video/m5mols/Kconfig
@@ -0,0 +1,5 @@
1config VIDEO_M5MOLS
2 tristate "Fujitsu M-5MOLS 8MP sensor support"
3 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
4 ---help---
5 This driver supports Fujitsu M-5MOLS camera sensor with ISP
diff --git a/drivers/media/video/m5mols/Makefile b/drivers/media/video/m5mols/Makefile
new file mode 100644
index 000000000000..0a44e028edc7
--- /dev/null
+++ b/drivers/media/video/m5mols/Makefile
@@ -0,0 +1,3 @@
1m5mols-objs := m5mols_core.o m5mols_controls.o m5mols_capture.o
2
3obj-$(CONFIG_VIDEO_M5MOLS) += m5mols.o
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
new file mode 100644
index 000000000000..10b55c854487
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -0,0 +1,296 @@
1/*
2 * Header for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef M5MOLS_H
17#define M5MOLS_H
18
19#include <media/v4l2-subdev.h>
20#include "m5mols_reg.h"
21
22extern int m5mols_debug;
23
24#define to_m5mols(__sd) container_of(__sd, struct m5mols_info, sd)
25
26#define to_sd(__ctrl) \
27 (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd)
28
29enum m5mols_restype {
30 M5MOLS_RESTYPE_MONITOR,
31 M5MOLS_RESTYPE_CAPTURE,
32 M5MOLS_RESTYPE_MAX,
33};
34
35/**
36 * struct m5mols_resolution - structure for the resolution
37 * @type: resolution type according to the pixel code
38 * @width: width of the resolution
39 * @height: height of the resolution
40 * @reg: resolution preset register value
41 */
42struct m5mols_resolution {
43 u8 reg;
44 enum m5mols_restype type;
45 u16 width;
46 u16 height;
47};
48
49/**
50 * struct m5mols_exif - structure for the EXIF information of M-5MOLS
51 * @exposure_time: exposure time register value
52 * @shutter_speed: speed of the shutter register value
53 * @aperture: aperture register value
54 * @exposure_bias: also called EV bias
55 * @iso_speed: ISO register value
56 * @flash: status register value of the flash
57 * @sdr: status register value of the Subject Distance Range
58 * @qval: exact meaning not specified in the documentation
59 */
60struct m5mols_exif {
61 u32 exposure_time;
62 u32 shutter_speed;
63 u32 aperture;
64 u32 brightness;
65 u32 exposure_bias;
66 u16 iso_speed;
67 u16 flash;
68 u16 sdr;
69 u16 qval;
70};
71
72/**
73 * struct m5mols_capture - Structure for the capture capability
74 * @exif: EXIF information
75 * @main: size in bytes of the main image
76 * @thumb: size in bytes of the thumbnail image, if one accompanies the main image
77 * @total: total size in bytes of the produced image
78 */
79struct m5mols_capture {
80 struct m5mols_exif exif;
81 u32 main;
82 u32 thumb;
83 u32 total;
84};
85
86/**
87 * struct m5mols_scenemode - structure for the scenemode capability
88 * @metering: metering light register value
89 * @ev_bias: EV bias register value
90 * @wb_mode: white balance mode, either Auto or Manual
91 * @wb_preset: whitebalance preset register value in the Manual mode
92 * @chroma_en: register value indicating whether the Chroma capability is enabled
93 * @chroma_lvl: chroma's level register value
94 * @edge_en: register value indicating whether the Edge capability is enabled
95 * @edge_lvl: edge's level register value
96 * @af_range: Auto Focus's range
97 * @fd_mode: Face Detection mode
98 * @mcc: Multi-axis Color Conversion which means emotion color
99 * @light: status of the Light
100 * @flash: status of the Flash
101 * @tone: tone color, i.e. Contrast
102 * @iso: ISO register value
103 * @capt_mode: Image Stabilization mode used while the camera is capturing
104 * @wdr: Wide Dynamic Range register value
105 *
106 * The recommended value for each scene mode is given in the documentation.
107 */
108struct m5mols_scenemode {
109 u32 metering;
110 u32 ev_bias;
111 u32 wb_mode;
112 u32 wb_preset;
113 u32 chroma_en;
114 u32 chroma_lvl;
115 u32 edge_en;
116 u32 edge_lvl;
117 u32 af_range;
118 u32 fd_mode;
119 u32 mcc;
120 u32 light;
121 u32 flash;
122 u32 tone;
123 u32 iso;
124 u32 capt_mode;
125 u32 wdr;
126};
127
128/**
129 * struct m5mols_version - firmware version information
130 * @customer: customer information
131 * @project: version of project information according to customer
132 * @fw: firmware revision
133 * @hw: hardware revision
134 * @param: version of the parameter
135 * @awb: Auto WhiteBalance algorithm version
136 * @str: information about manufacturer and packaging vendor
137 * @af: Auto Focus version
138 *
139 * The register offsets run from the customer version at 0x00 to the awb
140 * version at 0x09. The customer and project fields occupy 1 byte each,
141 * while fw, hw, param and awb require 2 bytes each. The str field is a
142 * unique string associated with the firmware version; it includes
143 * information about the manufacturer and the vendor of the sensor's
144 * packaging. The least significant 2 bytes of the string indicate the packaging manufacturer.
145 */
146#define VERSION_STRING_SIZE 22
147struct m5mols_version {
148 u8 customer;
149 u8 project;
150 u16 fw;
151 u16 hw;
152 u16 param;
153 u16 awb;
154 u8 str[VERSION_STRING_SIZE];
155 u8 af;
156};
157#define VERSION_SIZE sizeof(struct m5mols_version)
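For illustration only, the byte layout described above maps onto the version registers roughly as follows (a sketch derived from the comment, not from the datasheet):

	/*
	 * 0x00  customer  (1 byte)
	 * 0x01  project   (1 byte)
	 * 0x02  fw        (2 bytes)
	 * 0x04  hw        (2 bytes)
	 * 0x06  param     (2 bytes)
	 * 0x08  awb       (2 bytes, ends at 0x09)
	 * ...   version string, then the AF version register
	 */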
158
159/**
160 * struct m5mols_info - M-5MOLS driver data structure
161 * @pdata: platform data
162 * @sd: v4l-subdev instance
163 * @pad: media pad
164 * @ffmt: current fmt according to resolution type
165 * @res_type: current resolution type
166 * @code: current code
167 * @irq_waitq: waitqueue for the capture
168 * @work_irq: workqueue for the IRQ
169 * @flags: state variable for the interrupt handler
170 * @handle: control handler
171 * @autoexposure: Auto Exposure control
172 * @exposure: Exposure control
173 * @autowb: Auto White Balance control
174 * @colorfx: Color effect control
175 * @saturation: Saturation control
176 * @zoom: Zoom control
177 * @ver: information of the version
178 * @cap: the capture mode attributes
179 * @power: current sensor's power status
180 * @ctrl_sync: true means all controls of the sensor are initialized
181 * @int_capture: true means the capture interrupt is issued once
182 * @lock_ae: true means the Auto Exposure is locked
183 * @lock_awb: true means the Auto WhiteBalance is locked
184 * @resolution: register value for current resolution
185 * @interrupt: register value for current interrupt status
186 * @mode: register value for current operation mode
187 * @mode_save: register value for current operation mode for saving
188 * @set_power: optional power callback to the board code
189 */
190struct m5mols_info {
191 const struct m5mols_platform_data *pdata;
192 struct v4l2_subdev sd;
193 struct media_pad pad;
194 struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
195 int res_type;
196 enum v4l2_mbus_pixelcode code;
197 wait_queue_head_t irq_waitq;
198 struct work_struct work_irq;
199 unsigned long flags;
200
201 struct v4l2_ctrl_handler handle;
202 /* Autoexposure/exposure control cluster */
203 struct {
204 struct v4l2_ctrl *autoexposure;
205 struct v4l2_ctrl *exposure;
206 };
207 struct v4l2_ctrl *autowb;
208 struct v4l2_ctrl *colorfx;
209 struct v4l2_ctrl *saturation;
210 struct v4l2_ctrl *zoom;
211
212 struct m5mols_version ver;
213 struct m5mols_capture cap;
214 bool power;
215 bool ctrl_sync;
216 bool lock_ae;
217 bool lock_awb;
218 u8 resolution;
219 u32 interrupt;
220 u32 mode;
221 u32 mode_save;
222 int (*set_power)(struct device *dev, int on);
223};
224
225#define ST_CAPT_IRQ 0
226
227#define is_powered(__info) (__info->power)
228#define is_ctrl_synced(__info) (__info->ctrl_sync)
229#define is_available_af(__info) (__info->ver.af)
230#define is_code(__code, __type) (__code == m5mols_default_ffmt[__type].code)
231#define is_manufacturer(__info, __manufacturer) \
232 (__info->ver.str[0] == __manufacturer[0] && \
233 __info->ver.str[1] == __manufacturer[1])
234/*
235 * I2C operation of the M-5MOLS
236 *
237 * The I2C read operation of the M-5MOLS requires 2 messages. The first
238 * message sends the information about the command, command category, and total
239 * message size. The second message is used to retrieve the data specified
240 * in the first message.
241 *
242 * 1st message 2nd message
243 * +-------+---+----------+-----+-------+ +------+------+------+------+
244 * | size1 | R | category | cmd | size2 | | d[0] | d[1] | d[2] | d[3] |
245 * +-------+---+----------+-----+-------+ +------+------+------+------+
246 * - size1: message data size (5 in this case)
247 * - size2: desired buffer size of the 2nd message
248 * - d[0..3]: according to size2
249 *
250 * The I2C write operation needs just one message. The message includes
251 * category, command, total size, and desired data.
252 *
253 * 1st message
254 * +-------+---+----------+-----+------+------+------+------+
255 * | size1 | W | category | cmd | d[0] | d[1] | d[2] | d[3] |
256 * +-------+---+----------+-----+------+------+------+------+
257 * - d[0..3]: according to size1
258 */
259int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
260int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
261int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
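A minimal usage sketch of the I2C helpers declared above (illustrative only; it composes the register address with the I2C_REG() macro and the CAT_SYSTEM/CAT0_STATUS constants from m5mols_reg.h, mirroring how m5mols_busy() builds its reads; sd is assumed to be the subdev pointer):

	/* Hypothetical example: read the 1-byte SYSTEM status register. */
	u32 status;
	int ret = m5mols_read(sd, I2C_REG(CAT_SYSTEM, CAT0_STATUS, 1), &status);
	if (!ret)
		v4l2_dbg(1, m5mols_debug, sd, "status: %#x\n", status);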
262
263/*
264 * Mode operation of the M-5MOLS
265 *
266 * Changing the mode of the M-5MOLS requires the right execution order.
267 * There are three modes (PARAMETER, MONITOR, CAPTURE) which can be changed
268 * by the user. There are various categories associated with each mode.
269 *
270 * +============================================================+
271 * | mode | category |
272 * +============================================================+
273 * | FLASH | FLASH(only after Stand-by or Power-on) |
274 * | SYSTEM | SYSTEM(only after sensor arm-booting) |
275 * | PARAMETER | PARAMETER |
276 * | MONITOR | MONITOR(preview), Auto Focus, Face Detection |
277 * | CAPTURE | Single CAPTURE, Preview(recording) |
278 * +============================================================+
279 *
280 * The available execution order between the modes is as follows:
281 * PARAMETER <---> MONITOR <---> CAPTURE
282 */
283int m5mols_mode(struct m5mols_info *info, u32 mode);
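As a usage sketch (illustrative only): callers simply name the target mode and m5mols_mode() walks the transition chain above for them, e.g. stepping through MONITOR when going from PARAMETER to CAPTURE (info is assumed to be the driver state):

	int ret = m5mols_mode(info, REG_PARAMETER);	/* e.g. to change PARAMETER-mode settings */
	if (!ret)
		ret = m5mols_mode(info, REG_CAPTURE);	/* goes PARAMETER -> MONITOR -> CAPTURE internally */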
284
285int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
286int m5mols_sync_controls(struct m5mols_info *info);
287int m5mols_start_capture(struct m5mols_info *info);
288int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
289int m5mols_lock_3a(struct m5mols_info *info, bool lock);
290int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
291
292/* The firmware function */
293int m5mols_update_fw(struct v4l2_subdev *sd,
294 int (*set_power)(struct m5mols_info *, bool));
295
296#endif /* M5MOLS_H */
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
new file mode 100644
index 000000000000..d71a3903b60f
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -0,0 +1,191 @@
1/*
2 * The Capture code for Fujitsu M-5MOLS ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/i2c.h>
17#include <linux/slab.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/version.h>
22#include <linux/gpio.h>
23#include <linux/regulator/consumer.h>
24#include <linux/videodev2.h>
25#include <linux/version.h>
26#include <media/v4l2-ctrls.h>
27#include <media/v4l2-device.h>
28#include <media/v4l2-subdev.h>
29#include <media/m5mols.h>
30
31#include "m5mols.h"
32#include "m5mols_reg.h"
33
34static int m5mols_capture_error_handler(struct m5mols_info *info,
35 int timeout)
36{
37 int ret;
38
39	/* Disable all interrupts and clear relevant interrupt status bits */
40 ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE,
41 info->interrupt & ~(REG_INT_CAPTURE));
42 if (ret)
43 return ret;
44
45 if (timeout == 0)
46 return -ETIMEDOUT;
47
48 return 0;
49}
50/**
51 * m5mols_read_rational - I2C read of a rational number
52 *
53 * Read numerator and denominator from registers @addr_num and @addr_den
54 * respectively and return the division result in @val.
55 */
56static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
57 u32 addr_den, u32 *val)
58{
59 u32 num, den;
60
61 int ret = m5mols_read(sd, addr_num, &num);
62 if (!ret)
63 ret = m5mols_read(sd, addr_den, &den);
64 if (ret)
65 return ret;
66 *val = den == 0 ? 0 : num / den;
67 return ret;
68}
69
70/**
71 * m5mols_capture_info - Gather captured image information
72 *
73 * For now it gathers only EXIF information and file size.
74 */
75static int m5mols_capture_info(struct m5mols_info *info)
76{
77 struct m5mols_exif *exif = &info->cap.exif;
78 struct v4l2_subdev *sd = &info->sd;
79 int ret;
80
81 ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU,
82 EXIF_INFO_EXPTIME_DE, &exif->exposure_time);
83 if (ret)
84 return ret;
85 ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE,
86 &exif->shutter_speed);
87 if (ret)
88 return ret;
89 ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE,
90 &exif->aperture);
91 if (ret)
92 return ret;
93 ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE,
94 &exif->brightness);
95 if (ret)
96 return ret;
97 ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE,
98 &exif->exposure_bias);
99 if (ret)
100 return ret;
101
102 ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
103 if (!ret)
104 ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
105 if (!ret)
106 ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
107 if (!ret)
108 ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
109 if (ret)
110 return ret;
111
112 if (!ret)
113 ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
114 if (!ret)
115 ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
116 if (!ret)
117 info->cap.total = info->cap.main + info->cap.thumb;
118
119 return ret;
120}
121
122int m5mols_start_capture(struct m5mols_info *info)
123{
124 struct v4l2_subdev *sd = &info->sd;
125 u32 resolution = info->resolution;
126 int timeout;
127 int ret;
128
129 /*
130 * Preparing capture. Setting control & interrupt before entering
131 * capture mode
132 *
133 * 1) change to MONITOR mode for operating control & interrupt
134 * 2) set controls (considering v4l2_control value & lock 3A)
135 * 3) set interrupt
136 * 4) change to CAPTURE mode
137 */
138 ret = m5mols_mode(info, REG_MONITOR);
139 if (!ret)
140 ret = m5mols_sync_controls(info);
141 if (!ret)
142 ret = m5mols_lock_3a(info, true);
143 if (!ret)
144 ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
145 if (!ret)
146 ret = m5mols_mode(info, REG_CAPTURE);
147 if (!ret) {
148 /* Wait for capture interrupt, after changing capture mode */
149 timeout = wait_event_interruptible_timeout(info->irq_waitq,
150 test_bit(ST_CAPT_IRQ, &info->flags),
151 msecs_to_jiffies(2000));
152 if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags))
153 ret = m5mols_capture_error_handler(info, timeout);
154 }
155 if (!ret)
156 ret = m5mols_lock_3a(info, false);
157 if (ret)
158 return ret;
159 /*
160 * Starting capture. Setting capture frame count and resolution and
161 * the format (available formats: JPEG, Bayer RAW, YUV).
162 *
163 * 1) select single or multi capture (up to 25), format and size
164 * 2) set interrupt
165 * 3) start capture (for the main image, for now)
166 * 4) get information
167 * 5) notify the file size to the v4l2 device (e.g. the s5p-fimc v4l2 device)
168 */
169 ret = m5mols_write(sd, CAPC_SEL_FRAME, 1);
170 if (!ret)
171 ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
172 if (!ret)
173 ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
174 if (!ret)
175 ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
176 if (!ret)
177 ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN);
178 if (!ret) {
179 /* Wait for the capture completion interrupt */
180 timeout = wait_event_interruptible_timeout(info->irq_waitq,
181 test_bit(ST_CAPT_IRQ, &info->flags),
182 msecs_to_jiffies(2000));
183 if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) {
184 ret = m5mols_capture_info(info);
185 if (!ret)
186 v4l2_subdev_notify(sd, 0, &info->cap.total);
187 }
188 }
189
190 return m5mols_capture_error_handler(info, timeout);
191}
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
new file mode 100644
index 000000000000..817c16fec368
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -0,0 +1,299 @@
1/*
2 * Controls for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/i2c.h>
17#include <linux/delay.h>
18#include <linux/videodev2.h>
19#include <media/v4l2-ctrls.h>
20
21#include "m5mols.h"
22#include "m5mols_reg.h"
23
24static struct m5mols_scenemode m5mols_default_scenemode[] = {
25 [REG_SCENE_NORMAL] = {
26 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
27 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
28 REG_AF_NORMAL, REG_FD_OFF,
29 REG_MCC_NORMAL, REG_LIGHT_OFF, REG_FLASH_OFF,
30 5, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
31 },
32 [REG_SCENE_PORTRAIT] = {
33 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
34 REG_CHROMA_ON, 3, REG_EDGE_ON, 4,
35 REG_AF_NORMAL, BIT_FD_EN | BIT_FD_DRAW_FACE_FRAME,
36 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
37 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
38 },
39 [REG_SCENE_LANDSCAPE] = {
40 REG_AE_ALL, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
41 REG_CHROMA_ON, 4, REG_EDGE_ON, 6,
42 REG_AF_NORMAL, REG_FD_OFF,
43 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
44 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
45 },
46 [REG_SCENE_SPORTS] = {
47 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
48 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
49 REG_AF_NORMAL, REG_FD_OFF,
50 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
51 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
52 },
53 [REG_SCENE_PARTY_INDOOR] = {
54 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
55 REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
56 REG_AF_NORMAL, REG_FD_OFF,
57 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
58 6, REG_ISO_200, REG_CAP_NONE, REG_WDR_OFF,
59 },
60 [REG_SCENE_BEACH_SNOW] = {
61 REG_AE_CENTER, REG_AE_INDEX_10_POS, REG_AWB_AUTO, 0,
62 REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
63 REG_AF_NORMAL, REG_FD_OFF,
64 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
65 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
66 },
67 [REG_SCENE_SUNSET] = {
68 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
69 REG_AWB_DAYLIGHT,
70 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
71 REG_AF_NORMAL, REG_FD_OFF,
72 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
73 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
74 },
75 [REG_SCENE_DAWN_DUSK] = {
76 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
77 REG_AWB_FLUORESCENT_1,
78 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
79 REG_AF_NORMAL, REG_FD_OFF,
80 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
81 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
82 },
83 [REG_SCENE_FALL] = {
84 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
85 REG_CHROMA_ON, 5, REG_EDGE_ON, 5,
86 REG_AF_NORMAL, REG_FD_OFF,
87 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
88 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
89 },
90 [REG_SCENE_NIGHT] = {
91 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
92 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
93 REG_AF_NORMAL, REG_FD_OFF,
94 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
95 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
96 },
97 [REG_SCENE_AGAINST_LIGHT] = {
98 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
99 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
100 REG_AF_NORMAL, REG_FD_OFF,
101 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
102 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
103 },
104 [REG_SCENE_FIRE] = {
105 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
106 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
107 REG_AF_NORMAL, REG_FD_OFF,
108 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
109 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
110 },
111 [REG_SCENE_TEXT] = {
112 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
113 REG_CHROMA_ON, 3, REG_EDGE_ON, 7,
114 REG_AF_MACRO, REG_FD_OFF,
115 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
116 6, REG_ISO_AUTO, REG_CAP_ANTI_SHAKE, REG_WDR_ON,
117 },
118 [REG_SCENE_CANDLE] = {
119 REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
120 REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
121 REG_AF_NORMAL, REG_FD_OFF,
122 REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
123 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
124 },
125};
126
127/**
128 * m5mols_do_scenemode() - Change current scenemode
129 * @mode: Desired mode of the scenemode
130 *
131 * WARNING: The execution order is important. Do not change the order.
132 */
133int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
134{
135 struct v4l2_subdev *sd = &info->sd;
136 struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
137 int ret;
138
139 if (mode > REG_SCENE_CANDLE)
140 return -EINVAL;
141
142 ret = m5mols_lock_3a(info, false);
143 if (!ret)
144 ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode);
145 if (!ret)
146 ret = m5mols_write(sd, AE_EV_PRESET_CAPTURE, mode);
147 if (!ret)
148 ret = m5mols_write(sd, AE_MODE, scenemode.metering);
149 if (!ret)
150 ret = m5mols_write(sd, AE_INDEX, scenemode.ev_bias);
151 if (!ret)
152 ret = m5mols_write(sd, AWB_MODE, scenemode.wb_mode);
153 if (!ret)
154 ret = m5mols_write(sd, AWB_MANUAL, scenemode.wb_preset);
155 if (!ret)
156 ret = m5mols_write(sd, MON_CHROMA_EN, scenemode.chroma_en);
157 if (!ret)
158 ret = m5mols_write(sd, MON_CHROMA_LVL, scenemode.chroma_lvl);
159 if (!ret)
160 ret = m5mols_write(sd, MON_EDGE_EN, scenemode.edge_en);
161 if (!ret)
162 ret = m5mols_write(sd, MON_EDGE_LVL, scenemode.edge_lvl);
163 if (!ret && is_available_af(info))
164 ret = m5mols_write(sd, AF_MODE, scenemode.af_range);
165 if (!ret && is_available_af(info))
166 ret = m5mols_write(sd, FD_CTL, scenemode.fd_mode);
167 if (!ret)
168 ret = m5mols_write(sd, MON_TONE_CTL, scenemode.tone);
169 if (!ret)
170 ret = m5mols_write(sd, AE_ISO, scenemode.iso);
171 if (!ret)
172 ret = m5mols_mode(info, REG_CAPTURE);
173 if (!ret)
174 ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr);
175 if (!ret)
176 ret = m5mols_write(sd, CAPP_MCC_MODE, scenemode.mcc);
177 if (!ret)
178 ret = m5mols_write(sd, CAPP_LIGHT_CTRL, scenemode.light);
179 if (!ret)
180 ret = m5mols_write(sd, CAPP_FLASH_CTRL, scenemode.flash);
181 if (!ret)
182 ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode);
183 if (!ret)
184 ret = m5mols_mode(info, REG_MONITOR);
185
186 return ret;
187}
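A brief usage sketch (illustrative only), applying one of the presets from the table above; REG_SCENE_PORTRAIT is simply an index into m5mols_default_scenemode, and info is assumed to be the driver state:

	/* Apply the portrait preset; the sensor is left in MONITOR mode. */
	int ret = m5mols_do_scenemode(info, REG_SCENE_PORTRAIT);
	if (ret)
		v4l2_err(&info->sd, "Failed to apply scene mode: %d\n", ret);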
188
189static int m5mols_lock_ae(struct m5mols_info *info, bool lock)
190{
191 int ret = 0;
192
193 if (info->lock_ae != lock)
194 ret = m5mols_write(&info->sd, AE_LOCK,
195 lock ? REG_AE_LOCK : REG_AE_UNLOCK);
196 if (!ret)
197 info->lock_ae = lock;
198
199 return ret;
200}
201
202static int m5mols_lock_awb(struct m5mols_info *info, bool lock)
203{
204 int ret = 0;
205
206 if (info->lock_awb != lock)
207 ret = m5mols_write(&info->sd, AWB_LOCK,
208 lock ? REG_AWB_LOCK : REG_AWB_UNLOCK);
209 if (!ret)
210 info->lock_awb = lock;
211
212 return ret;
213}
214
215/* m5mols_lock_3a() - Lock 3A (Auto Exposure, Auto WhiteBalance, Auto Focus) */
216int m5mols_lock_3a(struct m5mols_info *info, bool lock)
217{
218 int ret;
219
220 ret = m5mols_lock_ae(info, lock);
221 if (!ret)
222 ret = m5mols_lock_awb(info, lock);
223 /* Don't need to handle unlocking AF */
224 if (!ret && is_available_af(info) && lock)
225 ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
226
227 return ret;
228}
229
230/* m5mols_set_ctrl() - The main s_ctrl function, called by m5mols_s_ctrl() */
231int m5mols_set_ctrl(struct v4l2_ctrl *ctrl)
232{
233 struct v4l2_subdev *sd = to_sd(ctrl);
234 struct m5mols_info *info = to_m5mols(sd);
235 int ret;
236
237 switch (ctrl->id) {
238 case V4L2_CID_ZOOM_ABSOLUTE:
239 return m5mols_write(sd, MON_ZOOM, ctrl->val);
240
241 case V4L2_CID_EXPOSURE_AUTO:
242 ret = m5mols_lock_ae(info,
243 ctrl->val == V4L2_EXPOSURE_AUTO ? false : true);
244 if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO)
245 ret = m5mols_write(sd, AE_MODE, REG_AE_ALL);
246 if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) {
247 int val = info->exposure->val;
248 ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
249 if (!ret)
250 ret = m5mols_write(sd, AE_MAN_GAIN_MON, val);
251 if (!ret)
252 ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val);
253 }
254 return ret;
255
256 case V4L2_CID_AUTO_WHITE_BALANCE:
257 ret = m5mols_lock_awb(info, ctrl->val ? false : true);
258 if (!ret)
259 ret = m5mols_write(sd, AWB_MODE, ctrl->val ?
260 REG_AWB_AUTO : REG_AWB_PRESET);
261 return ret;
262
263 case V4L2_CID_SATURATION:
264 ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val);
265 if (!ret)
266 ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON);
267 return ret;
268
269 case V4L2_CID_COLORFX:
270 /*
271 * This control uses two kinds of registers: normal & color.
272 * The normal effect belongs to category 1, while the color
273 * one belongs to category 2.
274 *
275 * The normal effect uses one register: CAT1_EFFECT.
276 * The color effect uses three registers:
277 * CAT2_COLOR_EFFECT, CAT2_CFIXR, CAT2_CFIXB.
278 */
279 ret = m5mols_write(sd, PARM_EFFECT,
280 ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA :
281 ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS :
282 REG_EFFECT_OFF);
283 if (!ret)
284 ret = m5mols_write(sd, MON_EFFECT,
285 ctrl->val == V4L2_COLORFX_SEPIA ?
286 REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF);
287 if (!ret)
288 ret = m5mols_write(sd, MON_CFIXR,
289 ctrl->val == V4L2_COLORFX_SEPIA ?
290 REG_CFIXR_SEPIA : 0);
291 if (!ret)
292 ret = m5mols_write(sd, MON_CFIXB,
293 ctrl->val == V4L2_COLORFX_SEPIA ?
294 REG_CFIXB_SEPIA : 0);
295 return ret;
296 }
297
298 return -EINVAL;
299}
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
new file mode 100644
index 000000000000..76eac26e84ae
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -0,0 +1,1004 @@
1/*
2 * Driver for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/i2c.h>
17#include <linux/slab.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/version.h>
22#include <linux/gpio.h>
23#include <linux/regulator/consumer.h>
24#include <linux/videodev2.h>
25#include <media/v4l2-ctrls.h>
26#include <media/v4l2-device.h>
27#include <media/v4l2-subdev.h>
28#include <media/m5mols.h>
29
30#include "m5mols.h"
31#include "m5mols_reg.h"
32
33int m5mols_debug;
34module_param(m5mols_debug, int, 0644);
35
36#define MODULE_NAME "M5MOLS"
37#define M5MOLS_I2C_CHECK_RETRY 500
38
39/* The regulator consumer names for external voltage regulators */
40static struct regulator_bulk_data supplies[] = {
41 {
42 .supply = "core", /* ARM core power, 1.2V */
43 }, {
44 .supply = "dig_18", /* digital power 1, 1.8V */
45 }, {
46 .supply = "d_sensor", /* sensor power 1, 1.8V */
47 }, {
48 .supply = "dig_28", /* digital power 2, 2.8V */
49 }, {
50 .supply = "a_sensor", /* analog power */
51 }, {
52 .supply = "dig_12", /* digital power 3, 1.2V */
53 },
54};
55
56static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = {
57 [M5MOLS_RESTYPE_MONITOR] = {
58 .width = 1920,
59 .height = 1080,
60 .code = V4L2_MBUS_FMT_VYUY8_2X8,
61 .field = V4L2_FIELD_NONE,
62 .colorspace = V4L2_COLORSPACE_JPEG,
63 },
64 [M5MOLS_RESTYPE_CAPTURE] = {
65 .width = 1920,
66 .height = 1080,
67 .code = V4L2_MBUS_FMT_JPEG_1X8,
68 .field = V4L2_FIELD_NONE,
69 .colorspace = V4L2_COLORSPACE_JPEG,
70 },
71};
72#define SIZE_DEFAULT_FFMT ARRAY_SIZE(m5mols_default_ffmt)
73
74static const struct m5mols_resolution m5mols_reg_res[] = {
75 { 0x01, M5MOLS_RESTYPE_MONITOR, 128, 96 }, /* SUB-QCIF */
76 { 0x03, M5MOLS_RESTYPE_MONITOR, 160, 120 }, /* QQVGA */
77 { 0x05, M5MOLS_RESTYPE_MONITOR, 176, 144 }, /* QCIF */
78 { 0x06, M5MOLS_RESTYPE_MONITOR, 176, 176 },
79 { 0x08, M5MOLS_RESTYPE_MONITOR, 240, 320 }, /* QVGA */
80 { 0x09, M5MOLS_RESTYPE_MONITOR, 320, 240 }, /* QVGA */
81 { 0x0c, M5MOLS_RESTYPE_MONITOR, 240, 400 }, /* WQVGA */
82 { 0x0d, M5MOLS_RESTYPE_MONITOR, 400, 240 }, /* WQVGA */
83 { 0x0e, M5MOLS_RESTYPE_MONITOR, 352, 288 }, /* CIF */
84 { 0x13, M5MOLS_RESTYPE_MONITOR, 480, 360 },
85 { 0x15, M5MOLS_RESTYPE_MONITOR, 640, 360 }, /* qHD */
86 { 0x17, M5MOLS_RESTYPE_MONITOR, 640, 480 }, /* VGA */
87 { 0x18, M5MOLS_RESTYPE_MONITOR, 720, 480 },
88 { 0x1a, M5MOLS_RESTYPE_MONITOR, 800, 480 }, /* WVGA */
89 { 0x1f, M5MOLS_RESTYPE_MONITOR, 800, 600 }, /* SVGA */
90 { 0x21, M5MOLS_RESTYPE_MONITOR, 1280, 720 }, /* HD */
91 { 0x25, M5MOLS_RESTYPE_MONITOR, 1920, 1080 }, /* 1080p */
92 { 0x29, M5MOLS_RESTYPE_MONITOR, 3264, 2448 }, /* 2.63fps 8M */
93 { 0x39, M5MOLS_RESTYPE_MONITOR, 800, 602 }, /* AHS_MON debug */
94
95 { 0x02, M5MOLS_RESTYPE_CAPTURE, 320, 240 }, /* QVGA */
96 { 0x04, M5MOLS_RESTYPE_CAPTURE, 400, 240 }, /* WQVGA */
97 { 0x07, M5MOLS_RESTYPE_CAPTURE, 480, 360 },
98 { 0x08, M5MOLS_RESTYPE_CAPTURE, 640, 360 }, /* qHD */
99 { 0x09, M5MOLS_RESTYPE_CAPTURE, 640, 480 }, /* VGA */
100 { 0x0a, M5MOLS_RESTYPE_CAPTURE, 800, 480 }, /* WVGA */
101 { 0x10, M5MOLS_RESTYPE_CAPTURE, 1280, 720 }, /* HD */
102 { 0x14, M5MOLS_RESTYPE_CAPTURE, 1280, 960 }, /* 1M */
103 { 0x17, M5MOLS_RESTYPE_CAPTURE, 1600, 1200 }, /* 2M */
104 { 0x19, M5MOLS_RESTYPE_CAPTURE, 1920, 1080 }, /* Full-HD */
105 { 0x1a, M5MOLS_RESTYPE_CAPTURE, 2048, 1152 }, /* 3Mega */
106 { 0x1b, M5MOLS_RESTYPE_CAPTURE, 2048, 1536 },
107 { 0x1c, M5MOLS_RESTYPE_CAPTURE, 2560, 1440 }, /* 4Mega */
108 { 0x1d, M5MOLS_RESTYPE_CAPTURE, 2560, 1536 },
109 { 0x1f, M5MOLS_RESTYPE_CAPTURE, 2560, 1920 }, /* 5Mega */
110 { 0x21, M5MOLS_RESTYPE_CAPTURE, 3264, 1836 }, /* 6Mega */
111 { 0x22, M5MOLS_RESTYPE_CAPTURE, 3264, 1960 },
112 { 0x25, M5MOLS_RESTYPE_CAPTURE, 3264, 2448 }, /* 8Mega */
113};
114
115/**
116 * m5mols_swap_byte - a byte array to integer conversion function
117 * @length: size in bytes of the I2C packet defined in the M-5MOLS datasheet
118 *
119 * Convert an I2C data byte array, performing any required byte
120 * reordering to assure proper values for each data type, regardless
121 * of the architecture endianness.
122 */
123static u32 m5mols_swap_byte(u8 *data, u8 length)
124{
125 if (length == 1)
126 return *data;
127 else if (length == 2)
128 return be16_to_cpu(*((u16 *)data));
129 else
130 return be32_to_cpu(*((u32 *)data));
131}
132
133/**
134 * m5mols_read - I2C read function
135 * @reg: combination of size, category and command for the I2C packet
136 * @val: read value
137 */
138int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
139{
140 struct i2c_client *client = v4l2_get_subdevdata(sd);
141 u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
142 u8 size = I2C_SIZE(reg);
143 u8 category = I2C_CATEGORY(reg);
144 u8 cmd = I2C_COMMAND(reg);
145 struct i2c_msg msg[2];
146 u8 wbuf[5];
147 int ret;
148
149 if (!client->adapter)
150 return -ENODEV;
151
152 if (size != 1 && size != 2 && size != 4) {
153 v4l2_err(sd, "Wrong data size\n");
154 return -EINVAL;
155 }
156
157 msg[0].addr = client->addr;
158 msg[0].flags = 0;
159 msg[0].len = 5;
160 msg[0].buf = wbuf;
161 wbuf[0] = 5;
162 wbuf[1] = M5MOLS_BYTE_READ;
163 wbuf[2] = category;
164 wbuf[3] = cmd;
165 wbuf[4] = size;
166
167 msg[1].addr = client->addr;
168 msg[1].flags = I2C_M_RD;
169 msg[1].len = size + 1;
170 msg[1].buf = rbuf;
171
172 /* minimum stabilization time */
173 usleep_range(200, 200);
174
175 ret = i2c_transfer(client->adapter, msg, 2);
176 if (ret < 0) {
177 v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n",
178 size, category, cmd, ret);
179 return ret;
180 }
181
182 *val = m5mols_swap_byte(&rbuf[1], size);
183
184 return 0;
185}
186
187/**
188 * m5mols_write - I2C command write function
189 * @reg: combination of size, category and command for the I2C packet
190 * @val: value to write
191 */
192int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
193{
194 struct i2c_client *client = v4l2_get_subdevdata(sd);
195 u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4];
196 u8 category = I2C_CATEGORY(reg);
197 u8 cmd = I2C_COMMAND(reg);
198 u8 size = I2C_SIZE(reg);
199 u32 *buf = (u32 *)&wbuf[4];
200 struct i2c_msg msg[1];
201 int ret;
202
203 if (!client->adapter)
204 return -ENODEV;
205
206 if (size != 1 && size != 2 && size != 4) {
207 v4l2_err(sd, "Wrong data size\n");
208 return -EINVAL;
209 }
210
211 msg->addr = client->addr;
212 msg->flags = 0;
213 msg->len = (u16)size + 4;
214 msg->buf = wbuf;
215 wbuf[0] = size + 4;
216 wbuf[1] = M5MOLS_BYTE_WRITE;
217 wbuf[2] = category;
218 wbuf[3] = cmd;
219
220 *buf = m5mols_swap_byte((u8 *)&val, size);
221
222 usleep_range(200, 200);
223
224 ret = i2c_transfer(client->adapter, msg, 1);
225 if (ret < 0) {
226 v4l2_err(sd, "write failed: size:%d cat:%02x cmd:%02x. %d\n",
227 size, category, cmd, ret);
228 return ret;
229 }
230
231 return 0;
232}
233
234int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
235{
236 u32 busy, i;
237 int ret;
238
239 for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
240 ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
241 if (ret < 0)
242 return ret;
243 if ((busy & mask) == mask)
244 return 0;
245 }
246 return -EBUSY;
247}
248
249/**
250 * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
251 *
252 * Before writing the desired interrupt value, the INT_FACTOR register should
253 * be read to clear any pending interrupts.
254 */
255int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
256{
257 struct m5mols_info *info = to_m5mols(sd);
258 u32 mask = is_available_af(info) ? REG_INT_AF : 0;
259 u32 dummy;
260 int ret;
261
262 ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
263 if (!ret)
264 ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
265 return ret;
266}
267
268/**
269 * m5mols_reg_mode - Write the mode and check busy status
270 *
271 * Changing the M-5MOLS mode always involves a small delay, so the current
272 * busy status needs to be checked to guarantee that the right mode is reached.
273 */
274static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
275{
276 int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
277
278 return ret ? ret : m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, mode);
279}
280
281/**
282 * m5mols_mode - manage the M-5MOLS's mode
283 * @mode: the required operation mode
284 *
285 * The commands of the M-5MOLS are grouped into specific modes. Each function
286 * is guaranteed to work only when the sensor is operating in the mode which
287 * the command belongs to.
288 */
289int m5mols_mode(struct m5mols_info *info, u32 mode)
290{
291 struct v4l2_subdev *sd = &info->sd;
292 int ret = -EINVAL;
293 u32 reg;
294
295	if (mode < REG_PARAMETER || mode > REG_CAPTURE)
296 return ret;
297
298 ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
299 if ((!ret && reg == mode) || ret)
300 return ret;
301
302 switch (reg) {
303 case REG_PARAMETER:
304 ret = m5mols_reg_mode(sd, REG_MONITOR);
305 if (!ret && mode == REG_MONITOR)
306 break;
307 if (!ret)
308 ret = m5mols_reg_mode(sd, REG_CAPTURE);
309 break;
310
311 case REG_MONITOR:
312 if (mode == REG_PARAMETER) {
313 ret = m5mols_reg_mode(sd, REG_PARAMETER);
314 break;
315 }
316
317 ret = m5mols_reg_mode(sd, REG_CAPTURE);
318 break;
319
320 case REG_CAPTURE:
321 ret = m5mols_reg_mode(sd, REG_MONITOR);
322 if (!ret && mode == REG_MONITOR)
323 break;
324 if (!ret)
325 ret = m5mols_reg_mode(sd, REG_PARAMETER);
326 break;
327
328 default:
329 v4l2_warn(sd, "Wrong mode: %d\n", mode);
330 }
331
332 if (!ret)
333 info->mode = mode;
334
335 return ret;
336}
337
338/**
339 * m5mols_get_version - retrieve full revision information of the M-5MOLS
340 *
341 * The version information includes the hardware and firmware revisions,
342 * the AutoFocus algorithm version and the version string.
343 */
344static int m5mols_get_version(struct v4l2_subdev *sd)
345{
346 struct m5mols_info *info = to_m5mols(sd);
347 union {
348 struct m5mols_version ver;
349 u8 bytes[VERSION_SIZE];
350 } version;
351 u32 *value;
352 u8 cmd = CAT0_VER_CUSTOMER;
353 int ret;
354
355 do {
356 value = (u32 *)&version.bytes[cmd];
357 ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
358 if (ret)
359 return ret;
360 } while (cmd++ != CAT0_VER_AWB);
361
362 do {
363 value = (u32 *)&version.bytes[cmd];
364 ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
365 if (ret)
366 return ret;
367 if (cmd >= VERSION_SIZE - 1)
368 return -EINVAL;
369 } while (version.bytes[cmd++]);
370
371 value = (u32 *)&version.bytes[cmd];
372 ret = m5mols_read(sd, AF_VERSION, value);
373 if (ret)
374 return ret;
375
376	/* store the version information byte-swapped so it is readable */
377 info->ver = version.ver;
378 info->ver.fw = be16_to_cpu(info->ver.fw);
379 info->ver.hw = be16_to_cpu(info->ver.hw);
380 info->ver.param = be16_to_cpu(info->ver.param);
381 info->ver.awb = be16_to_cpu(info->ver.awb);
382
383 v4l2_info(sd, "Manufacturer\t[%s]\n",
384 is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
385		"Samsung Electro-Mechanics" :
386 is_manufacturer(info, REG_SAMSUNG_OPTICS) ?
387 "Samsung Fiber-Optics" :
388 is_manufacturer(info, REG_SAMSUNG_TECHWIN) ?
389 "Samsung Techwin" : "None");
390 v4l2_info(sd, "Customer/Project\t[0x%02x/0x%02x]\n",
391 info->ver.customer, info->ver.project);
392
393 if (!is_available_af(info))
394		v4l2_info(sd, "Auto Focus is not supported by this firmware\n");
395
396 return ret;
397}
398
399/**
400 * __find_restype - Lookup M-5MOLS resolution type according to pixel code
401 * @code: pixel code
402 */
403static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code)
404{
405 enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR;
406
407 do {
408 if (code == m5mols_default_ffmt[type].code)
409 return type;
410 } while (type++ != SIZE_DEFAULT_FFMT);
411
412 return 0;
413}
414
415/**
416 * __find_resolution - Lookup preset and type of M-5MOLS's resolution
417 * @mf: pixel format to find/negotiate the resolution preset for
418 * @type: M-5MOLS resolution type
419 * @resolution: M-5MOLS resolution preset register value
420 *
421 * Find nearest resolution matching resolution preset and adjust mf
422 * to supported values.
423 */
424static int __find_resolution(struct v4l2_subdev *sd,
425 struct v4l2_mbus_framefmt *mf,
426 enum m5mols_restype *type,
427 u32 *resolution)
428{
429 const struct m5mols_resolution *fsize = &m5mols_reg_res[0];
430 const struct m5mols_resolution *match = NULL;
431 enum m5mols_restype stype = __find_restype(mf->code);
432 int i = ARRAY_SIZE(m5mols_reg_res);
433 unsigned int min_err = ~0;
434
435 while (i--) {
436 int err;
437 if (stype == fsize->type) {
438 err = abs(fsize->width - mf->width)
439 + abs(fsize->height - mf->height);
440
441 if (err < min_err) {
442 min_err = err;
443 match = fsize;
444 }
445 }
446 fsize++;
447 }
448 if (match) {
449 mf->width = match->width;
450 mf->height = match->height;
451 *resolution = match->reg;
452 *type = stype;
453 return 0;
454 }
455
456 return -EINVAL;
457}
458
459static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
460 struct v4l2_subdev_fh *fh,
461 enum v4l2_subdev_format_whence which,
462 enum m5mols_restype type)
463{
464 if (which == V4L2_SUBDEV_FORMAT_TRY)
465 return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
466
467 return &info->ffmt[type];
468}
469
470static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
471 struct v4l2_subdev_format *fmt)
472{
473 struct m5mols_info *info = to_m5mols(sd);
474 struct v4l2_mbus_framefmt *format;
475
476 if (fmt->pad != 0)
477 return -EINVAL;
478
479 format = __find_format(info, fh, fmt->which, info->res_type);
480 if (!format)
481 return -EINVAL;
482
483 fmt->format = *format;
484 return 0;
485}
486
487static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
488 struct v4l2_subdev_format *fmt)
489{
490 struct m5mols_info *info = to_m5mols(sd);
491 struct v4l2_mbus_framefmt *format = &fmt->format;
492 struct v4l2_mbus_framefmt *sfmt;
493 enum m5mols_restype type;
494 u32 resolution = 0;
495 int ret;
496
497 if (fmt->pad != 0)
498 return -EINVAL;
499
500 ret = __find_resolution(sd, format, &type, &resolution);
501 if (ret < 0)
502 return ret;
503
504 sfmt = __find_format(info, fh, fmt->which, type);
505 if (!sfmt)
506 return 0;
507
508 *sfmt = m5mols_default_ffmt[type];
509 sfmt->width = format->width;
510 sfmt->height = format->height;
511
512 if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
513 info->resolution = resolution;
514 info->code = format->code;
515 info->res_type = type;
516 }
517
518 return 0;
519}
520
521static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
522 struct v4l2_subdev_fh *fh,
523 struct v4l2_subdev_mbus_code_enum *code)
524{
525 if (!code || code->index >= SIZE_DEFAULT_FFMT)
526 return -EINVAL;
527
528 code->code = m5mols_default_ffmt[code->index].code;
529
530 return 0;
531}
532
533static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
534 .enum_mbus_code = m5mols_enum_mbus_code,
535 .get_fmt = m5mols_get_fmt,
536 .set_fmt = m5mols_set_fmt,
537};
538
539/**
540 * m5mols_sync_controls - Apply default scene mode and the current controls
541 *
542 * This is used only when streaming starts, to synchronize the v4l2_ctrl
543 * framework with the M-5MOLS controls. First the default scene mode is applied
544 * to the sensor, then v4l2_ctrl_handler_setup() is called. Some control values
545 * may duplicate what the scene mode already set up, but the control values
546 * should take priority over the scene mode defaults.
547 */
548int m5mols_sync_controls(struct m5mols_info *info)
549{
550 int ret = -EINVAL;
551
552 if (!is_ctrl_synced(info)) {
553 ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL);
554 if (ret)
555 return ret;
556
557 v4l2_ctrl_handler_setup(&info->handle);
558 info->ctrl_sync = true;
559 }
560
561 return ret;
562}
563
564/**
565 * m5mols_start_monitor - Start the monitor mode
566 *
567 * Before applying the controls, set up the resolution and frame rate
568 * in PARAMETER mode, and then switch over to MONITOR mode.
569 */
570static int m5mols_start_monitor(struct m5mols_info *info)
571{
572 struct v4l2_subdev *sd = &info->sd;
573 int ret;
574
575 ret = m5mols_mode(info, REG_PARAMETER);
576 if (!ret)
577 ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution);
578 if (!ret)
579 ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30);
580 if (!ret)
581 ret = m5mols_mode(info, REG_MONITOR);
582 if (!ret)
583 ret = m5mols_sync_controls(info);
584
585 return ret;
586}
587
588static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
589{
590 struct m5mols_info *info = to_m5mols(sd);
591
592 if (enable) {
593 int ret = -EINVAL;
594
595 if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
596 ret = m5mols_start_monitor(info);
597 if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
598 ret = m5mols_start_capture(info);
599
600 return ret;
601 }
602
603 return m5mols_mode(info, REG_PARAMETER);
604}
605
606static const struct v4l2_subdev_video_ops m5mols_video_ops = {
607 .s_stream = m5mols_s_stream,
608};
609
610static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
611{
612 struct v4l2_subdev *sd = to_sd(ctrl);
613 struct m5mols_info *info = to_m5mols(sd);
614 int ret;
615
616 info->mode_save = info->mode;
617
618 ret = m5mols_mode(info, REG_PARAMETER);
619 if (!ret)
620 ret = m5mols_set_ctrl(ctrl);
621 if (!ret)
622 ret = m5mols_mode(info, info->mode_save);
623
624 return ret;
625}
626
627static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
628 .s_ctrl = m5mols_s_ctrl,
629};
630
631static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
632{
633 struct v4l2_subdev *sd = &info->sd;
634 struct i2c_client *client = v4l2_get_subdevdata(sd);
635 const struct m5mols_platform_data *pdata = info->pdata;
636 int ret;
637
638 if (enable) {
639 if (is_powered(info))
640 return 0;
641
642 if (info->set_power) {
643 ret = info->set_power(&client->dev, 1);
644 if (ret)
645 return ret;
646 }
647
648 ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
649 if (ret) {
650 info->set_power(&client->dev, 0);
651 return ret;
652 }
653
654 gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity);
655 usleep_range(1000, 1000);
656 info->power = true;
657
658 return ret;
659 }
660
661 if (!is_powered(info))
662 return 0;
663
664 ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
665 if (ret)
666 return ret;
667
668 if (info->set_power)
669 info->set_power(&client->dev, 0);
670
671 gpio_set_value(pdata->gpio_reset, pdata->reset_polarity);
672 usleep_range(1000, 1000);
673 info->power = false;
674
675 return ret;
676}
677
678/* m5mols_update_fw - optional firmware update routine */
679int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
680 int (*set_power)(struct m5mols_info *, bool))
681{
682 return 0;
683}
684
685/**
686 * m5mols_sensor_armboot - Booting M-5MOLS internal ARM core.
687 *
688 * Booting the internal ARM core makes the M-5MOLS ready to accept commands
689 * over I2C. It is the first thing to do after the sensor is powered up, and
690 * at least 520 ms must pass after issuing ARM boot, as recommended by the M-5MOLS datasheet.
691 */
692static int m5mols_sensor_armboot(struct v4l2_subdev *sd)
693{
694 int ret;
695
696 ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT);
697 if (ret < 0)
698 return ret;
699
700 msleep(520);
701
702 ret = m5mols_get_version(sd);
703 if (!ret)
704 ret = m5mols_update_fw(sd, m5mols_sensor_power);
705 if (ret)
706 return ret;
707
708 v4l2_dbg(1, m5mols_debug, sd, "Success ARM Booting\n");
709
710 ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI);
711 if (!ret)
712 ret = m5mols_enable_interrupt(sd, REG_INT_AF);
713
714 return ret;
715}
716
717static int m5mols_init_controls(struct m5mols_info *info)
718{
719 struct v4l2_subdev *sd = &info->sd;
720 u16 max_exposure;
721 u16 step_zoom;
722 int ret;
723
724	/* Determine the range & step of control values for the various FW versions */
725 ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
726 if (!ret)
727 step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
728 if (ret)
729 return ret;
730
731 v4l2_ctrl_handler_init(&info->handle, 6);
732 info->autowb = v4l2_ctrl_new_std(&info->handle,
733 &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
734 0, 1, 1, 0);
735 info->saturation = v4l2_ctrl_new_std(&info->handle,
736 &m5mols_ctrl_ops, V4L2_CID_SATURATION,
737 1, 5, 1, 3);
738 info->zoom = v4l2_ctrl_new_std(&info->handle,
739 &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE,
740 1, 70, step_zoom, 1);
741 info->exposure = v4l2_ctrl_new_std(&info->handle,
742 &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
743 0, max_exposure, 1, (int)max_exposure/2);
744 info->colorfx = v4l2_ctrl_new_std_menu(&info->handle,
745 &m5mols_ctrl_ops, V4L2_CID_COLORFX,
746 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
747 info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
748 &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
749 1, 0, V4L2_EXPOSURE_MANUAL);
750
751 sd->ctrl_handler = &info->handle;
752 if (info->handle.error) {
753 v4l2_err(sd, "Failed to initialize controls: %d\n", ret);
754 v4l2_ctrl_handler_free(&info->handle);
755 return info->handle.error;
756 }
757
758 v4l2_ctrl_cluster(2, &info->autoexposure);
759
760 return 0;
761}
762
763/**
764 * m5mols_s_power - Main sensor power control function
765 *
766 * To prevent breaking the lens when the sensor is powered off, the Soft-Landing
767 * algorithm is called where available. The Soft-Landing algorithm's availability
768 * depends on the firmware provider.
769 */
770static int m5mols_s_power(struct v4l2_subdev *sd, int on)
771{
772 struct m5mols_info *info = to_m5mols(sd);
773 int ret;
774
775 if (on) {
776 ret = m5mols_sensor_power(info, true);
777 if (!ret)
778 ret = m5mols_sensor_armboot(sd);
779 if (!ret)
780 ret = m5mols_init_controls(info);
781 if (ret)
782 return ret;
783
784 info->ffmt[M5MOLS_RESTYPE_MONITOR] =
785 m5mols_default_ffmt[M5MOLS_RESTYPE_MONITOR];
786 info->ffmt[M5MOLS_RESTYPE_CAPTURE] =
787 m5mols_default_ffmt[M5MOLS_RESTYPE_CAPTURE];
788 return ret;
789 }
790
791 if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) {
792 ret = m5mols_mode(info, REG_MONITOR);
793 if (!ret)
794 ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP);
795 if (!ret)
796 ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF);
797 if (!ret)
798 ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS,
799 REG_AF_IDLE);
800 if (!ret)
801 v4l2_info(sd, "Success soft-landing lens\n");
802 }
803
804 ret = m5mols_sensor_power(info, false);
805 if (!ret) {
806 v4l2_ctrl_handler_free(&info->handle);
807 info->ctrl_sync = false;
808 }
809
810 return ret;
811}
812
813static int m5mols_log_status(struct v4l2_subdev *sd)
814{
815 struct m5mols_info *info = to_m5mols(sd);
816
817 v4l2_ctrl_handler_log_status(&info->handle, sd->name);
818
819 return 0;
820}
821
822static const struct v4l2_subdev_core_ops m5mols_core_ops = {
823 .s_power = m5mols_s_power,
824 .g_ctrl = v4l2_subdev_g_ctrl,
825 .s_ctrl = v4l2_subdev_s_ctrl,
826 .queryctrl = v4l2_subdev_queryctrl,
827 .querymenu = v4l2_subdev_querymenu,
828 .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
829 .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
830 .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
831 .log_status = m5mols_log_status,
832};
833
834static const struct v4l2_subdev_ops m5mols_ops = {
835 .core = &m5mols_core_ops,
836 .pad = &m5mols_pad_ops,
837 .video = &m5mols_video_ops,
838};
839
840static void m5mols_irq_work(struct work_struct *work)
841{
842 struct m5mols_info *info =
843 container_of(work, struct m5mols_info, work_irq);
844 struct v4l2_subdev *sd = &info->sd;
845 u32 reg;
846 int ret;
847
848 if (!is_powered(info) ||
849 m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
850 return;
851
852 switch (info->interrupt & REG_INT_MASK) {
853 case REG_INT_AF:
854 if (!is_available_af(info))
855 break;
856 ret = m5mols_read(sd, AF_STATUS, &reg);
857 v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
858 reg == REG_AF_FAIL ? "Failed" :
859 reg == REG_AF_SUCCESS ? "Success" :
860 reg == REG_AF_IDLE ? "Idle" : "Busy");
861 break;
862 case REG_INT_CAPTURE:
863 if (!test_and_set_bit(ST_CAPT_IRQ, &info->flags))
864 wake_up_interruptible(&info->irq_waitq);
865
866 v4l2_dbg(2, m5mols_debug, sd, "CAPTURE\n");
867 break;
868 default:
869 v4l2_dbg(2, m5mols_debug, sd, "Undefined: %02x\n", reg);
870 break;
871 };
872}
873
874static irqreturn_t m5mols_irq_handler(int irq, void *data)
875{
876 struct v4l2_subdev *sd = data;
877 struct m5mols_info *info = to_m5mols(sd);
878
879 schedule_work(&info->work_irq);
880
881 return IRQ_HANDLED;
882}
883
884static int __devinit m5mols_probe(struct i2c_client *client,
885 const struct i2c_device_id *id)
886{
887 const struct m5mols_platform_data *pdata = client->dev.platform_data;
888 struct m5mols_info *info;
889 struct v4l2_subdev *sd;
890 int ret;
891
892 if (pdata == NULL) {
893 dev_err(&client->dev, "No platform data\n");
894 return -EINVAL;
895 }
896
897 if (!gpio_is_valid(pdata->gpio_reset)) {
898 dev_err(&client->dev, "No valid RESET GPIO specified\n");
899 return -EINVAL;
900 }
901
902 if (!pdata->irq) {
903 dev_err(&client->dev, "Interrupt not assigned\n");
904 return -EINVAL;
905 }
906
907 info = kzalloc(sizeof(struct m5mols_info), GFP_KERNEL);
908 if (!info)
909 return -ENOMEM;
910
911 info->pdata = pdata;
912 info->set_power = pdata->set_power;
913
914 ret = gpio_request(pdata->gpio_reset, "M5MOLS_NRST");
915 if (ret) {
916 dev_err(&client->dev, "Failed to request gpio: %d\n", ret);
917 goto out_free;
918 }
919 gpio_direction_output(pdata->gpio_reset, pdata->reset_polarity);
920
921 ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies);
922 if (ret) {
923 dev_err(&client->dev, "Failed to get regulators: %d\n", ret);
924 goto out_gpio;
925 }
926
927 sd = &info->sd;
928 strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
929 v4l2_i2c_subdev_init(sd, client, &m5mols_ops);
930
931 info->pad.flags = MEDIA_PAD_FL_SOURCE;
932 ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
933 if (ret < 0)
934 goto out_reg;
935 sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
936
937 init_waitqueue_head(&info->irq_waitq);
938 INIT_WORK(&info->work_irq, m5mols_irq_work);
939 ret = request_irq(pdata->irq, m5mols_irq_handler,
940 IRQF_TRIGGER_RISING, MODULE_NAME, sd);
941 if (ret) {
942 dev_err(&client->dev, "Interrupt request failed: %d\n", ret);
943 goto out_me;
944 }
945 info->res_type = M5MOLS_RESTYPE_MONITOR;
946 return 0;
947out_me:
948 media_entity_cleanup(&sd->entity);
949out_reg:
950 regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
951out_gpio:
952 gpio_free(pdata->gpio_reset);
953out_free:
954 kfree(info);
955 return ret;
956}
957
958static int __devexit m5mols_remove(struct i2c_client *client)
959{
960 struct v4l2_subdev *sd = i2c_get_clientdata(client);
961 struct m5mols_info *info = to_m5mols(sd);
962
963 v4l2_device_unregister_subdev(sd);
964 free_irq(info->pdata->irq, sd);
965
966 regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
967 gpio_free(info->pdata->gpio_reset);
968 media_entity_cleanup(&sd->entity);
969 kfree(info);
970 return 0;
971}
972
973static const struct i2c_device_id m5mols_id[] = {
974 { MODULE_NAME, 0 },
975 { },
976};
977MODULE_DEVICE_TABLE(i2c, m5mols_id);
978
979static struct i2c_driver m5mols_i2c_driver = {
980 .driver = {
981 .name = MODULE_NAME,
982 },
983 .probe = m5mols_probe,
984 .remove = __devexit_p(m5mols_remove),
985 .id_table = m5mols_id,
986};
987
988static int __init m5mols_mod_init(void)
989{
990 return i2c_add_driver(&m5mols_i2c_driver);
991}
992
993static void __exit m5mols_mod_exit(void)
994{
995 i2c_del_driver(&m5mols_i2c_driver);
996}
997
998module_init(m5mols_mod_init);
999module_exit(m5mols_mod_exit);
1000
1001MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>");
1002MODULE_AUTHOR("Dongsoo Kim <dongsoo45.kim@samsung.com>");
1003MODULE_DESCRIPTION("Fujitsu M-5MOLS 8M Pixel camera driver");
1004MODULE_LICENSE("GPL");
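
The interrupt path in the driver above splits the handling in two: m5mols_irq_handler() runs in hard IRQ context and only schedules m5mols_irq_work(), because reading the interrupt status register over I2C may sleep. A minimal, self-contained sketch of that pattern follows; the my_dev, my_irq_work and my_irq_handler names are illustrative and not part of this driver.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct work_irq;
};

/* Bottom half: runs in process context, so sleeping bus I/O is allowed. */
static void my_irq_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work_irq);

	/* e.g. read the interrupt status register over I2C here */
	(void)dev;
}

/* Top half: cannot sleep, so it only defers the real work. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	schedule_work(&dev->work_irq);
	return IRQ_HANDLED;
}

/*
 * Setup, typically done in probe():
 *	INIT_WORK(&dev->work_irq, my_irq_work);
 *	err = request_irq(irq, my_irq_handler, IRQF_TRIGGER_RISING,
 *			  "my-dev", dev);
 */
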
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
new file mode 100644
index 000000000000..b83e36fc6ac6
--- /dev/null
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -0,0 +1,399 @@
1/*
2 * Register map for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef M5MOLS_REG_H
17#define M5MOLS_REG_H
18
19#define M5MOLS_I2C_MAX_SIZE 4
20#define M5MOLS_BYTE_READ 0x01
21#define M5MOLS_BYTE_WRITE 0x02
22
23#define I2C_CATEGORY(__cat) ((__cat >> 16) & 0xff)
24#define I2C_COMMAND(__comm) ((__comm >> 8) & 0xff)
25#define I2C_SIZE(__reg_s) ((__reg_s) & 0xff)
26#define I2C_REG(__cat, __cmd, __reg_s) ((__cat << 16) | (__cmd << 8) | __reg_s)
27
28/*
29 * Category section register
30 *
31 * A category is a set of related M-5MOLS commands.
32 */
33#define CAT_SYSTEM 0x00
34#define CAT_PARAM 0x01
35#define CAT_MONITOR 0x02
36#define CAT_AE 0x03
37#define CAT_WB 0x06
38#define CAT_EXIF 0x07
39#define CAT_FD 0x09
40#define CAT_LENS 0x0a
41#define CAT_CAPT_PARM 0x0b
42#define CAT_CAPT_CTRL 0x0c
43#define CAT_FLASH 0x0f /* related to FW, revisions, booting */
44
45/*
46 * Category 0 - SYSTEM mode
47 *
48 * The SYSTEM category covers the sensor-wide functionality: version
49 * information, interrupts, operating mode selection and the sensor status.
50 * The M-5MOLS sensor with ISP varies by packaging, manufacturer, customer
51 * and project code, and the function details may differ between variants,
52 * so the version information helps determine which methods the driver
53 * should use.
54 *
55 * There are many registers between the customer version address and the AWB
56 * one. For more specific contents, see the definitions in the m5mols.h file.
57 */
58#define CAT0_VER_CUSTOMER 0x00 /* customer version */
59#define CAT0_VER_AWB 0x09 /* Auto WB version */
60#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
61#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
62#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
63#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
64#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
65
66#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
67#define REG_SYSINIT 0x00 /* SYSTEM mode */
68#define REG_PARAMETER 0x01 /* PARAMETER mode */
69#define REG_MONITOR 0x02 /* MONITOR mode */
70#define REG_CAPTURE 0x03 /* CAPTURE mode */
71
72#define SYSTEM_CMD(__cmd) I2C_REG(CAT_SYSTEM, __cmd, 1)
73#define SYSTEM_VER_STRING I2C_REG(CAT_SYSTEM, CAT0_VER_STRING, 1)
74#define REG_SAMSUNG_ELECTRO "SE" /* Samsung Electro-Mechanics */
75#define REG_SAMSUNG_OPTICS "OP" /* Samsung Fiber-Optics */
76#define REG_SAMSUNG_TECHWIN "TB" /* Samsung Techwin */
77
78#define SYSTEM_INT_FACTOR I2C_REG(CAT_SYSTEM, CAT0_INT_FACTOR, 1)
79#define SYSTEM_INT_ENABLE I2C_REG(CAT_SYSTEM, CAT0_INT_ENABLE, 1)
80#define REG_INT_MODE (1 << 0)
81#define REG_INT_AF (1 << 1)
82#define REG_INT_ZOOM (1 << 2)
83#define REG_INT_CAPTURE (1 << 3)
84#define REG_INT_FRAMESYNC (1 << 4)
85#define REG_INT_FD (1 << 5)
86#define REG_INT_LENS_INIT (1 << 6)
87#define REG_INT_SOUND (1 << 7)
88#define REG_INT_MASK 0x0f
89
90/*
91 * Category 1 - PARAMETER mode
92 *
93 * This category configures the camera features of the M-5MOLS: the preview
94 * (MONITOR) resolution, the frame rate, the interface between the sensor
95 * and the Application Processor, and the image effects.
96 */
97#define CAT1_DATA_INTERFACE 0x00 /* interface between sensor and AP */
98#define CAT1_MONITOR_SIZE 0x01 /* resolution at the MONITOR mode */
99#define CAT1_MONITOR_FPS 0x02 /* frame per second at this mode */
100#define CAT1_EFFECT 0x0b /* image effects */
101
102#define PARM_MON_SIZE I2C_REG(CAT_PARAM, CAT1_MONITOR_SIZE, 1)
103
104#define PARM_MON_FPS I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1)
105#define REG_FPS_30 0x02
106
107#define PARM_INTERFACE I2C_REG(CAT_PARAM, CAT1_DATA_INTERFACE, 1)
108#define REG_INTERFACE_MIPI 0x02
109
110#define PARM_EFFECT I2C_REG(CAT_PARAM, CAT1_EFFECT, 1)
111#define REG_EFFECT_OFF 0x00
112#define REG_EFFECT_NEGA 0x01
113#define REG_EFFECT_EMBOSS 0x06
114#define REG_EFFECT_OUTLINE 0x07
115#define REG_EFFECT_WATERCOLOR 0x08
116
117/*
118 * Category 2 - MONITOR mode
119 *
120 * The MONITOR mode is the preview mode mentioned above. The M-5MOLS also has
121 * a mode named "Preview", but that one is only used in a specific
122 * video-recording case. The MONITOR mode supports only the YUYV format; the
123 * JPEG and RAW formats are supported by the CAPTURE mode. This category also
124 * offers options such as zoom, color effect (different from the effect in the
125 * PARAMETER mode) and an anti hand-shake algorithm.
126 */
127#define CAT2_ZOOM 0x01 /* set the zoom position & execute */
128#define CAT2_ZOOM_STEP 0x03 /* set the zoom step */
129#define CAT2_CFIXB 0x09 /* CB value for color effect */
130#define CAT2_CFIXR 0x0a /* CR value for color effect */
131#define CAT2_COLOR_EFFECT 0x0b /* set on/off of color effect */
132#define CAT2_CHROMA_LVL 0x0f /* set chroma level */
133#define CAT2_CHROMA_EN 0x10 /* set on/off of chroma */
134#define CAT2_EDGE_LVL 0x11 /* set sharpness level */
135#define CAT2_EDGE_EN 0x12 /* set on/off sharpness */
136#define CAT2_TONE_CTL 0x25 /* set tone color(contrast) */
137
138#define MON_ZOOM I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1)
139
140#define MON_CFIXR I2C_REG(CAT_MONITOR, CAT2_CFIXR, 1)
141#define MON_CFIXB I2C_REG(CAT_MONITOR, CAT2_CFIXB, 1)
142#define REG_CFIXB_SEPIA 0xd8
143#define REG_CFIXR_SEPIA 0x18
144
145#define MON_EFFECT I2C_REG(CAT_MONITOR, CAT2_COLOR_EFFECT, 1)
146#define REG_COLOR_EFFECT_OFF 0x00
147#define REG_COLOR_EFFECT_ON 0x01
148
149#define MON_CHROMA_EN I2C_REG(CAT_MONITOR, CAT2_CHROMA_EN, 1)
150#define MON_CHROMA_LVL I2C_REG(CAT_MONITOR, CAT2_CHROMA_LVL, 1)
151#define REG_CHROMA_OFF 0x00
152#define REG_CHROMA_ON 0x01
153
154#define MON_EDGE_EN I2C_REG(CAT_MONITOR, CAT2_EDGE_EN, 1)
155#define MON_EDGE_LVL I2C_REG(CAT_MONITOR, CAT2_EDGE_LVL, 1)
156#define REG_EDGE_OFF 0x00
157#define REG_EDGE_ON 0x01
158
159#define MON_TONE_CTL I2C_REG(CAT_MONITOR, CAT2_TONE_CTL, 1)
160
161/*
162 * Category 3 - Auto Exposure
163 *
164 * The M-5MOLS provides exposure control similar to a digital camera. This
165 * category supports AE locking, various AE modes (exposure ranges), ISO,
166 * flicker compensation, EV bias, shutter and metering. The maximum/minimum
167 * exposure gain values depend on the M-5MOLS firmware and may differ, so this
168 * category also allows reading the max/min values. The MONITOR and CAPTURE
169 * modes each have their own gain/shutter/maximum exposure values.
170 */
171#define CAT3_AE_LOCK 0x00 /* locking Auto exposure */
172#define CAT3_AE_MODE 0x01 /* set AE mode, mode means range */
173#define CAT3_ISO 0x05 /* set ISO */
174#define CAT3_EV_PRESET_MONITOR 0x0a /* EV(scenemode) preset for MONITOR */
175#define CAT3_EV_PRESET_CAPTURE 0x0b /* EV(scenemode) preset for CAPTURE */
176#define CAT3_MANUAL_GAIN_MON 0x12 /* metering value for the MONITOR */
177#define CAT3_MAX_GAIN_MON 0x1a /* max gain value for the MONITOR */
178#define CAT3_MANUAL_GAIN_CAP 0x26 /* metering value for the CAPTURE */
179#define CAT3_AE_INDEX 0x38 /* AE index */
180
181#define AE_LOCK I2C_REG(CAT_AE, CAT3_AE_LOCK, 1)
182#define REG_AE_UNLOCK 0x00
183#define REG_AE_LOCK 0x01
184
185#define AE_MODE I2C_REG(CAT_AE, CAT3_AE_MODE, 1)
186#define REG_AE_OFF 0x00 /* AE off */
187#define REG_AE_ALL 0x01 /* calc AE in all block integral */
188#define REG_AE_CENTER 0x03 /* calc AE in center weighted */
189#define REG_AE_SPOT 0x06 /* calc AE in specific spot */
190
191#define AE_ISO I2C_REG(CAT_AE, CAT3_ISO, 1)
192#define REG_ISO_AUTO 0x00
193#define REG_ISO_50 0x01
194#define REG_ISO_100 0x02
195#define REG_ISO_200 0x03
196#define REG_ISO_400 0x04
197#define REG_ISO_800 0x05
198
199#define AE_EV_PRESET_MONITOR I2C_REG(CAT_AE, CAT3_EV_PRESET_MONITOR, 1)
200#define AE_EV_PRESET_CAPTURE I2C_REG(CAT_AE, CAT3_EV_PRESET_CAPTURE, 1)
201#define REG_SCENE_NORMAL 0x00
202#define REG_SCENE_PORTRAIT 0x01
203#define REG_SCENE_LANDSCAPE 0x02
204#define REG_SCENE_SPORTS 0x03
205#define REG_SCENE_PARTY_INDOOR 0x04
206#define REG_SCENE_BEACH_SNOW 0x05
207#define REG_SCENE_SUNSET 0x06
208#define REG_SCENE_DAWN_DUSK 0x07
209#define REG_SCENE_FALL 0x08
210#define REG_SCENE_NIGHT 0x09
211#define REG_SCENE_AGAINST_LIGHT 0x0a
212#define REG_SCENE_FIRE 0x0b
213#define REG_SCENE_TEXT 0x0c
214#define REG_SCENE_CANDLE 0x0d
215
216#define AE_MAN_GAIN_MON I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2)
217#define AE_MAX_GAIN_MON I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2)
218#define AE_MAN_GAIN_CAP I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_CAP, 2)
219
220#define AE_INDEX I2C_REG(CAT_AE, CAT3_AE_INDEX, 1)
221#define REG_AE_INDEX_20_NEG 0x00
222#define REG_AE_INDEX_15_NEG 0x01
223#define REG_AE_INDEX_10_NEG 0x02
224#define REG_AE_INDEX_05_NEG 0x03
225#define REG_AE_INDEX_00 0x04
226#define REG_AE_INDEX_05_POS 0x05
227#define REG_AE_INDEX_10_POS 0x06
228#define REG_AE_INDEX_15_POS 0x07
229#define REG_AE_INDEX_20_POS 0x08
230
231/*
232 * Category 6 - White Balance
233 *
234 * This category provides AWB locking, mode, preset, speed, gain bias, etc.
235 */
236#define CAT6_AWB_LOCK 0x00 /* locking Auto Whitebalance */
237#define CAT6_AWB_MODE 0x02 /* set Auto or Manual */
238#define CAT6_AWB_MANUAL 0x03 /* set Manual(preset) value */
239
240#define AWB_LOCK I2C_REG(CAT_WB, CAT6_AWB_LOCK, 1)
241#define REG_AWB_UNLOCK 0x00
242#define REG_AWB_LOCK 0x01
243
244#define AWB_MODE I2C_REG(CAT_WB, CAT6_AWB_MODE, 1)
245#define REG_AWB_AUTO 0x01 /* AWB auto */
246#define REG_AWB_PRESET 0x02 /* AWB preset */
247
248#define AWB_MANUAL I2C_REG(CAT_WB, CAT6_AWB_MANUAL, 1)
249#define REG_AWB_INCANDESCENT 0x01
250#define REG_AWB_FLUORESCENT_1 0x02
251#define REG_AWB_FLUORESCENT_2 0x03
252#define REG_AWB_DAYLIGHT 0x04
253#define REG_AWB_CLOUDY 0x05
254#define REG_AWB_SHADE 0x06
255#define REG_AWB_HORIZON 0x07
256#define REG_AWB_LEDLIGHT 0x09
257
258/*
259 * Category 7 - EXIF information
260 */
261#define CAT7_INFO_EXPTIME_NU 0x00
262#define CAT7_INFO_EXPTIME_DE 0x04
263#define CAT7_INFO_TV_NU 0x08
264#define CAT7_INFO_TV_DE 0x0c
265#define CAT7_INFO_AV_NU 0x10
266#define CAT7_INFO_AV_DE 0x14
267#define CAT7_INFO_BV_NU 0x18
268#define CAT7_INFO_BV_DE 0x1c
269#define CAT7_INFO_EBV_NU 0x20
270#define CAT7_INFO_EBV_DE 0x24
271#define CAT7_INFO_ISO 0x28
272#define CAT7_INFO_FLASH 0x2a
273#define CAT7_INFO_SDR 0x2c
274#define CAT7_INFO_QVAL 0x2e
275
276#define EXIF_INFO_EXPTIME_NU I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_NU, 4)
277#define EXIF_INFO_EXPTIME_DE I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_DE, 4)
278#define EXIF_INFO_TV_NU I2C_REG(CAT_EXIF, CAT7_INFO_TV_NU, 4)
279#define EXIF_INFO_TV_DE I2C_REG(CAT_EXIF, CAT7_INFO_TV_DE, 4)
280#define EXIF_INFO_AV_NU I2C_REG(CAT_EXIF, CAT7_INFO_AV_NU, 4)
281#define EXIF_INFO_AV_DE I2C_REG(CAT_EXIF, CAT7_INFO_AV_DE, 4)
282#define EXIF_INFO_BV_NU I2C_REG(CAT_EXIF, CAT7_INFO_BV_NU, 4)
283#define EXIF_INFO_BV_DE I2C_REG(CAT_EXIF, CAT7_INFO_BV_DE, 4)
284#define EXIF_INFO_EBV_NU I2C_REG(CAT_EXIF, CAT7_INFO_EBV_NU, 4)
285#define EXIF_INFO_EBV_DE I2C_REG(CAT_EXIF, CAT7_INFO_EBV_DE, 4)
286#define EXIF_INFO_ISO I2C_REG(CAT_EXIF, CAT7_INFO_ISO, 2)
287#define EXIF_INFO_FLASH I2C_REG(CAT_EXIF, CAT7_INFO_FLASH, 2)
288#define EXIF_INFO_SDR I2C_REG(CAT_EXIF, CAT7_INFO_SDR, 2)
289#define EXIF_INFO_QVAL I2C_REG(CAT_EXIF, CAT7_INFO_QVAL, 2)
290
291/*
292 * Category 9 - Face Detection
293 */
294#define CAT9_FD_CTL 0x00
295
296#define FD_CTL I2C_REG(CAT_FD, CAT9_FD_CTL, 1)
297#define BIT_FD_EN 0
298#define BIT_FD_DRAW_FACE_FRAME 4
299#define BIT_FD_DRAW_SMILE_LVL 6
300#define REG_FD(shift) (1 << shift)
301#define REG_FD_OFF 0x0
302
303/*
304 * Category A - Lens Parameter
305 */
306#define CATA_AF_MODE 0x01
307#define CATA_AF_EXECUTE 0x02
308#define CATA_AF_STATUS 0x03
309#define CATA_AF_VERSION 0x0a
310
311#define AF_MODE I2C_REG(CAT_LENS, CATA_AF_MODE, 1)
312#define REG_AF_NORMAL 0x00 /* Normal AF, one time */
313#define REG_AF_MACRO 0x01 /* Macro AF, one time */
314#define REG_AF_POWEROFF 0x07
315
316#define AF_EXECUTE I2C_REG(CAT_LENS, CATA_AF_EXECUTE, 1)
317#define REG_AF_STOP 0x00
318#define REG_AF_EXE_AUTO 0x01
319#define REG_AF_EXE_CAF 0x02
320
321#define AF_STATUS I2C_REG(CAT_LENS, CATA_AF_STATUS, 1)
322#define REG_AF_FAIL 0x00
323#define REG_AF_SUCCESS 0x02
324#define REG_AF_IDLE 0x04
325#define REG_AF_BUSY 0x05
326
327#define AF_VERSION I2C_REG(CAT_LENS, CATA_AF_VERSION, 1)
328
329/*
330 * Category B - CAPTURE Parameter
331 */
332#define CATB_YUVOUT_MAIN 0x00
333#define CATB_MAIN_IMAGE_SIZE 0x01
334#define CATB_MCC_MODE 0x1d
335#define CATB_WDR_EN 0x2c
336#define CATB_LIGHT_CTRL 0x40
337#define CATB_FLASH_CTRL 0x41
338
339#define CAPP_YUVOUT_MAIN I2C_REG(CAT_CAPT_PARM, CATB_YUVOUT_MAIN, 1)
340#define REG_YUV422 0x00
341#define REG_BAYER10 0x05
342#define REG_BAYER8 0x06
343#define REG_JPEG 0x10
344
345#define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, CATB_MAIN_IMAGE_SIZE, 1)
346
347#define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, CATB_MCC_MODE, 1)
348#define REG_MCC_OFF 0x00
349#define REG_MCC_NORMAL 0x01
350
351#define CAPP_WDR_EN I2C_REG(CAT_CAPT_PARM, CATB_WDR_EN, 1)
352#define REG_WDR_OFF 0x00
353#define REG_WDR_ON 0x01
354#define REG_WDR_AUTO 0x02
355
356#define CAPP_LIGHT_CTRL I2C_REG(CAT_CAPT_PARM, CATB_LIGHT_CTRL, 1)
357#define REG_LIGHT_OFF 0x00
358#define REG_LIGHT_ON 0x01
359#define REG_LIGHT_AUTO 0x02
360
361#define CAPP_FLASH_CTRL I2C_REG(CAT_CAPT_PARM, CATB_FLASH_CTRL, 1)
362#define REG_FLASH_OFF 0x00
363#define REG_FLASH_ON 0x01
364#define REG_FLASH_AUTO 0x02
365
366/*
367 * Category C - CAPTURE Control
368 */
369#define CATC_CAP_MODE 0x00
370#define CATC_CAP_SEL_FRAME 0x06 /* It determines Single or Multi */
371#define CATC_CAP_START 0x09
372#define CATC_CAP_IMAGE_SIZE 0x0d
373#define CATC_CAP_THUMB_SIZE 0x11
374
375#define CAPC_MODE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_MODE, 1)
376#define REG_CAP_NONE 0x00
377#define REG_CAP_ANTI_SHAKE 0x02
378
379#define CAPC_SEL_FRAME I2C_REG(CAT_CAPT_CTRL, CATC_CAP_SEL_FRAME, 1)
380
381#define CAPC_START I2C_REG(CAT_CAPT_CTRL, CATC_CAP_START, 1)
382#define REG_CAP_START_MAIN 0x01
383#define REG_CAP_START_THUMB 0x03
384
385#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
386#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
387
388/*
389 * Category F - Flash
390 *
391 * This category covers the internal flash memory (firmware) and system startup.
392 */
393#define CATF_CAM_START 0x12 /* It starts the internal ARM core booting
394 * after power-up */
395
396#define FLASH_CAM_START I2C_REG(CAT_FLASH, CATF_CAM_START, 1)
397#define REG_START_ARM_BOOT 0x01
398
399#endif /* M5MOLS_REG_H */
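
The I2C_REG()/I2C_CATEGORY()/I2C_COMMAND()/I2C_SIZE() macros above pack a category, a command offset and a transfer size into one u32 register address. The sketch below only illustrates that packing; m5mols_show_reg() is a hypothetical helper, not part of the header.

#include <linux/kernel.h>
#include "m5mols_reg.h"

/* Hypothetical helper: split a packed register address back into the
 * category/command/size triplet that goes on the I2C bus. */
static void m5mols_show_reg(u32 reg)
{
	pr_info("category %#x, command %#x, size %u\n",
		I2C_CATEGORY(reg), I2C_COMMAND(reg), I2C_SIZE(reg));
}

/*
 * Example: AE_MAN_GAIN_MON is I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2),
 * i.e. (0x03 << 16) | (0x12 << 8) | 2 == 0x031202, so the call above
 * would print "category 0x3, command 0x12, size 2".
 */
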
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index d4fe7bc92a1d..4ada9be1d430 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -47,7 +47,7 @@
47#include <plat/dma.h> 47#include <plat/dma.h>
48#include <plat/vram.h> 48#include <plat/vram.h>
49#include <plat/vrfb.h> 49#include <plat/vrfb.h>
50#include <plat/display.h> 50#include <video/omapdss.h>
51 51
52#include "omap_voutlib.h" 52#include "omap_voutlib.h"
53#include "omap_voutdef.h" 53#include "omap_voutdef.h"
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
index ea3a047f8bca..659497b84996 100644
--- a/drivers/media/video/omap/omap_voutdef.h
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -11,7 +11,7 @@
11#ifndef OMAP_VOUTDEF_H 11#ifndef OMAP_VOUTDEF_H
12#define OMAP_VOUTDEF_H 12#define OMAP_VOUTDEF_H
13 13
14#include <plat/display.h> 14#include <video/omapdss.h>
15 15
16#define YUYV_BPP 2 16#define YUYV_BPP 2
17#define RGB565_BPP 2 17#define RGB565_BPP 2
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index 84d4c7c83435..fc611ebeb82c 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -24,7 +24,6 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/dmaengine.h> 26#include <linux/dmaengine.h>
27#include <linux/mfd/core.h>
28#include <linux/scatterlist.h> 27#include <linux/scatterlist.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
30#include <linux/list.h> 29#include <linux/list.h>
@@ -791,7 +790,7 @@ static int __devinit timblogiw_probe(struct platform_device *pdev)
791{ 790{
792 int err; 791 int err;
793 struct timblogiw *lw = NULL; 792 struct timblogiw *lw = NULL;
794 struct timb_video_platform_data *pdata = mfd_get_data(pdev); 793 struct timb_video_platform_data *pdata = pdev->dev.platform_data;
795 794
796 if (!pdata) { 795 if (!pdata) {
797 dev_err(&pdev->dev, "No platform data\n"); 796 dev_err(&pdev->dev, "No platform data\n");
diff --git a/drivers/media/video/uvc/Makefile b/drivers/media/video/uvc/Makefile
index 968c1994eda0..2071ca8a2f03 100644
--- a/drivers/media/video/uvc/Makefile
+++ b/drivers/media/video/uvc/Makefile
@@ -1,3 +1,6 @@
1uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \ 1uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \
2 uvc_status.o uvc_isight.o 2 uvc_status.o uvc_isight.o
3ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
4uvcvideo-objs += uvc_entity.o
5endif
3obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o 6obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 823f4b389745..b6eae48d7fb8 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -248,7 +248,7 @@ uint32_t uvc_fraction_to_interval(uint32_t numerator, uint32_t denominator)
248 * Terminal and unit management 248 * Terminal and unit management
249 */ 249 */
250 250
251static struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) 251struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
252{ 252{
253 struct uvc_entity *entity; 253 struct uvc_entity *entity;
254 254
@@ -795,9 +795,12 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
795 struct uvc_entity *entity; 795 struct uvc_entity *entity;
796 unsigned int num_inputs; 796 unsigned int num_inputs;
797 unsigned int size; 797 unsigned int size;
798 unsigned int i;
798 799
800 extra_size = ALIGN(extra_size, sizeof(*entity->pads));
799 num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1; 801 num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
800 size = sizeof(*entity) + extra_size + num_inputs; 802 size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
803 + num_inputs;
801 entity = kzalloc(size, GFP_KERNEL); 804 entity = kzalloc(size, GFP_KERNEL);
802 if (entity == NULL) 805 if (entity == NULL)
803 return NULL; 806 return NULL;
@@ -805,8 +808,17 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
805 entity->id = id; 808 entity->id = id;
806 entity->type = type; 809 entity->type = type;
807 810
811 entity->num_links = 0;
812 entity->num_pads = num_pads;
813 entity->pads = ((void *)(entity + 1)) + extra_size;
814
815 for (i = 0; i < num_inputs; ++i)
816 entity->pads[i].flags = MEDIA_PAD_FL_SINK;
817 if (!UVC_ENTITY_IS_OTERM(entity))
818 entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
819
808 entity->bNrInPins = num_inputs; 820 entity->bNrInPins = num_inputs;
809 entity->baSourceID = ((__u8 *)entity) + sizeof(*entity) + extra_size; 821 entity->baSourceID = (__u8 *)(&entity->pads[num_pads]);
810 822
811 return entity; 823 return entity;
812} 824}
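
The uvc_alloc_entity() hunk above folds the media pads array and the baSourceID array into the entity's single kzalloc(), with extra_size aligned first so the pads stay naturally aligned. Below is a self-contained sketch of the same trailing-array idiom; the item and item_alloc names are hypothetical, not part of the driver.

#include <linux/slab.h>

struct item {
	int *values;		/* points into the same allocation */
	char *tag;		/* ditto */
	int nvalues;
};

/* Allocate a struct item plus its two trailing arrays in one kzalloc(),
 * the same way uvc_alloc_entity() lays out pads and baSourceID above. */
static struct item *item_alloc(int nvalues, size_t tag_len)
{
	struct item *it;
	size_t size;

	size = sizeof(*it) + sizeof(*it->values) * nvalues + tag_len;
	it = kzalloc(size, GFP_KERNEL);
	if (it == NULL)
		return NULL;

	it->nvalues = nvalues;
	it->values = (void *)(it + 1);
	it->tag = (char *)&it->values[nvalues];
	return it;
}
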
@@ -1585,6 +1597,13 @@ static void uvc_delete(struct uvc_device *dev)
1585 uvc_status_cleanup(dev); 1597 uvc_status_cleanup(dev);
1586 uvc_ctrl_cleanup_device(dev); 1598 uvc_ctrl_cleanup_device(dev);
1587 1599
1600 if (dev->vdev.dev)
1601 v4l2_device_unregister(&dev->vdev);
1602#ifdef CONFIG_MEDIA_CONTROLLER
1603 if (media_devnode_is_registered(&dev->mdev.devnode))
1604 media_device_unregister(&dev->mdev);
1605#endif
1606
1588 list_for_each_safe(p, n, &dev->chains) { 1607 list_for_each_safe(p, n, &dev->chains) {
1589 struct uvc_video_chain *chain; 1608 struct uvc_video_chain *chain;
1590 chain = list_entry(p, struct uvc_video_chain, list); 1609 chain = list_entry(p, struct uvc_video_chain, list);
@@ -1594,6 +1613,13 @@ static void uvc_delete(struct uvc_device *dev)
1594 list_for_each_safe(p, n, &dev->entities) { 1613 list_for_each_safe(p, n, &dev->entities) {
1595 struct uvc_entity *entity; 1614 struct uvc_entity *entity;
1596 entity = list_entry(p, struct uvc_entity, list); 1615 entity = list_entry(p, struct uvc_entity, list);
1616#ifdef CONFIG_MEDIA_CONTROLLER
1617 uvc_mc_cleanup_entity(entity);
1618#endif
1619 if (entity->vdev) {
1620 video_device_release(entity->vdev);
1621 entity->vdev = NULL;
1622 }
1597 kfree(entity); 1623 kfree(entity);
1598 } 1624 }
1599 1625
@@ -1616,8 +1642,6 @@ static void uvc_release(struct video_device *vdev)
1616 struct uvc_streaming *stream = video_get_drvdata(vdev); 1642 struct uvc_streaming *stream = video_get_drvdata(vdev);
1617 struct uvc_device *dev = stream->dev; 1643 struct uvc_device *dev = stream->dev;
1618 1644
1619 video_device_release(vdev);
1620
1621 /* Decrement the registered streams count and delete the device when it 1645 /* Decrement the registered streams count and delete the device when it
1622 * reaches zero. 1646 * reaches zero.
1623 */ 1647 */
@@ -1682,7 +1706,7 @@ static int uvc_register_video(struct uvc_device *dev,
1682 * unregistered before the reference is released, so we don't need to 1706 * unregistered before the reference is released, so we don't need to
1683 * get another one. 1707 * get another one.
1684 */ 1708 */
1685 vdev->parent = &dev->intf->dev; 1709 vdev->v4l2_dev = &dev->vdev;
1686 vdev->fops = &uvc_fops; 1710 vdev->fops = &uvc_fops;
1687 vdev->release = uvc_release; 1711 vdev->release = uvc_release;
1688 strlcpy(vdev->name, dev->name, sizeof vdev->name); 1712 strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1731,6 +1755,8 @@ static int uvc_register_terms(struct uvc_device *dev,
1731 ret = uvc_register_video(dev, stream); 1755 ret = uvc_register_video(dev, stream);
1732 if (ret < 0) 1756 if (ret < 0)
1733 return ret; 1757 return ret;
1758
1759 term->vdev = stream->vdev;
1734 } 1760 }
1735 1761
1736 return 0; 1762 return 0;
@@ -1745,6 +1771,14 @@ static int uvc_register_chains(struct uvc_device *dev)
1745 ret = uvc_register_terms(dev, chain); 1771 ret = uvc_register_terms(dev, chain);
1746 if (ret < 0) 1772 if (ret < 0)
1747 return ret; 1773 return ret;
1774
1775#ifdef CONFIG_MEDIA_CONTROLLER
1776 ret = uvc_mc_register_entities(chain);
1777 if (ret < 0) {
1778 uvc_printk(KERN_INFO, "Failed to register entities "
1779 "(%d).\n", ret);
1780 }
1781#endif
1748 } 1782 }
1749 1783
1750 return 0; 1784 return 0;
@@ -1814,6 +1848,24 @@ static int uvc_probe(struct usb_interface *intf,
1814 "linux-uvc-devel mailing list.\n"); 1848 "linux-uvc-devel mailing list.\n");
1815 } 1849 }
1816 1850
1851 /* Register the media and V4L2 devices. */
1852#ifdef CONFIG_MEDIA_CONTROLLER
1853 dev->mdev.dev = &intf->dev;
1854 strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1855 if (udev->serial)
1856 strlcpy(dev->mdev.serial, udev->serial,
1857 sizeof(dev->mdev.serial));
1858 strcpy(dev->mdev.bus_info, udev->devpath);
1859 dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1860 dev->mdev.driver_version = DRIVER_VERSION_NUMBER;
1861 if (media_device_register(&dev->mdev) < 0)
1862 goto error;
1863
1864 dev->vdev.mdev = &dev->mdev;
1865#endif
1866 if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
1867 goto error;
1868
1817 /* Initialize controls. */ 1869 /* Initialize controls. */
1818 if (uvc_ctrl_init_device(dev) < 0) 1870 if (uvc_ctrl_init_device(dev) < 0)
1819 goto error; 1871 goto error;
@@ -1822,7 +1874,7 @@ static int uvc_probe(struct usb_interface *intf,
1822 if (uvc_scan_device(dev) < 0) 1874 if (uvc_scan_device(dev) < 0)
1823 goto error; 1875 goto error;
1824 1876
1825 /* Register video devices. */ 1877 /* Register video device nodes. */
1826 if (uvc_register_chains(dev) < 0) 1878 if (uvc_register_chains(dev) < 0)
1827 goto error; 1879 goto error;
1828 1880
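
The probe hunk above registers a struct media_device first, points the struct v4l2_device at it through the mdev field, and only then registers the V4L2 device, so video nodes created later join the media graph. A condensed sketch of that pairing, assuming CONFIG_MEDIA_CONTROLLER=y; my_drv and my_register are illustrative names.

#include <linux/string.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>

struct my_drv {
	struct media_device mdev;
	struct v4l2_device vdev;
};

static int my_register(struct my_drv *drv, struct device *parent)
{
	int ret;

	drv->mdev.dev = parent;
	strlcpy(drv->mdev.model, "my-model", sizeof(drv->mdev.model));
	ret = media_device_register(&drv->mdev);
	if (ret < 0)
		return ret;

	drv->vdev.mdev = &drv->mdev;	/* attach the V4L2 side to the graph */
	ret = v4l2_device_register(parent, &drv->vdev);
	if (ret < 0)
		media_device_unregister(&drv->mdev);
	return ret;
}
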
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
new file mode 100644
index 000000000000..ede7852bb1df
--- /dev/null
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -0,0 +1,118 @@
1/*
2 * uvc_entity.c -- USB Video Class driver
3 *
4 * Copyright (C) 2005-2011
5 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/list.h>
16#include <linux/videodev2.h>
17
18#include <media/v4l2-common.h>
19
20#include "uvcvideo.h"
21
22/* ------------------------------------------------------------------------
23 * Video subdevices registration and unregistration
24 */
25
26static int uvc_mc_register_entity(struct uvc_video_chain *chain,
27 struct uvc_entity *entity)
28{
29 const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
30 struct uvc_entity *remote;
31 unsigned int i;
32 u8 remote_pad;
33 int ret = 0;
34
35 for (i = 0; i < entity->num_pads; ++i) {
36 struct media_entity *source;
37 struct media_entity *sink;
38
39 if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
40 continue;
41
42 remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
43 if (remote == NULL)
44 return -EINVAL;
45
46 source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
47 ? &remote->vdev->entity : &remote->subdev.entity;
48 sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
49 ? &entity->vdev->entity : &entity->subdev.entity;
50
51 remote_pad = remote->num_pads - 1;
52 ret = media_entity_create_link(source, remote_pad,
53 sink, i, flags);
54 if (ret < 0)
55 return ret;
56 }
57
58 if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
59 ret = v4l2_device_register_subdev(&chain->dev->vdev,
60 &entity->subdev);
61
62 return ret;
63}
64
65static struct v4l2_subdev_ops uvc_subdev_ops = {
66};
67
68void uvc_mc_cleanup_entity(struct uvc_entity *entity)
69{
70 if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
71 media_entity_cleanup(&entity->subdev.entity);
72 else if (entity->vdev != NULL)
73 media_entity_cleanup(&entity->vdev->entity);
74}
75
76static int uvc_mc_init_entity(struct uvc_entity *entity)
77{
78 int ret;
79
80 if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
81 v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
82 strlcpy(entity->subdev.name, entity->name,
83 sizeof(entity->subdev.name));
84
85 ret = media_entity_init(&entity->subdev.entity,
86 entity->num_pads, entity->pads, 0);
87 } else
88 ret = media_entity_init(&entity->vdev->entity,
89 entity->num_pads, entity->pads, 0);
90
91 return ret;
92}
93
94int uvc_mc_register_entities(struct uvc_video_chain *chain)
95{
96 struct uvc_entity *entity;
97 int ret;
98
99 list_for_each_entry(entity, &chain->entities, chain) {
100 ret = uvc_mc_init_entity(entity);
101 if (ret < 0) {
102 uvc_printk(KERN_INFO, "Failed to initialize entity "
103 "%u\n", entity->id);
104 return ret;
105 }
106 }
107
108 list_for_each_entry(entity, &chain->entities, chain) {
109 ret = uvc_mc_register_entity(chain, entity);
110 if (ret < 0) {
111 uvc_printk(KERN_INFO, "Failed to register entity "
112 "%u\n", entity->id);
113 return ret;
114 }
115 }
116
117 return 0;
118}
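
uvc_mc_register_entity() above wires entities together with media_entity_create_link() once their pads are initialized. A minimal sketch of the same two calls for a one-pad source feeding a one-pad sink; link_source_to_sink() is a hypothetical helper, not part of the driver.

#include <media/media-entity.h>

static int link_source_to_sink(struct media_entity *source,
			       struct media_entity *sink,
			       struct media_pad *src_pad,
			       struct media_pad *sink_pad)
{
	int ret;

	src_pad->flags = MEDIA_PAD_FL_SOURCE;
	sink_pad->flags = MEDIA_PAD_FL_SINK;

	ret = media_entity_init(source, 1, src_pad, 0);
	if (ret < 0)
		return ret;
	ret = media_entity_init(sink, 1, sink_pad, 0);
	if (ret < 0)
		return ret;

	/* Pad 0 of the source feeds pad 0 of the sink. */
	return media_entity_create_link(source, 0, sink, 0,
			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
}
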
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 7cf224bae2e5..20107fd3574d 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -98,8 +98,11 @@ struct uvc_xu_control {
98#ifdef __KERNEL__ 98#ifdef __KERNEL__
99 99
100#include <linux/poll.h> 100#include <linux/poll.h>
101#include <linux/usb.h>
101#include <linux/usb/video.h> 102#include <linux/usb/video.h>
102#include <linux/uvcvideo.h> 103#include <linux/uvcvideo.h>
104#include <media/media-device.h>
105#include <media/v4l2-device.h>
103 106
104/* -------------------------------------------------------------------------- 107/* --------------------------------------------------------------------------
105 * UVC constants 108 * UVC constants
@@ -301,6 +304,13 @@ struct uvc_entity {
301 __u16 type; 304 __u16 type;
302 char name[64]; 305 char name[64];
303 306
307 /* Media controller-related fields. */
308 struct video_device *vdev;
309 struct v4l2_subdev subdev;
310 unsigned int num_pads;
311 unsigned int num_links;
312 struct media_pad *pads;
313
304 union { 314 union {
305 struct { 315 struct {
306 __u16 wObjectiveFocalLengthMin; 316 __u16 wObjectiveFocalLengthMin;
@@ -504,6 +514,10 @@ struct uvc_device {
504 atomic_t nmappings; 514 atomic_t nmappings;
505 515
506 /* Video control interface */ 516 /* Video control interface */
517#ifdef CONFIG_MEDIA_CONTROLLER
518 struct media_device mdev;
519#endif
520 struct v4l2_device vdev;
507 __u16 uvc_version; 521 __u16 uvc_version;
508 __u32 clock_frequency; 522 __u32 clock_frequency;
509 523
@@ -583,6 +597,8 @@ extern unsigned int uvc_timeout_param;
583/* Core driver */ 597/* Core driver */
584extern struct uvc_driver uvc_driver; 598extern struct uvc_driver uvc_driver;
585 599
600extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
601
586/* Video buffers queue management. */ 602/* Video buffers queue management. */
587extern void uvc_queue_init(struct uvc_video_queue *queue, 603extern void uvc_queue_init(struct uvc_video_queue *queue,
588 enum v4l2_buf_type type, int drop_corrupted); 604 enum v4l2_buf_type type, int drop_corrupted);
@@ -616,6 +632,10 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
616/* V4L2 interface */ 632/* V4L2 interface */
617extern const struct v4l2_file_operations uvc_fops; 633extern const struct v4l2_file_operations uvc_fops;
618 634
635/* Media controller */
636extern int uvc_mc_register_entities(struct uvc_video_chain *chain);
637extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
638
619/* Video */ 639/* Video */
620extern int uvc_video_init(struct uvc_streaming *stream); 640extern int uvc_video_init(struct uvc_streaming *stream);
621extern int uvc_video_suspend(struct uvc_streaming *stream); 641extern int uvc_video_suspend(struct uvc_streaming *stream);
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 011cb6ce861b..17dfe9bb6d27 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -21,13 +21,13 @@
21 21
22#define INT_STATUS_NUM 3 22#define INT_STATUS_NUM 3
23 23
24static struct resource bk_resources[] __initdata = { 24static struct resource bk_resources[] __devinitdata = {
25 {PM8606_BACKLIGHT1, PM8606_BACKLIGHT1, "backlight-0", IORESOURCE_IO,}, 25 {PM8606_BACKLIGHT1, PM8606_BACKLIGHT1, "backlight-0", IORESOURCE_IO,},
26 {PM8606_BACKLIGHT2, PM8606_BACKLIGHT2, "backlight-1", IORESOURCE_IO,}, 26 {PM8606_BACKLIGHT2, PM8606_BACKLIGHT2, "backlight-1", IORESOURCE_IO,},
27 {PM8606_BACKLIGHT3, PM8606_BACKLIGHT3, "backlight-2", IORESOURCE_IO,}, 27 {PM8606_BACKLIGHT3, PM8606_BACKLIGHT3, "backlight-2", IORESOURCE_IO,},
28}; 28};
29 29
30static struct resource led_resources[] __initdata = { 30static struct resource led_resources[] __devinitdata = {
31 {PM8606_LED1_RED, PM8606_LED1_RED, "led0-red", IORESOURCE_IO,}, 31 {PM8606_LED1_RED, PM8606_LED1_RED, "led0-red", IORESOURCE_IO,},
32 {PM8606_LED1_GREEN, PM8606_LED1_GREEN, "led0-green", IORESOURCE_IO,}, 32 {PM8606_LED1_GREEN, PM8606_LED1_GREEN, "led0-green", IORESOURCE_IO,},
33 {PM8606_LED1_BLUE, PM8606_LED1_BLUE, "led0-blue", IORESOURCE_IO,}, 33 {PM8606_LED1_BLUE, PM8606_LED1_BLUE, "led0-blue", IORESOURCE_IO,},
@@ -36,7 +36,7 @@ static struct resource led_resources[] __initdata = {
36 {PM8606_LED2_BLUE, PM8606_LED2_BLUE, "led1-blue", IORESOURCE_IO,}, 36 {PM8606_LED2_BLUE, PM8606_LED2_BLUE, "led1-blue", IORESOURCE_IO,},
37}; 37};
38 38
39static struct resource regulator_resources[] __initdata = { 39static struct resource regulator_resources[] __devinitdata = {
40 {PM8607_ID_BUCK1, PM8607_ID_BUCK1, "buck-1", IORESOURCE_IO,}, 40 {PM8607_ID_BUCK1, PM8607_ID_BUCK1, "buck-1", IORESOURCE_IO,},
41 {PM8607_ID_BUCK2, PM8607_ID_BUCK2, "buck-2", IORESOURCE_IO,}, 41 {PM8607_ID_BUCK2, PM8607_ID_BUCK2, "buck-2", IORESOURCE_IO,},
42 {PM8607_ID_BUCK3, PM8607_ID_BUCK3, "buck-3", IORESOURCE_IO,}, 42 {PM8607_ID_BUCK3, PM8607_ID_BUCK3, "buck-3", IORESOURCE_IO,},
@@ -57,15 +57,15 @@ static struct resource regulator_resources[] __initdata = {
57 {PM8607_ID_LDO15, PM8607_ID_LDO15, "ldo-15", IORESOURCE_IO,}, 57 {PM8607_ID_LDO15, PM8607_ID_LDO15, "ldo-15", IORESOURCE_IO,},
58}; 58};
59 59
60static struct resource touch_resources[] __initdata = { 60static struct resource touch_resources[] __devinitdata = {
61 {PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,}, 61 {PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,},
62}; 62};
63 63
64static struct resource onkey_resources[] __initdata = { 64static struct resource onkey_resources[] __devinitdata = {
65 {PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,}, 65 {PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,},
66}; 66};
67 67
68static struct resource codec_resources[] __initdata = { 68static struct resource codec_resources[] __devinitdata = {
69 /* Headset microphone insertion or removal */ 69 /* Headset microphone insertion or removal */
70 {PM8607_IRQ_MICIN, PM8607_IRQ_MICIN, "micin", IORESOURCE_IRQ,}, 70 {PM8607_IRQ_MICIN, PM8607_IRQ_MICIN, "micin", IORESOURCE_IRQ,},
71 /* Hook-switch press or release */ 71 /* Hook-switch press or release */
@@ -76,12 +76,12 @@ static struct resource codec_resources[] __initdata = {
76 {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,}, 76 {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,},
77}; 77};
78 78
79static struct resource battery_resources[] __initdata = { 79static struct resource battery_resources[] __devinitdata = {
80 {PM8607_IRQ_CC, PM8607_IRQ_CC, "columb counter", IORESOURCE_IRQ,}, 80 {PM8607_IRQ_CC, PM8607_IRQ_CC, "columb counter", IORESOURCE_IRQ,},
81 {PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery", IORESOURCE_IRQ,}, 81 {PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery", IORESOURCE_IRQ,},
82}; 82};
83 83
84static struct resource charger_resources[] __initdata = { 84static struct resource charger_resources[] __devinitdata = {
85 {PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,}, 85 {PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,},
86 {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,}, 86 {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,},
87 {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging timeout", IORESOURCE_IRQ,}, 87 {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging timeout", IORESOURCE_IRQ,},
@@ -90,13 +90,17 @@ static struct resource charger_resources[] __initdata = {
90 {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,}, 90 {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,},
91}; 91};
92 92
93static struct mfd_cell bk_devs[] __initdata = { 93static struct resource rtc_resources[] __devinitdata = {
94 {PM8607_IRQ_RTC, PM8607_IRQ_RTC, "rtc", IORESOURCE_IRQ,},
95};
96
97static struct mfd_cell bk_devs[] = {
94 {"88pm860x-backlight", 0,}, 98 {"88pm860x-backlight", 0,},
95 {"88pm860x-backlight", 1,}, 99 {"88pm860x-backlight", 1,},
96 {"88pm860x-backlight", 2,}, 100 {"88pm860x-backlight", 2,},
97}; 101};
98 102
99static struct mfd_cell led_devs[] __initdata = { 103static struct mfd_cell led_devs[] = {
100 {"88pm860x-led", 0,}, 104 {"88pm860x-led", 0,},
101 {"88pm860x-led", 1,}, 105 {"88pm860x-led", 1,},
102 {"88pm860x-led", 2,}, 106 {"88pm860x-led", 2,},
@@ -105,7 +109,7 @@ static struct mfd_cell led_devs[] __initdata = {
105 {"88pm860x-led", 5,}, 109 {"88pm860x-led", 5,},
106}; 110};
107 111
108static struct mfd_cell regulator_devs[] __initdata = { 112static struct mfd_cell regulator_devs[] = {
109 {"88pm860x-regulator", 0,}, 113 {"88pm860x-regulator", 0,},
110 {"88pm860x-regulator", 1,}, 114 {"88pm860x-regulator", 1,},
111 {"88pm860x-regulator", 2,}, 115 {"88pm860x-regulator", 2,},
@@ -126,15 +130,15 @@ static struct mfd_cell regulator_devs[] __initdata = {
126 {"88pm860x-regulator", 17,}, 130 {"88pm860x-regulator", 17,},
127}; 131};
128 132
129static struct mfd_cell touch_devs[] __initdata = { 133static struct mfd_cell touch_devs[] = {
130 {"88pm860x-touch", -1,}, 134 {"88pm860x-touch", -1,},
131}; 135};
132 136
133static struct mfd_cell onkey_devs[] __initdata = { 137static struct mfd_cell onkey_devs[] = {
134 {"88pm860x-onkey", -1,}, 138 {"88pm860x-onkey", -1,},
135}; 139};
136 140
137static struct mfd_cell codec_devs[] __initdata = { 141static struct mfd_cell codec_devs[] = {
138 {"88pm860x-codec", -1,}, 142 {"88pm860x-codec", -1,},
139}; 143};
140 144
@@ -143,11 +147,10 @@ static struct mfd_cell power_devs[] = {
143 {"88pm860x-charger", -1,}, 147 {"88pm860x-charger", -1,},
144}; 148};
145 149
146static struct pm860x_backlight_pdata bk_pdata[ARRAY_SIZE(bk_devs)]; 150static struct mfd_cell rtc_devs[] = {
147static struct pm860x_led_pdata led_pdata[ARRAY_SIZE(led_devs)]; 151 {"88pm860x-rtc", -1,},
148static struct regulator_init_data regulator_pdata[ARRAY_SIZE(regulator_devs)]; 152};
149static struct pm860x_touch_pdata touch_pdata; 153
150static struct pm860x_power_pdata power_pdata;
151 154
152struct pm860x_irq_data { 155struct pm860x_irq_data {
153 int reg; 156 int reg;
@@ -501,7 +504,6 @@ static void device_irq_exit(struct pm860x_chip *chip)
501} 504}
502 505
503static void __devinit device_bk_init(struct pm860x_chip *chip, 506static void __devinit device_bk_init(struct pm860x_chip *chip,
504 struct i2c_client *i2c,
505 struct pm860x_platform_data *pdata) 507 struct pm860x_platform_data *pdata)
506{ 508{
507 int ret; 509 int ret;
@@ -514,13 +516,12 @@ static void __devinit device_bk_init(struct pm860x_chip *chip,
514 pdata->num_backlights = ARRAY_SIZE(bk_devs); 516 pdata->num_backlights = ARRAY_SIZE(bk_devs);
515 517
516 for (i = 0; i < pdata->num_backlights; i++) { 518 for (i = 0; i < pdata->num_backlights; i++) {
517 memcpy(&bk_pdata[i], &pdata->backlight[i], 519 bk_devs[i].platform_data = &pdata->backlight[i];
518 sizeof(struct pm860x_backlight_pdata)); 520 bk_devs[i].pdata_size = sizeof(struct pm860x_backlight_pdata);
519 bk_devs[i].mfd_data = &bk_pdata[i];
520 521
521 for (j = 0; j < ARRAY_SIZE(bk_devs); j++) { 522 for (j = 0; j < ARRAY_SIZE(bk_devs); j++) {
522 id = bk_resources[j].start; 523 id = bk_resources[j].start;
523 if (bk_pdata[i].flags != id) 524 if (pdata->backlight[i].flags != id)
524 continue; 525 continue;
525 526
526 bk_devs[i].num_resources = 1; 527 bk_devs[i].num_resources = 1;
@@ -538,7 +539,6 @@ static void __devinit device_bk_init(struct pm860x_chip *chip,
538} 539}
539 540
540static void __devinit device_led_init(struct pm860x_chip *chip, 541static void __devinit device_led_init(struct pm860x_chip *chip,
541 struct i2c_client *i2c,
542 struct pm860x_platform_data *pdata) 542 struct pm860x_platform_data *pdata)
543{ 543{
544 int ret; 544 int ret;
@@ -551,13 +551,12 @@ static void __devinit device_led_init(struct pm860x_chip *chip,
551 pdata->num_leds = ARRAY_SIZE(led_devs); 551 pdata->num_leds = ARRAY_SIZE(led_devs);
552 552
553 for (i = 0; i < pdata->num_leds; i++) { 553 for (i = 0; i < pdata->num_leds; i++) {
554 memcpy(&led_pdata[i], &pdata->led[i], 554 led_devs[i].platform_data = &pdata->led[i];
555 sizeof(struct pm860x_led_pdata)); 555 led_devs[i].pdata_size = sizeof(struct pm860x_led_pdata);
556 led_devs[i].mfd_data = &led_pdata[i];
557 556
558 for (j = 0; j < ARRAY_SIZE(led_devs); j++) { 557 for (j = 0; j < ARRAY_SIZE(led_devs); j++) {
559 id = led_resources[j].start; 558 id = led_resources[j].start;
560 if (led_pdata[i].flags != id) 559 if (pdata->led[i].flags != id)
561 continue; 560 continue;
562 561
563 led_devs[i].num_resources = 1; 562 led_devs[i].num_resources = 1;
@@ -575,12 +574,11 @@ static void __devinit device_led_init(struct pm860x_chip *chip,
575} 574}
576 575
577static void __devinit device_regulator_init(struct pm860x_chip *chip, 576static void __devinit device_regulator_init(struct pm860x_chip *chip,
578 struct i2c_client *i2c,
579 struct pm860x_platform_data *pdata) 577 struct pm860x_platform_data *pdata)
580{ 578{
581 struct regulator_init_data *initdata; 579 struct regulator_init_data *initdata;
582 int ret; 580 int ret;
583 int i, j; 581 int i, seq;
584 582
585 if ((pdata == NULL) || (pdata->regulator == NULL)) 583 if ((pdata == NULL) || (pdata->regulator == NULL))
586 return; 584 return;
@@ -588,41 +586,21 @@ static void __devinit device_regulator_init(struct pm860x_chip *chip,
588 if (pdata->num_regulators > ARRAY_SIZE(regulator_devs)) 586 if (pdata->num_regulators > ARRAY_SIZE(regulator_devs))
589 pdata->num_regulators = ARRAY_SIZE(regulator_devs); 587 pdata->num_regulators = ARRAY_SIZE(regulator_devs);
590 588
591 for (i = 0, j = -1; i < pdata->num_regulators; i++) { 589 for (i = 0, seq = -1; i < pdata->num_regulators; i++) {
592 initdata = &pdata->regulator[i]; 590 initdata = &pdata->regulator[i];
593 if (strstr(initdata->constraints.name, "BUCK")) { 591 seq = *(unsigned int *)initdata->driver_data;
594 sscanf(initdata->constraints.name, "BUCK%d", &j); 592 if ((seq < 0) || (seq > PM8607_ID_RG_MAX)) {
595 /* BUCK1 ~ BUCK3 */ 593 dev_err(chip->dev, "Wrong ID(%d) on regulator(%s)\n",
596 if ((j < 1) || (j > 3)) { 594 seq, initdata->constraints.name);
597 dev_err(chip->dev, "Failed to add constraint "
598 "(%s)\n", initdata->constraints.name);
599 goto out;
600 }
601 j = (j - 1) + PM8607_ID_BUCK1;
602 }
603 if (strstr(initdata->constraints.name, "LDO")) {
604 sscanf(initdata->constraints.name, "LDO%d", &j);
605 /* LDO1 ~ LDO15 */
606 if ((j < 1) || (j > 15)) {
607 dev_err(chip->dev, "Failed to add constraint "
608 "(%s)\n", initdata->constraints.name);
609 goto out;
610 }
611 j = (j - 1) + PM8607_ID_LDO1;
612 }
613 if (j == -1) {
614 dev_err(chip->dev, "Failed to add constraint (%s)\n",
615 initdata->constraints.name);
616 goto out; 595 goto out;
617 } 596 }
618 memcpy(&regulator_pdata[i], &pdata->regulator[i], 597 regulator_devs[i].platform_data = &pdata->regulator[i];
619 sizeof(struct regulator_init_data)); 598 regulator_devs[i].pdata_size = sizeof(struct regulator_init_data);
620 regulator_devs[i].mfd_data = &regulator_pdata[i];
621 regulator_devs[i].num_resources = 1; 599 regulator_devs[i].num_resources = 1;
622 regulator_devs[i].resources = &regulator_resources[j]; 600 regulator_devs[i].resources = &regulator_resources[seq];
623 601
624 ret = mfd_add_devices(chip->dev, 0, &regulator_devs[i], 1, 602 ret = mfd_add_devices(chip->dev, 0, &regulator_devs[i], 1,
625 &regulator_resources[j], 0); 603 &regulator_resources[seq], 0);
626 if (ret < 0) { 604 if (ret < 0) {
627 dev_err(chip->dev, "Failed to add regulator subdev\n"); 605 dev_err(chip->dev, "Failed to add regulator subdev\n");
628 goto out; 606 goto out;
@@ -632,17 +610,35 @@ out:
632 return; 610 return;
633} 611}
634 612
613static void __devinit device_rtc_init(struct pm860x_chip *chip,
614 struct pm860x_platform_data *pdata)
615{
616 int ret;
617
618 if (pdata == NULL)
619 return;
620
621 rtc_devs[0].platform_data = pdata->rtc;
622 rtc_devs[0].pdata_size = sizeof(struct pm860x_rtc_pdata);
623 rtc_devs[0].num_resources = ARRAY_SIZE(rtc_resources);
624 rtc_devs[0].resources = &rtc_resources[0];
625 ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
626 ARRAY_SIZE(rtc_devs), &rtc_resources[0],
627 chip->irq_base);
628 if (ret < 0)
629 dev_err(chip->dev, "Failed to add rtc subdev\n");
630}
631
635static void __devinit device_touch_init(struct pm860x_chip *chip, 632static void __devinit device_touch_init(struct pm860x_chip *chip,
636 struct i2c_client *i2c,
637 struct pm860x_platform_data *pdata) 633 struct pm860x_platform_data *pdata)
638{ 634{
639 int ret; 635 int ret;
640 636
641 if ((pdata == NULL) || (pdata->touch == NULL)) 637 if (pdata == NULL)
642 return; 638 return;
643 639
644 memcpy(&touch_pdata, pdata->touch, sizeof(struct pm860x_touch_pdata)); 640 touch_devs[0].platform_data = pdata->touch;
645 touch_devs[0].mfd_data = &touch_pdata; 641 touch_devs[0].pdata_size = sizeof(struct pm860x_touch_pdata);
646 touch_devs[0].num_resources = ARRAY_SIZE(touch_resources); 642 touch_devs[0].num_resources = ARRAY_SIZE(touch_resources);
647 touch_devs[0].resources = &touch_resources[0]; 643 touch_devs[0].resources = &touch_resources[0];
648 ret = mfd_add_devices(chip->dev, 0, &touch_devs[0], 644 ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
@@ -653,16 +649,15 @@ static void __devinit device_touch_init(struct pm860x_chip *chip,
653} 649}
654 650
655static void __devinit device_power_init(struct pm860x_chip *chip, 651static void __devinit device_power_init(struct pm860x_chip *chip,
656 struct i2c_client *i2c,
657 struct pm860x_platform_data *pdata) 652 struct pm860x_platform_data *pdata)
658{ 653{
659 int ret; 654 int ret;
660 655
661 if ((pdata == NULL) || (pdata->power == NULL)) 656 if (pdata == NULL)
662 return; 657 return;
663 658
664 memcpy(&power_pdata, pdata->power, sizeof(struct pm860x_power_pdata)); 659 power_devs[0].platform_data = pdata->power;
665 power_devs[0].mfd_data = &power_pdata; 660 power_devs[0].pdata_size = sizeof(struct pm860x_power_pdata);
666 power_devs[0].num_resources = ARRAY_SIZE(battery_resources); 661 power_devs[0].num_resources = ARRAY_SIZE(battery_resources);
667 power_devs[0].resources = &battery_resources[0], 662 power_devs[0].resources = &battery_resources[0],
668 ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1, 663 ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1,
@@ -670,7 +665,8 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
670 if (ret < 0) 665 if (ret < 0)
671 dev_err(chip->dev, "Failed to add battery subdev\n"); 666 dev_err(chip->dev, "Failed to add battery subdev\n");
672 667
673 power_devs[1].mfd_data = &power_pdata; 668 power_devs[1].platform_data = pdata->power;
669 power_devs[1].pdata_size = sizeof(struct pm860x_power_pdata);
674 power_devs[1].num_resources = ARRAY_SIZE(charger_resources); 670 power_devs[1].num_resources = ARRAY_SIZE(charger_resources);
675 power_devs[1].resources = &charger_resources[0], 671 power_devs[1].resources = &charger_resources[0],
676 ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1, 672 ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1,
@@ -680,7 +676,6 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
680} 676}
681 677
682static void __devinit device_onkey_init(struct pm860x_chip *chip, 678static void __devinit device_onkey_init(struct pm860x_chip *chip,
683 struct i2c_client *i2c,
684 struct pm860x_platform_data *pdata) 679 struct pm860x_platform_data *pdata)
685{ 680{
686 int ret; 681 int ret;
@@ -695,7 +690,6 @@ static void __devinit device_onkey_init(struct pm860x_chip *chip,
695} 690}
696 691
697static void __devinit device_codec_init(struct pm860x_chip *chip, 692static void __devinit device_codec_init(struct pm860x_chip *chip,
698 struct i2c_client *i2c,
699 struct pm860x_platform_data *pdata) 693 struct pm860x_platform_data *pdata)
700{ 694{
701 int ret; 695 int ret;
@@ -763,11 +757,12 @@ static void __devinit device_8607_init(struct pm860x_chip *chip,
763 if (ret < 0) 757 if (ret < 0)
764 goto out; 758 goto out;
765 759
766 device_regulator_init(chip, i2c, pdata); 760 device_regulator_init(chip, pdata);
767 device_onkey_init(chip, i2c, pdata); 761 device_rtc_init(chip, pdata);
768 device_touch_init(chip, i2c, pdata); 762 device_onkey_init(chip, pdata);
769 device_power_init(chip, i2c, pdata); 763 device_touch_init(chip, pdata);
770 device_codec_init(chip, i2c, pdata); 764 device_power_init(chip, pdata);
765 device_codec_init(chip, pdata);
771out: 766out:
772 return; 767 return;
773} 768}
@@ -779,8 +774,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
779 774
780 switch (chip->id) { 775 switch (chip->id) {
781 case CHIP_PM8606: 776 case CHIP_PM8606:
782 device_bk_init(chip, chip->client, pdata); 777 device_bk_init(chip, pdata);
783 device_led_init(chip, chip->client, pdata); 778 device_led_init(chip, pdata);
784 break; 779 break;
785 case CHIP_PM8607: 780 case CHIP_PM8607:
786 device_8607_init(chip, chip->client, pdata); 781 device_8607_init(chip, chip->client, pdata);
@@ -790,8 +785,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
790 if (chip->companion) { 785 if (chip->companion) {
791 switch (chip->id) { 786 switch (chip->id) {
792 case CHIP_PM8607: 787 case CHIP_PM8607:
793 device_bk_init(chip, chip->companion, pdata); 788 device_bk_init(chip, pdata);
794 device_led_init(chip, chip->companion, pdata); 789 device_led_init(chip, pdata);
795 break; 790 break;
796 case CHIP_PM8606: 791 case CHIP_PM8606:
797 device_8607_init(chip, chip->companion, pdata); 792 device_8607_init(chip, chip->companion, pdata);
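
The 88pm860x conversion above drops the mfd_data indirection and instead fills in the platform_data/pdata_size pair on each struct mfd_cell; mfd_add_devices() then copies that data onto the child platform device, which is why the timblogiw.c change earlier can simply read pdev->dev.platform_data. A minimal sketch of registering one cell this way; the my_* names are illustrative, not part of any of these drivers.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>

struct my_child_pdata {
	int flags;
};

static struct my_child_pdata my_child_pdata = {
	.flags = 1,
};

static struct mfd_cell my_cells[] = {
	{
		.name		= "my-child",
		.platform_data	= &my_child_pdata,
		.pdata_size	= sizeof(my_child_pdata),
	},
};

/* Called from the parent driver's probe(); the MFD core copies the
 * platform_data into each child device, so the child driver reads
 * pdev->dev.platform_data as usual. */
static int my_add_children(struct device *parent)
{
	return mfd_add_devices(parent, 0, my_cells, ARRAY_SIZE(my_cells),
			       NULL, 0);
}
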
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3ed3ff06be5d..b6c267724e14 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -157,6 +157,20 @@ config TPS6507X
157 This driver can also be built as a module. If so, the module 157 This driver can also be built as a module. If so, the module
158 will be called tps6507x. 158 will be called tps6507x.
159 159
160config MFD_TPS6586X
161 bool "TPS6586x Power Management chips"
162 depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
163 select MFD_CORE
164 help
165 If you say yes here you get support for the TPS6586X series of
166 Power Management chips.
167 This driver provides common support for accessing the device,
168 additional drivers must be enabled in order to use the
169 functionality of the device.
170
171 This driver can also be built as a module. If so, the module
172 will be called tps6586x.
173
160config MENELAUS 174config MENELAUS
161 bool "Texas Instruments TWL92330/Menelaus PM chip" 175 bool "Texas Instruments TWL92330/Menelaus PM chip"
162 depends on I2C=y && ARCH_OMAP2 176 depends on I2C=y && ARCH_OMAP2
@@ -455,6 +469,20 @@ config MFD_PCF50633
455 facilities, and registers devices for the various functions 469 facilities, and registers devices for the various functions
456 so that function-specific drivers can bind to them. 470 so that function-specific drivers can bind to them.
457 471
472config PCF50633_ADC
473 tristate "Support for NXP PCF50633 ADC"
474 depends on MFD_PCF50633
475 help
476 Say yes here if you want to include support for ADC in the
477 NXP PCF50633 chip.
478
479config PCF50633_GPIO
480 tristate "Support for NXP PCF50633 GPIO"
481 depends on MFD_PCF50633
482 help
483 Say yes here if you want to include support for the GPIO pins on
484 the PCF50633 chip.
485
458config MFD_MC13783 486config MFD_MC13783
459 tristate 487 tristate
460 488
@@ -470,20 +498,6 @@ config MFD_MC13XXX
470 additional drivers must be enabled in order to use the 498 additional drivers must be enabled in order to use the
471 functionality of the device. 499 functionality of the device.
472 500
473config PCF50633_ADC
474 tristate "Support for NXP PCF50633 ADC"
475 depends on MFD_PCF50633
476 help
477 Say yes here if you want to include support for ADC in the
478 NXP PCF50633 chip.
479
480config PCF50633_GPIO
481 tristate "Support for NXP PCF50633 GPIO"
482 depends on MFD_PCF50633
483 help
484 Say yes here if you want to include support GPIO for pins on
485 the PCF50633 chip.
486
487config ABX500_CORE 501config ABX500_CORE
488 bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions" 502 bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
489 default y if ARCH_U300 || ARCH_U8500 503 default y if ARCH_U300 || ARCH_U8500
@@ -538,7 +552,7 @@ config AB8500_CORE
538 552
539config AB8500_I2C_CORE 553config AB8500_I2C_CORE
540 bool "AB8500 register access via PRCMU I2C" 554 bool "AB8500 register access via PRCMU I2C"
541 depends on AB8500_CORE && UX500_SOC_DB8500 555 depends on AB8500_CORE && MFD_DB8500_PRCMU
542 default y 556 default y
543 help 557 help
544 This enables register access to the AB8500 chip via PRCMU I2C. 558 This enables register access to the AB8500 chip via PRCMU I2C.
@@ -575,6 +589,26 @@ config AB3550_CORE
575 LEDs, vibrator, system power and temperature, power management 589 LEDs, vibrator, system power and temperature, power management
576 and ALSA sound. 590 and ALSA sound.
577 591
592config MFD_DB8500_PRCMU
593 bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
594 depends on UX500_SOC_DB8500
595 select MFD_CORE
596 help
597 Select this option to enable support for the DB8500 Power Reset
598 and Control Management Unit. This is basically an autonomous
599 system controller running an XP70 microprocessor, which is accessed
600 through a register map.
601
602config MFD_DB5500_PRCMU
603 bool "ST-Ericsson DB5500 Power Reset Control Management Unit"
604 depends on UX500_SOC_DB5500
605 select MFD_CORE
606 help
607 Select this option to enable support for the DB5500 Power Reset
608 and Control Management Unit. This is basically an autonomous
609 system controller running an XP70 microprocessor, which is accessed
610 through a register map.
611
578config MFD_CS5535 612config MFD_CS5535
579 tristate "Support for CS5535 and CS5536 southbridge core functions" 613 tristate "Support for CS5535 and CS5536 southbridge core functions"
580 select MFD_CORE 614 select MFD_CORE
@@ -629,20 +663,6 @@ config MFD_JZ4740_ADC
629 Say yes here if you want support for the ADC unit in the JZ4740 SoC. 663 Say yes here if you want support for the ADC unit in the JZ4740 SoC.
630 This driver is necessary for jz4740-battery and jz4740-hwmon driver. 664 This driver is necessary for jz4740-battery and jz4740-hwmon driver.
631 665
632config MFD_TPS6586X
633 bool "TPS6586x Power Management chips"
634 depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
635 select MFD_CORE
636 help
637 If you say yes here you get support for the TPS6586X series of
638 Power Management chips.
639 This driver provides common support for accessing the device,
640 additional drivers must be enabled in order to use the
641 functionality of the device.
642
643 This driver can also be built as a module. If so, the module
644 will be called tps6586x.
645
646config MFD_VX855 666config MFD_VX855
647 tristate "Support for VIA VX855/VX875 integrated south bridge" 667 tristate "Support for VIA VX855/VX875 integrated south bridge"
648 depends on PCI 668 depends on PCI
@@ -671,6 +691,43 @@ config MFD_OMAP_USB_HOST
671 This MFD driver does the required setup functionalities for 691 This MFD driver does the required setup functionalities for
672 OMAP USB Host drivers. 692 OMAP USB Host drivers.
673 693
694config MFD_PM8XXX
695 tristate
696
697config MFD_PM8921_CORE
698 tristate "Qualcomm PM8921 PMIC chip"
699 depends on MSM_SSBI
700 select MFD_CORE
701 select MFD_PM8XXX
702 help
703 If you say yes to this option, support will be included for the
704 built-in PM8921 PMIC chip.
705
706 This is required if your board has a PM8921 and uses its features,
707 such as: MPPs, GPIOs, regulators, interrupts, and PWM.
708
709 Say M here if you want to include support for PM8921 chip as a module.
710 This will build a module called "pm8921-core".
711
712config MFD_PM8XXX_IRQ
713 bool "Support for Qualcomm PM8xxx IRQ features"
714 depends on MFD_PM8XXX
715 default y if MFD_PM8XXX
716 help
717 This is the IRQ driver for Qualcomm PM 8xxx PMIC chips.
718
719 This is required to use certain other PM 8xxx features, such as GPIO
720 and MPP.
721
722config MFD_TPS65910
723 bool "TPS65910 Power Management chip"
724 depends on I2C=y
725 select MFD_CORE
726 select GPIO_TPS65910
727 help
728 If you say yes here you get support for the TPS65910 series of
729 Power Management chips.
730
674endif # MFD_SUPPORT 731endif # MFD_SUPPORT
675 732
676menu "Multimedia Capabilities Port drivers" 733menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 419caa9d7dcf..efe3cc33ed92 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -74,9 +74,12 @@ obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
74obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o 74obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
75obj-$(CONFIG_AB3550_CORE) += ab3550-core.o 75obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
76obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o 76obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
77obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
78obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o 77obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
79obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o 78obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
79obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
80# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
81obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
82obj-$(CONFIG_MFD_DB5500_PRCMU) += db5500-prcmu.o
80obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o 83obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
81obj-$(CONFIG_PMIC_ADP5520) += adp5520.o 84obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
82obj-$(CONFIG_LPC_SCH) += lpc_sch.o 85obj-$(CONFIG_LPC_SCH) += lpc_sch.o
@@ -88,3 +91,6 @@ obj-$(CONFIG_MFD_VX855) += vx855.o
88obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o 91obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o
89obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o 92obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
90obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o 93obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
94obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
95obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
96obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index a751927047ac..a20e1c41bed2 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -949,8 +949,10 @@ static int __devinit ab3100_probe(struct i2c_client *client,
949 goto exit_no_ops; 949 goto exit_no_ops;
950 950
951 /* Set up and register the platform devices. */ 951 /* Set up and register the platform devices. */
952 for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) 952 for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) {
953 ab3100_devs[i].mfd_data = ab3100_plf_data; 953 ab3100_devs[i].platform_data = ab3100_plf_data;
954 ab3100_devs[i].pdata_size = sizeof(struct ab3100_platform_data);
955 }
954 956
955 err = mfd_add_devices(&client->dev, 0, ab3100_devs, 957 err = mfd_add_devices(&client->dev, 0, ab3100_devs,
956 ARRAY_SIZE(ab3100_devs), NULL, 0); 958 ARRAY_SIZE(ab3100_devs), NULL, 0);
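
The hunk above is the first of several in this patch that switch an mfd_cell from the old mfd_data pointer to platform_data plus pdata_size, which lets the MFD core hand each child a private copy of the data through the normal platform-device mechanism. A minimal consumer-side sketch under that assumption; the probe function name is hypothetical and not part of the patch:

static int __devinit ab3100_child_probe(struct platform_device *pdev)
{
	/* The MFD core is assumed to have copied the cell's platform_data
	 * (pdata_size bytes) into the child device. */
	struct ab3100_platform_data *plf_data = dev_get_platdata(&pdev->dev);

	if (!plf_data)
		return -ENOENT;
	/* ... use plf_data as before ... */
	return 0;
}
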
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index ff86acf3e6bd..3d7dce671b93 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1320,8 +1320,10 @@ static int __init ab3550_probe(struct i2c_client *client,
1320 goto exit_no_ops; 1320 goto exit_no_ops;
1321 1321
1322 /* Set up and register the platform devices. */ 1322 /* Set up and register the platform devices. */
1323 for (i = 0; i < AB3550_NUM_DEVICES; i++) 1323 for (i = 0; i < AB3550_NUM_DEVICES; i++) {
1324 ab3550_devs[i].mfd_data = ab3550_plf_data->dev_data[i]; 1324 ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
1325 ab3550_devs[i].pdata_size = ab3550_plf_data->dev_data_sz[i];
1326 }
1325 1327
1326 err = mfd_add_devices(&client->dev, 0, ab3550_devs, 1328 err = mfd_add_devices(&client->dev, 0, ab3550_devs,
1327 ARRAY_SIZE(ab3550_devs), NULL, 1329 ARRAY_SIZE(ab3550_devs), NULL,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 67d01c938284..fc0c1af1566e 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -254,8 +254,9 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
254 if (new == old) 254 if (new == old)
255 continue; 255 continue;
256 256
257 /* Interrupt register 12 does'nt exist prior to version 0x20 */ 257 /* Interrupt register 12 doesn't exist prior to version 2.0 */
258 if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20) 258 if (ab8500_irq_regoffset[i] == 11 &&
259 ab8500->chip_id < AB8500_CUT2P0)
259 continue; 260 continue;
260 261
261 ab8500->oldmask[i] = new; 262 ab8500->oldmask[i] = new;
@@ -307,8 +308,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
307 int status; 308 int status;
308 u8 value; 309 u8 value;
309 310
310 /* Interrupt register 12 does'nt exist prior to version 0x20 */ 311 /* Interrupt register 12 doesn't exist prior to version 2.0 */
311 if (regoffset == 11 && ab8500->chip_id < 0x20) 312 if (regoffset == 11 && ab8500->chip_id < AB8500_CUT2P0)
312 continue; 313 continue;
313 314
314 status = get_register_interruptible(ab8500, AB8500_INTERRUPT, 315 status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
@@ -724,17 +725,15 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
724 if (ret < 0) 725 if (ret < 0)
725 return ret; 726 return ret;
726 727
727 /* 728 switch (value) {
728 * 0x0 - Early Drop 729 case AB8500_CUTEARLY:
729 * 0x10 - Cut 1.0 730 case AB8500_CUT1P0:
730 * 0x11 - Cut 1.1 731 case AB8500_CUT1P1:
731 * 0x20 - Cut 2.0 732 case AB8500_CUT2P0:
732 * 0x30 - Cut 3.0 733 case AB8500_CUT3P0:
733 */
734 if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20 ||
735 value == 0x30) {
736 dev_info(ab8500->dev, "detected chip, revision: %#x\n", value); 734 dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
737 } else { 735 break;
736 default:
738 dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value); 737 dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value);
739 return -EINVAL; 738 return -EINVAL;
740 } 739 }
@@ -763,8 +762,9 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
763 762
764 /* Clear and mask all interrupts */ 763 /* Clear and mask all interrupts */
765 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { 764 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
766 /* Interrupt register 12 does'nt exist prior to version 0x20 */ 765 /* Interrupt register 12 doesn't exist prior to version 2.0 */
767 if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20) 766 if (ab8500_irq_regoffset[i] == 11 &&
767 ab8500->chip_id < AB8500_CUT2P0)
768 continue; 768 continue;
769 769
770 get_register_interruptible(ab8500, AB8500_INTERRUPT, 770 get_register_interruptible(ab8500, AB8500_INTERRUPT,
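
The hunk above replaces raw revision numbers with named cut constants. The values they presumably stand for can be read off the comment deleted in this hunk; the actual definitions are expected to live in <linux/mfd/ab8500.h>, which is not part of this diff:

/* Presumed mapping, taken from the deleted comment above. */
#define AB8500_CUTEARLY	0x00	/* Early Drop */
#define AB8500_CUT1P0	0x10	/* Cut 1.0 */
#define AB8500_CUT1P1	0x11	/* Cut 1.1 */
#define AB8500_CUT2P0	0x20	/* Cut 2.0 */
#define AB8500_CUT3P0	0x30	/* Cut 3.0 */
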
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 6421ad1160de..f16afb234ff9 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -57,6 +57,7 @@
57#define SW_AVG_16 0x60 57#define SW_AVG_16 0x60
58#define ADC_SW_CONV 0x04 58#define ADC_SW_CONV 0x04
59#define EN_ICHAR 0x80 59#define EN_ICHAR 0x80
60#define BTEMP_PULL_UP 0x08
60#define EN_BUF 0x40 61#define EN_BUF 0x40
61#define DIS_ZERO 0x00 62#define DIS_ZERO 0x00
62#define GPADC_BUSY 0x01 63#define GPADC_BUSY 0x01
@@ -101,6 +102,7 @@ struct adc_cal_data {
101 102
102/** 103/**
103 * struct ab8500_gpadc - AB8500 GPADC device information 104 * struct ab8500_gpadc - AB8500 GPADC device information
105 * @chip_id: ABB chip id
104 * @dev: pointer to the struct device 106 * @dev: pointer to the struct device
105 * @node: a list of AB8500 GPADCs, hence prepared for 107 * @node: a list of AB8500 GPADCs, hence prepared for
106 reentrance 108 reentrance
@@ -112,6 +114,7 @@ struct adc_cal_data {
112 * @cal_data array of ADC calibration data structs 114 * @cal_data array of ADC calibration data structs
113 */ 115 */
114struct ab8500_gpadc { 116struct ab8500_gpadc {
117 u8 chip_id;
115 struct device *dev; 118 struct device *dev;
116 struct list_head node; 119 struct list_head node;
117 struct completion ab8500_gpadc_complete; 120 struct completion ab8500_gpadc_complete;
@@ -274,6 +277,7 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
274 dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n"); 277 dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n");
275 goto out; 278 goto out;
276 } 279 }
280
277 /* Select the input source and set average samples to 16 */ 281 /* Select the input source and set average samples to 16 */
278 ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC, 282 ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
279 AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16)); 283 AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16));
@@ -282,9 +286,11 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
282 "gpadc_conversion: set avg samples failed\n"); 286 "gpadc_conversion: set avg samples failed\n");
283 goto out; 287 goto out;
284 } 288 }
289
285 /* 290 /*
286 * Enable ADC, buffering, select rising edge and enable ADC path 291 * Enable ADC, buffering, select rising edge and enable ADC path
287 * charging current sense if it needed 292 * charging current sense if it needed, ABB 3.0 needs some special
293 * treatment too.
288 */ 294 */
289 switch (input) { 295 switch (input) {
290 case MAIN_CHARGER_C: 296 case MAIN_CHARGER_C:
@@ -294,6 +300,23 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
294 EN_BUF | EN_ICHAR, 300 EN_BUF | EN_ICHAR,
295 EN_BUF | EN_ICHAR); 301 EN_BUF | EN_ICHAR);
296 break; 302 break;
303 case BTEMP_BALL:
304 if (gpadc->chip_id >= AB8500_CUT3P0) {
305 /* Turn on btemp pull-up on ABB 3.0 */
306 ret = abx500_mask_and_set_register_interruptible(
307 gpadc->dev,
308 AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
309 EN_BUF | BTEMP_PULL_UP,
310 EN_BUF | BTEMP_PULL_UP);
311
312 /*
313 * Delay might be needed for ABB8500 cut 3.0; if not, remove it
314 * once hardware is available
315 */
316 msleep(1);
317 break;
318 }
319 /* Intentional fallthrough */
297 default: 320 default:
298 ret = abx500_mask_and_set_register_interruptible(gpadc->dev, 321 ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
299 AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF); 322 AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF);
@@ -304,6 +327,7 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
304 "gpadc_conversion: select falling edge failed\n"); 327 "gpadc_conversion: select falling edge failed\n");
305 goto out; 328 goto out;
306 } 329 }
330
307 ret = abx500_mask_and_set_register_interruptible(gpadc->dev, 331 ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
308 AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV); 332 AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV);
309 if (ret < 0) { 333 if (ret < 0) {
@@ -552,6 +576,14 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
552 goto fail; 576 goto fail;
553 } 577 }
554 578
579 /* Get Chip ID of the ABB ASIC */
580 ret = abx500_get_chip_id(gpadc->dev);
581 if (ret < 0) {
582 dev_err(gpadc->dev, "failed to get chip ID\n");
583 goto fail_irq;
584 }
585 gpadc->chip_id = (u8) ret;
586
555 /* VTVout LDO used to power up ab8500-GPADC */ 587 /* VTVout LDO used to power up ab8500-GPADC */
556 gpadc->regu = regulator_get(&pdev->dev, "vddadc"); 588 gpadc->regu = regulator_get(&pdev->dev, "vddadc");
557 if (IS_ERR(gpadc->regu)) { 589 if (IS_ERR(gpadc->regu)) {
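
The GPADC hunk caches the ABB chip id at probe time and uses it to turn on the BTEMP pull-up (and wait briefly) on cut 3.0 parts before a BTEMP_BALL conversion. A hedged caller-side sketch; the handle-lookup helper ab8500_gpadc_get() is assumed here and is not shown in this hunk:

static int read_btemp_raw(void)
{
	struct ab8500_gpadc *gpadc = ab8500_gpadc_get();	/* assumed helper */

	if (IS_ERR_OR_NULL(gpadc))
		return -ENODEV;

	/* On cut 3.0 the driver now enables the BTEMP pull-up before
	 * starting the conversion; older cuts take the default path. */
	return ab8500_gpadc_convert(gpadc, BTEMP_BALL);
}
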
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 821e6b86afd2..9be541c6b004 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -11,8 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/mfd/ab8500.h> 13#include <linux/mfd/ab8500.h>
14 14#include <linux/mfd/db8500-prcmu.h>
15#include <mach/prcmu.h>
16 15
17static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data) 16static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
18{ 17{
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 0b4d5b23bec9..c27fd1fc3b86 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -88,19 +88,19 @@ struct asic3 {
88 88
89static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset); 89static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset);
90 90
91static inline void asic3_write_register(struct asic3 *asic, 91void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 value)
92 unsigned int reg, u32 value)
93{ 92{
94 iowrite16(value, asic->mapping + 93 iowrite16(value, asic->mapping +
95 (reg >> asic->bus_shift)); 94 (reg >> asic->bus_shift));
96} 95}
96EXPORT_SYMBOL_GPL(asic3_write_register);
97 97
98static inline u32 asic3_read_register(struct asic3 *asic, 98u32 asic3_read_register(struct asic3 *asic, unsigned int reg)
99 unsigned int reg)
100{ 99{
101 return ioread16(asic->mapping + 100 return ioread16(asic->mapping +
102 (reg >> asic->bus_shift)); 101 (reg >> asic->bus_shift));
103} 102}
103EXPORT_SYMBOL_GPL(asic3_read_register);
104 104
105static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set) 105static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
106{ 106{
@@ -676,7 +676,8 @@ static struct mfd_cell asic3_cell_ds1wm = {
676 .name = "ds1wm", 676 .name = "ds1wm",
677 .enable = ds1wm_enable, 677 .enable = ds1wm_enable,
678 .disable = ds1wm_disable, 678 .disable = ds1wm_disable,
679 .mfd_data = &ds1wm_pdata, 679 .platform_data = &ds1wm_pdata,
680 .pdata_size = sizeof(ds1wm_pdata),
680 .num_resources = ARRAY_SIZE(ds1wm_resources), 681 .num_resources = ARRAY_SIZE(ds1wm_resources),
681 .resources = ds1wm_resources, 682 .resources = ds1wm_resources,
682}; 683};
@@ -777,12 +778,61 @@ static struct mfd_cell asic3_cell_mmc = {
777 .name = "tmio-mmc", 778 .name = "tmio-mmc",
778 .enable = asic3_mmc_enable, 779 .enable = asic3_mmc_enable,
779 .disable = asic3_mmc_disable, 780 .disable = asic3_mmc_disable,
780 .mfd_data = &asic3_mmc_data, 781 .platform_data = &asic3_mmc_data,
782 .pdata_size = sizeof(asic3_mmc_data),
781 .num_resources = ARRAY_SIZE(asic3_mmc_resources), 783 .num_resources = ARRAY_SIZE(asic3_mmc_resources),
782 .resources = asic3_mmc_resources, 784 .resources = asic3_mmc_resources,
783}; 785};
784 786
787static const int clock_ledn[ASIC3_NUM_LEDS] = {
788 [0] = ASIC3_CLOCK_LED0,
789 [1] = ASIC3_CLOCK_LED1,
790 [2] = ASIC3_CLOCK_LED2,
791};
792
793static int asic3_leds_enable(struct platform_device *pdev)
794{
795 const struct mfd_cell *cell = mfd_get_cell(pdev);
796 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
797
798 asic3_clk_enable(asic, &asic->clocks[clock_ledn[cell->id]]);
799
800 return 0;
801}
802
803static int asic3_leds_disable(struct platform_device *pdev)
804{
805 const struct mfd_cell *cell = mfd_get_cell(pdev);
806 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
807
808 asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
809
810 return 0;
811}
812
813static struct mfd_cell asic3_cell_leds[ASIC3_NUM_LEDS] = {
814 [0] = {
815 .name = "leds-asic3",
816 .id = 0,
817 .enable = asic3_leds_enable,
818 .disable = asic3_leds_disable,
819 },
820 [1] = {
821 .name = "leds-asic3",
822 .id = 1,
823 .enable = asic3_leds_enable,
824 .disable = asic3_leds_disable,
825 },
826 [2] = {
827 .name = "leds-asic3",
828 .id = 2,
829 .enable = asic3_leds_enable,
830 .disable = asic3_leds_disable,
831 },
832};
833
785static int __init asic3_mfd_probe(struct platform_device *pdev, 834static int __init asic3_mfd_probe(struct platform_device *pdev,
835 struct asic3_platform_data *pdata,
786 struct resource *mem) 836 struct resource *mem)
787{ 837{
788 struct asic3 *asic = platform_get_drvdata(pdev); 838 struct asic3 *asic = platform_get_drvdata(pdev);
@@ -806,7 +856,8 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
806 856
807 /* MMC */ 857 /* MMC */
808 asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) + 858 asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
809 mem_sdio->start, 0x400 >> asic->bus_shift); 859 mem_sdio->start,
860 ASIC3_SD_CONFIG_SIZE >> asic->bus_shift);
810 if (!asic->tmio_cnf) { 861 if (!asic->tmio_cnf) {
811 ret = -ENOMEM; 862 ret = -ENOMEM;
812 dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n"); 863 dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
@@ -820,9 +871,23 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
820 if (ret < 0) 871 if (ret < 0)
821 goto out; 872 goto out;
822 873
823 if (mem_sdio && (irq >= 0)) 874 if (mem_sdio && (irq >= 0)) {
824 ret = mfd_add_devices(&pdev->dev, pdev->id, 875 ret = mfd_add_devices(&pdev->dev, pdev->id,
825 &asic3_cell_mmc, 1, mem_sdio, irq); 876 &asic3_cell_mmc, 1, mem_sdio, irq);
877 if (ret < 0)
878 goto out;
879 }
880
881 if (pdata->leds) {
882 int i;
883
884 for (i = 0; i < ASIC3_NUM_LEDS; ++i) {
885 asic3_cell_leds[i].platform_data = &pdata->leds[i];
886 asic3_cell_leds[i].pdata_size = sizeof(pdata->leds[i]);
887 }
888 ret = mfd_add_devices(&pdev->dev, 0,
889 asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0);
890 }
826 891
827 out: 892 out:
828 return ret; 893 return ret;
@@ -903,7 +968,7 @@ static int __init asic3_probe(struct platform_device *pdev)
903 */ 968 */
904 memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init)); 969 memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init));
905 970
906 asic3_mfd_probe(pdev, mem); 971 asic3_mfd_probe(pdev, pdata, mem);
907 972
908 dev_info(asic->dev, "ASIC3 Core driver\n"); 973 dev_info(asic->dev, "ASIC3 Core driver\n");
909 974
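
Besides adding the LED cells, the asic3 hunk un-inlines and exports asic3_read_register()/asic3_write_register() so that cell drivers can reach the chip through their parent. A sketch of that pattern, mirroring asic3_leds_enable() above; the function name and the 'reg' offset are placeholders, and it assumes the accessor prototypes (and at least an opaque struct asic3) are visible to the child via the asic3 header:

static u32 asic3_child_read(struct platform_device *pdev, unsigned int reg)
{
	/* The MFD parent keeps its struct asic3 as drvdata. */
	struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);

	return asic3_read_register(asic, reg);
}
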
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 414783b04849..4e2af2cb2d26 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -119,12 +119,14 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
119 /* Voice codec interface client */ 119 /* Voice codec interface client */
120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; 120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
121 cell->name = "davinci-vcif"; 121 cell->name = "davinci-vcif";
122 cell->mfd_data = davinci_vc; 122 cell->platform_data = davinci_vc;
123 cell->pdata_size = sizeof(*davinci_vc);
123 124
124 /* Voice codec CQ93VC client */ 125 /* Voice codec CQ93VC client */
125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; 126 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
126 cell->name = "cq93vc-codec"; 127 cell->name = "cq93vc-codec";
127 cell->mfd_data = davinci_vc; 128 cell->platform_data = davinci_vc;
129 cell->pdata_size = sizeof(*davinci_vc);
128 130
129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, 131 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
130 DAVINCI_VC_CELLS, NULL, 0); 132 DAVINCI_VC_CELLS, NULL, 0);
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-regs.h b/drivers/mfd/db5500-prcmu-regs.h
index 455467e88791..9a8e9e4ddd33 100644
--- a/arch/arm/mach-ux500/include/mach/prcmu-regs.h
+++ b/drivers/mfd/db5500-prcmu-regs.h
@@ -15,11 +15,20 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18#define _PRCMU_BASE IO_ADDRESS(U8500_PRCMU_BASE)
19
20#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118) 18#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
19#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f
20#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xf
21
22#define PRCM_PLLARM_LOCKP (_PRCMU_BASE + 0x0a8)
23#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 0x2
24
21#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114) 25#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114)
26#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ 0x1
27
22#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98) 28#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98)
29#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE 0x1
30#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON 0x100
31
23#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0) 32#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0)
24#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4) 33#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4)
25#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0) 34#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0)
@@ -28,7 +37,8 @@
28 37
29/* ARM WFI Standby signal register */ 38/* ARM WFI Standby signal register */
30#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130) 39#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130)
31#define PRCMU_IOCR (_PRCMU_BASE + 0x310) 40#define PRCM_IOCR (_PRCMU_BASE + 0x310)
41#define PRCM_IOCR_IOFORCE 0x1
32 42
33/* CPU mailbox registers */ 43/* CPU mailbox registers */
34#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc) 44#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc)
@@ -37,6 +47,8 @@
37 47
38/* Dual A9 core interrupt management unit registers */ 48/* Dual A9 core interrupt management unit registers */
39#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328) 49#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328)
50#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1
51
40#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c) 52#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c)
41#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c) 53#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c)
42#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120) 54#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120)
@@ -74,14 +86,17 @@
74/* PRCMU clock/PLL/reset registers */ 86/* PRCMU clock/PLL/reset registers */
75#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500) 87#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
76#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504) 88#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
89#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
77#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044) 90#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044)
78#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064) 91#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064)
79#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058) 92#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058)
80#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c) 93#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c)
81#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530) 94#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
82#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C) 95#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
96#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
83#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4) 97#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4)
84#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8) 98#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8)
99#define PRCM_CLKOCR (_PRCMU_BASE + 0x1CC)
85 100
86/* ePOD and memory power signal control registers */ 101/* ePOD and memory power signal control registers */
87#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410) 102#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410)
@@ -92,5 +107,9 @@
92 107
93/* Miscellaneous unit registers */ 108/* Miscellaneous unit registers */
94#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324) 109#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324)
110#define PRCM_GPIOCR (_PRCMU_BASE + 0x138)
111#define PRCM_GPIOCR_DBG_STM_MOD_CMD1 0x800
112#define PRCM_GPIOCR_DBG_UARTMOD_CMD0 0x1
113
95 114
96#endif /* __MACH_PRCMU_REGS_H */ 115#endif /* __MACH_PRCMU_REGS_H */
diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c
new file mode 100644
index 000000000000..9dbb3cab4a6f
--- /dev/null
+++ b/drivers/mfd/db5500-prcmu.c
@@ -0,0 +1,448 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
6 *
7 * U5500 PRCM Unit interface driver
8 */
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/mutex.h>
18#include <linux/completion.h>
19#include <linux/irq.h>
20#include <linux/jiffies.h>
21#include <linux/bitops.h>
22#include <linux/interrupt.h>
23#include <linux/mfd/db5500-prcmu.h>
24#include <mach/hardware.h>
25#include <mach/irqs.h>
26#include <mach/db5500-regs.h>
27#include "db5500-prcmu-regs.h"
28
29#define _PRCM_MB_HEADER (tcdm_base + 0xFE8)
30#define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0)
31#define PRCM_REQ_MB1_HEADER (_PRCM_MB_HEADER + 0x1)
32#define PRCM_REQ_MB2_HEADER (_PRCM_MB_HEADER + 0x2)
33#define PRCM_REQ_MB3_HEADER (_PRCM_MB_HEADER + 0x3)
34#define PRCM_REQ_MB4_HEADER (_PRCM_MB_HEADER + 0x4)
35#define PRCM_REQ_MB5_HEADER (_PRCM_MB_HEADER + 0x5)
36#define PRCM_REQ_MB6_HEADER (_PRCM_MB_HEADER + 0x6)
37#define PRCM_REQ_MB7_HEADER (_PRCM_MB_HEADER + 0x7)
38#define PRCM_ACK_MB0_HEADER (_PRCM_MB_HEADER + 0x8)
39#define PRCM_ACK_MB1_HEADER (_PRCM_MB_HEADER + 0x9)
40#define PRCM_ACK_MB2_HEADER (_PRCM_MB_HEADER + 0xa)
41#define PRCM_ACK_MB3_HEADER (_PRCM_MB_HEADER + 0xb)
42#define PRCM_ACK_MB4_HEADER (_PRCM_MB_HEADER + 0xc)
43#define PRCM_ACK_MB5_HEADER (_PRCM_MB_HEADER + 0xd)
44#define PRCM_ACK_MB6_HEADER (_PRCM_MB_HEADER + 0xe)
45#define PRCM_ACK_MB7_HEADER (_PRCM_MB_HEADER + 0xf)
46
47/* Req Mailboxes */
48#define PRCM_REQ_MB0 (tcdm_base + 0xFD8)
49#define PRCM_REQ_MB1 (tcdm_base + 0xFCC)
50#define PRCM_REQ_MB2 (tcdm_base + 0xFC4)
51#define PRCM_REQ_MB3 (tcdm_base + 0xFC0)
52#define PRCM_REQ_MB4 (tcdm_base + 0xF98)
53#define PRCM_REQ_MB5 (tcdm_base + 0xF90)
54#define PRCM_REQ_MB6 (tcdm_base + 0xF8C)
55#define PRCM_REQ_MB7 (tcdm_base + 0xF84)
56
57/* Ack Mailboxes */
58#define PRCM_ACK_MB0 (tcdm_base + 0xF38)
59#define PRCM_ACK_MB1 (tcdm_base + 0xF30)
60#define PRCM_ACK_MB2 (tcdm_base + 0xF24)
61#define PRCM_ACK_MB3 (tcdm_base + 0xF20)
62#define PRCM_ACK_MB4 (tcdm_base + 0xF1C)
63#define PRCM_ACK_MB5 (tcdm_base + 0xF14)
64#define PRCM_ACK_MB6 (tcdm_base + 0xF0C)
65#define PRCM_ACK_MB7 (tcdm_base + 0xF08)
66
67enum mb_return_code {
68 RC_SUCCESS,
69 RC_FAIL,
70};
71
72/* Mailbox 0 headers. */
73enum mb0_header {
74 /* request */
75 RMB0H_PWR_STATE_TRANS = 1,
76 RMB0H_WAKE_UP_CFG,
77 RMB0H_RD_WAKE_UP_ACK,
78 /* acknowledge */
79 AMB0H_WAKE_UP = 1,
80};
81
82/* Mailbox 5 headers. */
83enum mb5_header {
84 MB5H_I2C_WRITE = 1,
85 MB5H_I2C_READ,
86};
87
88/* Request mailbox 5 fields. */
89#define PRCM_REQ_MB5_I2C_SLAVE (PRCM_REQ_MB5 + 0)
90#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 1)
91#define PRCM_REQ_MB5_I2C_SIZE (PRCM_REQ_MB5 + 2)
92#define PRCM_REQ_MB5_I2C_DATA (PRCM_REQ_MB5 + 4)
93
94/* Acknowledge mailbox 5 fields. */
95#define PRCM_ACK_MB5_RETURN_CODE (PRCM_ACK_MB5 + 0)
96#define PRCM_ACK_MB5_I2C_DATA (PRCM_ACK_MB5 + 4)
97
98#define NUM_MB 8
99#define MBOX_BIT BIT
100#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
101
102/*
103* Used by MCDE to setup all necessary PRCMU registers
104*/
105#define PRCMU_RESET_DSIPLL 0x00004000
106#define PRCMU_UNCLAMP_DSIPLL 0x00400800
107
108/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0x8, = 50 Mhz*/
109#define PRCMU_DSI_CLOCK_SETTING 0x00000128
110/* TVCLK_MGT PLLSW=001 (PLLSOC0) PLLDIV=0x13, = 19.05 MHZ */
111#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000135
112#define PRCMU_PLLDSI_FREQ_SETTING 0x0004013C
113#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000002
114#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000101
115#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00000101
116
117#define PRCMU_ENABLE_PLLDSI 0x00000001
118#define PRCMU_DISABLE_PLLDSI 0x00000000
119
120#define PRCMU_DSI_RESET_SW 0x00000003
121
122#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
123
124/*
125 * mb0_transfer - state needed for mailbox 0 communication.
126 * @lock: The transaction lock.
127 */
128static struct {
129 spinlock_t lock;
130} mb0_transfer;
131
132/*
133 * mb5_transfer - state needed for mailbox 5 communication.
134 * @lock: The transaction lock.
135 * @work: The transaction completion structure.
136 * @ack: Reply ("acknowledge") data.
137 */
138static struct {
139 struct mutex lock;
140 struct completion work;
141 struct {
142 u8 header;
143 u8 status;
144 u8 value[4];
145 } ack;
146} mb5_transfer;
147
148/* PRCMU TCDM base IO address. */
149static __iomem void *tcdm_base;
150
151/**
152 * db5500_prcmu_abb_read() - Read register value(s) from the ABB.
153 * @slave: The I2C slave address.
154 * @reg: The (start) register address.
155 * @value: The read out value(s).
156 * @size: The number of registers to read.
157 *
158 * Reads register value(s) from the ABB.
159 * @size has to be <= 4.
160 */
161int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
162{
163 int r;
164
165 if ((size < 1) || (4 < size))
166 return -EINVAL;
167
168 mutex_lock(&mb5_transfer.lock);
169
170 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
171 cpu_relax();
172 writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
173 writeb(reg, PRCM_REQ_MB5_I2C_REG);
174 writeb(size, PRCM_REQ_MB5_I2C_SIZE);
175 writeb(MB5H_I2C_READ, PRCM_REQ_MB5_HEADER);
176
177 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
178 wait_for_completion(&mb5_transfer.work);
179
180 r = 0;
181 if ((mb5_transfer.ack.header == MB5H_I2C_READ) &&
182 (mb5_transfer.ack.status == RC_SUCCESS))
183 memcpy(value, mb5_transfer.ack.value, (size_t)size);
184 else
185 r = -EIO;
186
187 mutex_unlock(&mb5_transfer.lock);
188
189 return r;
190}
191
192/**
193 * db5500_prcmu_abb_write() - Write register value(s) to the ABB.
194 * @slave: The I2C slave address.
195 * @reg: The (start) register address.
196 * @value: The value(s) to write.
197 * @size: The number of registers to write.
198 *
199 * Writes register value(s) to the ABB.
200 * @size has to be <= 4.
201 */
202int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
203{
204 int r;
205
206 if ((size < 1) || (4 < size))
207 return -EINVAL;
208
209 mutex_lock(&mb5_transfer.lock);
210
211 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
212 cpu_relax();
213 writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
214 writeb(reg, PRCM_REQ_MB5_I2C_REG);
215 writeb(size, PRCM_REQ_MB5_I2C_SIZE);
216 memcpy_toio(PRCM_REQ_MB5_I2C_DATA, value, size);
217 writeb(MB5H_I2C_WRITE, PRCM_REQ_MB5_HEADER);
218
219 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
220 wait_for_completion(&mb5_transfer.work);
221
222 if ((mb5_transfer.ack.header == MB5H_I2C_WRITE) &&
223 (mb5_transfer.ack.status == RC_SUCCESS))
224 r = 0;
225 else
226 r = -EIO;
227
228 mutex_unlock(&mb5_transfer.lock);
229
230 return r;
231}
232
233int db5500_prcmu_enable_dsipll(void)
234{
235 int i;
236
237 /* Enable DSIPLL_RESETN resets */
238 writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
239 /* Unclamp DSIPLL in/out */
240 writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
241 /* Set DSI PLL FREQ */
242 writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
243 writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
244 PRCM_DSI_PLLOUT_SEL);
245 /* Enable Escape clocks */
246 writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
247
248 /* Start DSI PLL */
249 writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
250 /* Reset DSI PLL */
251 writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
252 for (i = 0; i < 10; i++) {
253 if ((readl(PRCM_PLLDSI_LOCKP) &
254 PRCMU_PLLDSI_LOCKP_LOCKED) == PRCMU_PLLDSI_LOCKP_LOCKED)
255 break;
256 udelay(100);
257 }
258 /* Release DSIPLL_RESETN */
259 writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
260 return 0;
261}
262
263int db5500_prcmu_disable_dsipll(void)
264{
265 /* Disable dsi pll */
266 writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
267 /* Disable escapeclock */
268 writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
269 return 0;
270}
271
272int db5500_prcmu_set_display_clocks(void)
273{
274 /* HDMI and TVCLK Should be handled somewhere else */
275 /* PLLDIV=8, PLLSW=2, CLKEN=1 */
276 writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
277 /* PLLDIV=14, PLLSW=2, CLKEN=1 */
278 writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
279 return 0;
280}
281
282static void ack_dbb_wakeup(void)
283{
284 unsigned long flags;
285
286 spin_lock_irqsave(&mb0_transfer.lock, flags);
287
288 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
289 cpu_relax();
290
291 writeb(RMB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
292 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
293
294 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
295}
296
297static inline void print_unknown_header_warning(u8 n, u8 header)
298{
299 pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
300 header, n);
301}
302
303static bool read_mailbox_0(void)
304{
305 bool r;
306 u8 header;
307
308 header = readb(PRCM_ACK_MB0_HEADER);
309 switch (header) {
310 case AMB0H_WAKE_UP:
311 r = true;
312 break;
313 default:
314 print_unknown_header_warning(0, header);
315 r = false;
316 break;
317 }
318 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR);
319 return r;
320}
321
322static bool read_mailbox_1(void)
323{
324 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR);
325 return false;
326}
327
328static bool read_mailbox_2(void)
329{
330 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR);
331 return false;
332}
333
334static bool read_mailbox_3(void)
335{
336 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR);
337 return false;
338}
339
340static bool read_mailbox_4(void)
341{
342 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR);
343 return false;
344}
345
346static bool read_mailbox_5(void)
347{
348 u8 header;
349
350 header = readb(PRCM_ACK_MB5_HEADER);
351 switch (header) {
352 case MB5H_I2C_READ:
353 memcpy_fromio(mb5_transfer.ack.value, PRCM_ACK_MB5_I2C_DATA, 4);
354 case MB5H_I2C_WRITE:
355 mb5_transfer.ack.header = header;
356 mb5_transfer.ack.status = readb(PRCM_ACK_MB5_RETURN_CODE);
357 complete(&mb5_transfer.work);
358 break;
359 default:
360 print_unknown_header_warning(5, header);
361 break;
362 }
363 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR);
364 return false;
365}
366
367static bool read_mailbox_6(void)
368{
369 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR);
370 return false;
371}
372
373static bool read_mailbox_7(void)
374{
375 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR);
376 return false;
377}
378
379static bool (* const read_mailbox[NUM_MB])(void) = {
380 read_mailbox_0,
381 read_mailbox_1,
382 read_mailbox_2,
383 read_mailbox_3,
384 read_mailbox_4,
385 read_mailbox_5,
386 read_mailbox_6,
387 read_mailbox_7
388};
389
390static irqreturn_t prcmu_irq_handler(int irq, void *data)
391{
392 u32 bits;
393 u8 n;
394 irqreturn_t r;
395
396 bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
397 if (unlikely(!bits))
398 return IRQ_NONE;
399
400 r = IRQ_HANDLED;
401 for (n = 0; bits; n++) {
402 if (bits & MBOX_BIT(n)) {
403 bits -= MBOX_BIT(n);
404 if (read_mailbox[n]())
405 r = IRQ_WAKE_THREAD;
406 }
407 }
408 return r;
409}
410
411static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
412{
413 ack_dbb_wakeup();
414 return IRQ_HANDLED;
415}
416
417void __init db5500_prcmu_early_init(void)
418{
419 tcdm_base = __io_address(U5500_PRCMU_TCDM_BASE);
420 spin_lock_init(&mb0_transfer.lock);
421 mutex_init(&mb5_transfer.lock);
422 init_completion(&mb5_transfer.work);
423}
424
425/**
426 * db5500_prcmu_init - arch init call for the Linux PRCMU fw init logic
427 *
428 */
429int __init db5500_prcmu_init(void)
430{
431 int r = 0;
432
433 if (ux500_is_svp() || !cpu_is_u5500())
434 return -ENODEV;
435
436 /* Clean up the mailbox interrupts after pre-kernel code. */
437 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLEAR);
438
439 r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
440 prcmu_irq_thread_fn, 0, "prcmu", NULL);
441 if (r < 0) {
442 pr_err("prcmu: Failed to allocate IRQ_DB5500_PRCMU1.\n");
443 return -EBUSY;
444 }
445 return 0;
446}
447
448arch_initcall(db5500_prcmu_init);
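
db5500-prcmu.c exposes ABB register access through mailbox 5: the request is written into TCDM, the mailbox interrupt is raised, and the caller sleeps on a completion until read_mailbox_5() fills in the ack. A hedged usage sketch (the slave and register addresses are placeholders, not real AB5500 values; both helpers sleep, so they must be called from process context):

static int example_abb_rmw(void)
{
	u8 val;
	int ret;

	ret = db5500_prcmu_abb_read(0x4A, 0x00, &val, 1);	/* placeholder addresses */
	if (ret)
		return ret;

	val |= 0x01;	/* placeholder bit */
	return db5500_prcmu_abb_write(0x4A, 0x00, &val, 1);
}
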
diff --git a/drivers/mfd/db8500-prcmu-regs.h b/drivers/mfd/db8500-prcmu-regs.h
new file mode 100644
index 000000000000..3bbf04d58043
--- /dev/null
+++ b/drivers/mfd/db8500-prcmu-regs.h
@@ -0,0 +1,166 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
6 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
7 *
8 * License Terms: GNU General Public License v2
9 *
10 * PRCM Unit registers
11 */
12#ifndef __DB8500_PRCMU_REGS_H
13#define __DB8500_PRCMU_REGS_H
14
15#include <linux/bitops.h>
16#include <mach/hardware.h>
17
18#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
19
20#define PRCM_ARM_PLLDIVPS 0x118
21#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE BITS(0, 5)
22#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xF
23
24#define PRCM_PLLARM_LOCKP 0x0A8
25#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 BIT(1)
26
27#define PRCM_ARM_CHGCLKREQ 0x114
28#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ BIT(0)
29
30#define PRCM_PLLARM_ENABLE 0x98
31#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE BIT(0)
32#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON BIT(8)
33
34#define PRCM_ARMCLKFIX_MGT 0x0
35#define PRCM_A9_RESETN_CLR 0x1f4
36#define PRCM_A9_RESETN_SET 0x1f0
37#define PRCM_ARM_LS_CLAMP 0x30C
38#define PRCM_SRAM_A9 0x308
39
40/* ARM WFI Standby signal register */
41#define PRCM_ARM_WFI_STANDBY 0x130
42#define PRCM_IOCR 0x310
43#define PRCM_IOCR_IOFORCE BIT(0)
44
45/* CPU mailbox registers */
46#define PRCM_MBOX_CPU_VAL 0x0FC
47#define PRCM_MBOX_CPU_SET 0x100
48
49/* Dual A9 core interrupt management unit registers */
50#define PRCM_A9_MASK_REQ 0x328
51#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ BIT(0)
52
53#define PRCM_A9_MASK_ACK 0x32C
54#define PRCM_ARMITMSK31TO0 0x11C
55#define PRCM_ARMITMSK63TO32 0x120
56#define PRCM_ARMITMSK95TO64 0x124
57#define PRCM_ARMITMSK127TO96 0x128
58#define PRCM_POWER_STATE_VAL 0x25C
59#define PRCM_ARMITVAL31TO0 0x260
60#define PRCM_ARMITVAL63TO32 0x264
61#define PRCM_ARMITVAL95TO64 0x268
62#define PRCM_ARMITVAL127TO96 0x26C
63
64#define PRCM_HOSTACCESS_REQ 0x334
65#define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ BIT(0)
66
67#define PRCM_ARM_IT1_CLR 0x48C
68#define PRCM_ARM_IT1_VAL 0x494
69
70#define PRCM_ITSTATUS0 0x148
71#define PRCM_ITSTATUS1 0x150
72#define PRCM_ITSTATUS2 0x158
73#define PRCM_ITSTATUS3 0x160
74#define PRCM_ITSTATUS4 0x168
75#define PRCM_ITSTATUS5 0x484
76#define PRCM_ITCLEAR5 0x488
77#define PRCM_ARMIT_MASKXP70_IT 0x1018
78
79/* System reset register */
80#define PRCM_APE_SOFTRST 0x228
81
82/* Level shifter and clamp control registers */
83#define PRCM_MMIP_LS_CLAMP_SET 0x420
84#define PRCM_MMIP_LS_CLAMP_CLR 0x424
85
86/* PRCMU HW semaphore */
87#define PRCM_SEM 0x400
88#define PRCM_SEM_PRCM_SEM BIT(0)
89
90/* PRCMU clock/PLL/reset registers */
91#define PRCM_PLLDSI_FREQ 0x500
92#define PRCM_PLLDSI_ENABLE 0x504
93#define PRCM_PLLDSI_LOCKP 0x508
94#define PRCM_DSI_PLLOUT_SEL 0x530
95#define PRCM_DSITVCLK_DIV 0x52C
96#define PRCM_APE_RESETN_SET 0x1E4
97#define PRCM_APE_RESETN_CLR 0x1E8
98
99#define PRCM_TCR 0x1C8
100#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
101#define PRCM_TCR_STOP_TIMERS BIT(16)
102#define PRCM_TCR_DOZE_MODE BIT(17)
103
104#define PRCM_CLKOCR 0x1CC
105#define PRCM_CLKOCR_CLKODIV0_SHIFT 0
106#define PRCM_CLKOCR_CLKODIV0_MASK BITS(0, 5)
107#define PRCM_CLKOCR_CLKOSEL0_SHIFT 6
108#define PRCM_CLKOCR_CLKOSEL0_MASK BITS(6, 8)
109#define PRCM_CLKOCR_CLKODIV1_SHIFT 16
110#define PRCM_CLKOCR_CLKODIV1_MASK BITS(16, 21)
111#define PRCM_CLKOCR_CLKOSEL1_SHIFT 22
112#define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24)
113#define PRCM_CLKOCR_CLK1TYPE BIT(28)
114
115#define PRCM_SGACLK_MGT 0x014
116#define PRCM_UARTCLK_MGT 0x018
117#define PRCM_MSP02CLK_MGT 0x01C
118#define PRCM_MSP1CLK_MGT 0x288
119#define PRCM_I2CCLK_MGT 0x020
120#define PRCM_SDMMCCLK_MGT 0x024
121#define PRCM_SLIMCLK_MGT 0x028
122#define PRCM_PER1CLK_MGT 0x02C
123#define PRCM_PER2CLK_MGT 0x030
124#define PRCM_PER3CLK_MGT 0x034
125#define PRCM_PER5CLK_MGT 0x038
126#define PRCM_PER6CLK_MGT 0x03C
127#define PRCM_PER7CLK_MGT 0x040
128#define PRCM_LCDCLK_MGT 0x044
129#define PRCM_BMLCLK_MGT 0x04C
130#define PRCM_HSITXCLK_MGT 0x050
131#define PRCM_HSIRXCLK_MGT 0x054
132#define PRCM_HDMICLK_MGT 0x058
133#define PRCM_APEATCLK_MGT 0x05C
134#define PRCM_APETRACECLK_MGT 0x060
135#define PRCM_MCDECLK_MGT 0x064
136#define PRCM_IPI2CCLK_MGT 0x068
137#define PRCM_DSIALTCLK_MGT 0x06C
138#define PRCM_DMACLK_MGT 0x074
139#define PRCM_B2R2CLK_MGT 0x078
140#define PRCM_TVCLK_MGT 0x07C
141#define PRCM_UNIPROCLK_MGT 0x278
142#define PRCM_SSPCLK_MGT 0x280
143#define PRCM_RNGCLK_MGT 0x284
144#define PRCM_UICCCLK_MGT 0x27C
145
146#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
147#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
148#define PRCM_CLK_MGT_CLKEN BIT(8)
149
150/* ePOD and memory power signal control registers */
151#define PRCM_EPOD_C_SET 0x410
152#define PRCM_SRAM_LS_SLEEP 0x304
153
154/* Debug power control unit registers */
155#define PRCM_POWER_STATE_SET 0x254
156
157/* Miscellaneous unit registers */
158#define PRCM_DSI_SW_RESET 0x324
159#define PRCM_GPIOCR 0x138
160
161/* GPIOCR register */
162#define PRCM_GPIOCR_SPI2_SELECT BIT(23)
163
164#define PRCM_DDR_SUBSYS_APE_MINBW 0x438
165
166#endif /* __DB8500_PRCMU_REGS_H */
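
The BITS(_start, _end) helper at the top of this header builds a contiguous mask covering bits _start.._end inclusive: BIT(_end) - BIT(_start) sets bits _start.._end-1, and adding BIT(_end) sets the top bit as well. A quick expansion against masks used in this header, for reference:

/* BITS(lo, hi) == (BIT(hi) - BIT(lo)) + BIT(hi)
 *   BITS(0, 5) == 0x3F   (PRCM_ARM_PLLDIVPS_ARM_BRM_RATE)
 *   BITS(0, 4) == 0x1F   (PRCM_CLK_MGT_CLKPLLDIV_MASK)
 *   BITS(5, 7) == 0xE0   (PRCM_CLK_MGT_CLKPLLSW_MASK)
 */
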
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
new file mode 100644
index 000000000000..e63782107e2f
--- /dev/null
+++ b/drivers/mfd/db8500-prcmu.c
@@ -0,0 +1,2069 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
9 *
10 * U8500 PRCM Unit interface driver
11 *
12 */
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/delay.h>
16#include <linux/errno.h>
17#include <linux/err.h>
18#include <linux/spinlock.h>
19#include <linux/io.h>
20#include <linux/slab.h>
21#include <linux/mutex.h>
22#include <linux/completion.h>
23#include <linux/irq.h>
24#include <linux/jiffies.h>
25#include <linux/bitops.h>
26#include <linux/fs.h>
27#include <linux/platform_device.h>
28#include <linux/uaccess.h>
29#include <linux/mfd/core.h>
30#include <linux/mfd/db8500-prcmu.h>
31#include <linux/regulator/db8500-prcmu.h>
32#include <linux/regulator/machine.h>
33#include <mach/hardware.h>
34#include <mach/irqs.h>
35#include <mach/db8500-regs.h>
36#include <mach/id.h>
37#include "db8500-prcmu-regs.h"
38
39/* Offset for the firmware version within the TCPM */
40#define PRCMU_FW_VERSION_OFFSET 0xA4
41
42/* PRCMU project numbers, defined by PRCMU FW */
43#define PRCMU_PROJECT_ID_8500V1_0 1
44#define PRCMU_PROJECT_ID_8500V2_0 2
45#define PRCMU_PROJECT_ID_8400V2_0 3
46
47/* Index of different voltages to be used when accessing AVSData */
48#define PRCM_AVS_BASE 0x2FC
49#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
50#define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1)
51#define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2)
52#define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3)
53#define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4)
54#define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5)
55#define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6)
56#define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7)
57#define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8)
58#define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9)
59#define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA)
60#define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB)
61#define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC)
62
63#define PRCM_AVS_VOLTAGE 0
64#define PRCM_AVS_VOLTAGE_MASK 0x3f
65#define PRCM_AVS_ISSLOWSTARTUP 6
66#define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP)
67#define PRCM_AVS_ISMODEENABLE 7
68#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)
69
70#define PRCM_BOOT_STATUS 0xFFF
71#define PRCM_ROMCODE_A2P 0xFFE
72#define PRCM_ROMCODE_P2A 0xFFD
73#define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */
74
75#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
76
77#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */
78#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0)
79#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1)
80#define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2)
81#define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3)
82#define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4)
83#define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5)
84#define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8)
85
86/* Req Mailboxes */
87#define PRCM_REQ_MB0 0xFDC /* 12 bytes */
88#define PRCM_REQ_MB1 0xFD0 /* 12 bytes */
89#define PRCM_REQ_MB2 0xFC0 /* 16 bytes */
90#define PRCM_REQ_MB3 0xE4C /* 372 bytes */
91#define PRCM_REQ_MB4 0xE48 /* 4 bytes */
92#define PRCM_REQ_MB5 0xE44 /* 4 bytes */
93
94/* Ack Mailboxes */
95#define PRCM_ACK_MB0 0xE08 /* 52 bytes */
96#define PRCM_ACK_MB1 0xE04 /* 4 bytes */
97#define PRCM_ACK_MB2 0xE00 /* 4 bytes */
98#define PRCM_ACK_MB3 0xDFC /* 4 bytes */
99#define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
100#define PRCM_ACK_MB5 0xDF4 /* 4 bytes */
101
102/* Mailbox 0 headers */
103#define MB0H_POWER_STATE_TRANS 0
104#define MB0H_CONFIG_WAKEUPS_EXE 1
105#define MB0H_READ_WAKEUP_ACK 3
106#define MB0H_CONFIG_WAKEUPS_SLEEP 4
107
108#define MB0H_WAKEUP_EXE 2
109#define MB0H_WAKEUP_SLEEP 5
110
111/* Mailbox 0 REQs */
112#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
113#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1)
114#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2)
115#define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3)
116#define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4)
117#define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8)
118
119/* Mailbox 0 ACKs */
120#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
121#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
122#define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4)
123#define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8)
124#define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C)
125#define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20)
126#define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20
127
128/* Mailbox 1 headers */
129#define MB1H_ARM_APE_OPP 0x0
130#define MB1H_RESET_MODEM 0x2
131#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
132#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
133#define MB1H_RELEASE_USB_WAKEUP 0x5
134
135/* Mailbox 1 Requests */
136#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
137#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
138#define PRCM_REQ_MB1_APE_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x4)
139#define PRCM_REQ_MB1_ARM_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x8)
140
141/* Mailbox 1 ACKs */
142#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
143#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
144#define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2)
145#define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3)
146
147/* Mailbox 2 headers */
148#define MB2H_DPS 0x0
149#define MB2H_AUTO_PWR 0x1
150
151/* Mailbox 2 REQs */
152#define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0)
153#define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1)
154#define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2)
155#define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3)
156#define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4)
157#define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5)
158#define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6)
159#define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7)
160#define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8)
161#define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC)
162
163/* Mailbox 2 ACKs */
164#define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
165#define HWACC_PWR_ST_OK 0xFE
166
167/* Mailbox 3 headers */
168#define MB3H_ANC 0x0
169#define MB3H_SIDETONE 0x1
170#define MB3H_SYSCLK 0xE
171
172/* Mailbox 3 Requests */
173#define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0)
174#define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20)
175#define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60)
176#define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64)
177#define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68)
178#define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C)
179#define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C)
180
181/* Mailbox 4 headers */
182#define MB4H_DDR_INIT 0x0
183#define MB4H_MEM_ST 0x1
184#define MB4H_HOTDOG 0x12
185#define MB4H_HOTMON 0x13
186#define MB4H_HOT_PERIOD 0x14
187
188/* Mailbox 4 Requests */
189#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0)
190#define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1)
191#define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3)
192#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0)
193#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0)
194#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1)
195#define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2)
196#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0)
197#define HOTMON_CONFIG_LOW BIT(0)
198#define HOTMON_CONFIG_HIGH BIT(1)
199
200/* Mailbox 5 Requests */
201#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
202#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
203#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
204#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
205#define PRCMU_I2C_WRITE(slave) \
206 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
207#define PRCMU_I2C_READ(slave) \
208 (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
209#define PRCMU_I2C_STOP_EN BIT(3)
210
211/* Mailbox 5 ACKs */
212#define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
213#define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3)
214#define I2C_WR_OK 0x1
215#define I2C_RD_OK 0x2
216
217#define NUM_MB 8
218#define MBOX_BIT BIT
219#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
220
221/*
222 * Wakeups/IRQs
223 */
224
225#define WAKEUP_BIT_RTC BIT(0)
226#define WAKEUP_BIT_RTT0 BIT(1)
227#define WAKEUP_BIT_RTT1 BIT(2)
228#define WAKEUP_BIT_HSI0 BIT(3)
229#define WAKEUP_BIT_HSI1 BIT(4)
230#define WAKEUP_BIT_CA_WAKE BIT(5)
231#define WAKEUP_BIT_USB BIT(6)
232#define WAKEUP_BIT_ABB BIT(7)
233#define WAKEUP_BIT_ABB_FIFO BIT(8)
234#define WAKEUP_BIT_SYSCLK_OK BIT(9)
235#define WAKEUP_BIT_CA_SLEEP BIT(10)
236#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
237#define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
238#define WAKEUP_BIT_ANC_OK BIT(13)
239#define WAKEUP_BIT_SW_ERROR BIT(14)
240#define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
241#define WAKEUP_BIT_ARM BIT(17)
242#define WAKEUP_BIT_HOTMON_LOW BIT(18)
243#define WAKEUP_BIT_HOTMON_HIGH BIT(19)
244#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
245#define WAKEUP_BIT_GPIO0 BIT(23)
246#define WAKEUP_BIT_GPIO1 BIT(24)
247#define WAKEUP_BIT_GPIO2 BIT(25)
248#define WAKEUP_BIT_GPIO3 BIT(26)
249#define WAKEUP_BIT_GPIO4 BIT(27)
250#define WAKEUP_BIT_GPIO5 BIT(28)
251#define WAKEUP_BIT_GPIO6 BIT(29)
252#define WAKEUP_BIT_GPIO7 BIT(30)
253#define WAKEUP_BIT_GPIO8 BIT(31)
254
255/*
256 * This vector maps irq numbers to the bits in the bit field used in
257 * communication with the PRCMU firmware.
258 *
259 * The reason for having this is to keep the irq numbers contiguous even though
260 * the bits in the bit field are not. (The bits also have a tendency to move
261 * around, to further complicate matters.)
262 */
263#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
264#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
265static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
266 IRQ_ENTRY(RTC),
267 IRQ_ENTRY(RTT0),
268 IRQ_ENTRY(RTT1),
269 IRQ_ENTRY(HSI0),
270 IRQ_ENTRY(HSI1),
271 IRQ_ENTRY(CA_WAKE),
272 IRQ_ENTRY(USB),
273 IRQ_ENTRY(ABB),
274 IRQ_ENTRY(ABB_FIFO),
275 IRQ_ENTRY(CA_SLEEP),
276 IRQ_ENTRY(ARM),
277 IRQ_ENTRY(HOTMON_LOW),
278 IRQ_ENTRY(HOTMON_HIGH),
279 IRQ_ENTRY(MODEM_SW_RESET_REQ),
280 IRQ_ENTRY(GPIO0),
281 IRQ_ENTRY(GPIO1),
282 IRQ_ENTRY(GPIO2),
283 IRQ_ENTRY(GPIO3),
284 IRQ_ENTRY(GPIO4),
285 IRQ_ENTRY(GPIO5),
286 IRQ_ENTRY(GPIO6),
287 IRQ_ENTRY(GPIO7),
288 IRQ_ENTRY(GPIO8)
289};
290
291#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
292#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
293static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
294 WAKEUP_ENTRY(RTC),
295 WAKEUP_ENTRY(RTT0),
296 WAKEUP_ENTRY(RTT1),
297 WAKEUP_ENTRY(HSI0),
298 WAKEUP_ENTRY(HSI1),
299 WAKEUP_ENTRY(USB),
300 WAKEUP_ENTRY(ABB),
301 WAKEUP_ENTRY(ABB_FIFO),
302 WAKEUP_ENTRY(ARM)
303};
304
305/*
306 * mb0_transfer - state needed for mailbox 0 communication.
307 * @lock: The transaction lock.
308 * @dbb_events_lock: A lock used to handle concurrent access to (parts of)
309 * the request data.
310 * @mask_work: Work structure used for (un)masking wakeup interrupts.
311 * @req: Request data that need to persist between requests.
312 */
313static struct {
314 spinlock_t lock;
315 spinlock_t dbb_irqs_lock;
316 struct work_struct mask_work;
317 struct mutex ac_wake_lock;
318 struct completion ac_wake_work;
319 struct {
320 u32 dbb_irqs;
321 u32 dbb_wakeups;
322 u32 abb_events;
323 } req;
324} mb0_transfer;
325
326/*
327 * mb1_transfer - state needed for mailbox 1 communication.
328 * @lock: The transaction lock.
329 * @work: The transaction completion structure.
330 * @ack: Reply ("acknowledge") data.
331 */
332static struct {
333 struct mutex lock;
334 struct completion work;
335 struct {
336 u8 header;
337 u8 arm_opp;
338 u8 ape_opp;
339 u8 ape_voltage_status;
340 } ack;
341} mb1_transfer;
342
343/*
344 * mb2_transfer - state needed for mailbox 2 communication.
345 * @lock: The transaction lock.
346 * @work: The transaction completion structure.
347 * @auto_pm_lock: The autonomous power management configuration lock.
348 * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
349 * @req: Request data that need to persist between requests.
350 * @ack: Reply ("acknowledge") data.
351 */
352static struct {
353 struct mutex lock;
354 struct completion work;
355 spinlock_t auto_pm_lock;
356 bool auto_pm_enabled;
357 struct {
358 u8 status;
359 } ack;
360} mb2_transfer;
361
362/*
363 * mb3_transfer - state needed for mailbox 3 communication.
364 * @lock: The request lock.
365 * @sysclk_lock: A lock used to handle concurrent sysclk requests.
366 * @sysclk_work: Work structure used for sysclk requests.
367 */
368static struct {
369 spinlock_t lock;
370 struct mutex sysclk_lock;
371 struct completion sysclk_work;
372} mb3_transfer;
373
374/*
375 * mb4_transfer - state needed for mailbox 4 communication.
376 * @lock: The transaction lock.
377 * @work: The transaction completion structure.
378 */
379static struct {
380 struct mutex lock;
381 struct completion work;
382} mb4_transfer;
383
384/*
385 * mb5_transfer - state needed for mailbox 5 communication.
386 * @lock: The transaction lock.
387 * @work: The transaction completion structure.
388 * @ack: Reply ("acknowledge") data.
389 */
390static struct {
391 struct mutex lock;
392 struct completion work;
393 struct {
394 u8 status;
395 u8 value;
396 } ack;
397} mb5_transfer;
398
399static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
400
401/* Spinlocks */
402static DEFINE_SPINLOCK(clkout_lock);
403static DEFINE_SPINLOCK(gpiocr_lock);
404
405/* Global var to runtime determine TCDM base for v2 or v1 */
406static __iomem void *tcdm_base;
407
408struct clk_mgt {
409 unsigned int offset;
410 u32 pllsw;
411};
412
413static DEFINE_SPINLOCK(clk_mgt_lock);
414
415#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT), 0 }
416struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
417 CLK_MGT_ENTRY(SGACLK),
418 CLK_MGT_ENTRY(UARTCLK),
419 CLK_MGT_ENTRY(MSP02CLK),
420 CLK_MGT_ENTRY(MSP1CLK),
421 CLK_MGT_ENTRY(I2CCLK),
422 CLK_MGT_ENTRY(SDMMCCLK),
423 CLK_MGT_ENTRY(SLIMCLK),
424 CLK_MGT_ENTRY(PER1CLK),
425 CLK_MGT_ENTRY(PER2CLK),
426 CLK_MGT_ENTRY(PER3CLK),
427 CLK_MGT_ENTRY(PER5CLK),
428 CLK_MGT_ENTRY(PER6CLK),
429 CLK_MGT_ENTRY(PER7CLK),
430 CLK_MGT_ENTRY(LCDCLK),
431 CLK_MGT_ENTRY(BMLCLK),
432 CLK_MGT_ENTRY(HSITXCLK),
433 CLK_MGT_ENTRY(HSIRXCLK),
434 CLK_MGT_ENTRY(HDMICLK),
435 CLK_MGT_ENTRY(APEATCLK),
436 CLK_MGT_ENTRY(APETRACECLK),
437 CLK_MGT_ENTRY(MCDECLK),
438 CLK_MGT_ENTRY(IPI2CCLK),
439 CLK_MGT_ENTRY(DSIALTCLK),
440 CLK_MGT_ENTRY(DMACLK),
441 CLK_MGT_ENTRY(B2R2CLK),
442 CLK_MGT_ENTRY(TVCLK),
443 CLK_MGT_ENTRY(SSPCLK),
444 CLK_MGT_ENTRY(RNGCLK),
445 CLK_MGT_ENTRY(UICCCLK),
446};
447
448/*
449* Used by MCDE to setup all necessary PRCMU registers
450*/
451#define PRCMU_RESET_DSIPLL 0x00004000
452#define PRCMU_UNCLAMP_DSIPLL 0x00400800
453
454#define PRCMU_CLK_PLL_DIV_SHIFT 0
455#define PRCMU_CLK_PLL_SW_SHIFT 5
456#define PRCMU_CLK_38 (1 << 9)
457#define PRCMU_CLK_38_SRC (1 << 10)
458#define PRCMU_CLK_38_DIV (1 << 11)
459
460/* PLLDIV=12, PLLSW=4 (PLLDDR) */
461#define PRCMU_DSI_CLOCK_SETTING 0x0000008C
462
463/* PLLDIV=8, PLLSW=4 (PLLDDR) */
464#define PRCMU_DSI_CLOCK_SETTING_U8400 0x00000088
465
466/* DPI 50000000 Hz */
467#define PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
468 (16 << PRCMU_CLK_PLL_DIV_SHIFT))
469#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000E00
470
471/* D=101, N=1, R=4, SELDIV2=0 */
472#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165
473
474/* D=70, N=1, R=3, SELDIV2=0 */
475#define PRCMU_PLLDSI_FREQ_SETTING_U8400 0x00030146
476
477#define PRCMU_ENABLE_PLLDSI 0x00000001
478#define PRCMU_DISABLE_PLLDSI 0x00000000
479#define PRCMU_RELEASE_RESET_DSS 0x0000400C
480#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000202
481/* ESC clk, div0=1, div1=1, div2=3 */
482#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x07030101
483#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00030101
484#define PRCMU_DSI_RESET_SW 0x00000007
485
486#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
487
488static struct {
489 u8 project_number;
490 u8 api_version;
491 u8 func_version;
492 u8 errata;
493} prcmu_version;
494
495
496int prcmu_enable_dsipll(void)
497{
498 int i;
499 unsigned int plldsifreq;
500
501 /* Clear DSIPLL_RESETN */
502 writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_CLR));
503 /* Unclamp DSIPLL in/out */
504 writel(PRCMU_UNCLAMP_DSIPLL, (_PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR));
505
506 if (prcmu_is_u8400())
507 plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
508 else
509 plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
510 /* Set DSI PLL FREQ */
511 writel(plldsifreq, (_PRCMU_BASE + PRCM_PLLDSI_FREQ));
512 writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
513 (_PRCMU_BASE + PRCM_DSI_PLLOUT_SEL));
514 /* Enable Escape clocks */
515 writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV,
516 (_PRCMU_BASE + PRCM_DSITVCLK_DIV));
517
518 /* Start DSI PLL */
519 writel(PRCMU_ENABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
520 /* Reset DSI PLL */
521 writel(PRCMU_DSI_RESET_SW, (_PRCMU_BASE + PRCM_DSI_SW_RESET));
522 for (i = 0; i < 10; i++) {
523 if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
524 PRCMU_PLLDSI_LOCKP_LOCKED)
525 == PRCMU_PLLDSI_LOCKP_LOCKED)
526 break;
527 udelay(100);
528 }
529 /* Set DSIPLL_RESETN */
530 writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_SET));
531 return 0;
532}
533
534int prcmu_disable_dsipll(void)
535{
536 /* Disable dsi pll */
537 writel(PRCMU_DISABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
538 /* Disable escapeclock */
539 writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV,
540 (_PRCMU_BASE + PRCM_DSITVCLK_DIV));
541 return 0;
542}
543
544int prcmu_set_display_clocks(void)
545{
546 unsigned long flags;
547 unsigned int dsiclk;
548
549 if (prcmu_is_u8400())
550 dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400;
551 else
552 dsiclk = PRCMU_DSI_CLOCK_SETTING;
553
554 spin_lock_irqsave(&clk_mgt_lock, flags);
555
556 /* Grab the HW semaphore. */
557 while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
558 cpu_relax();
559
560 writel(dsiclk, (_PRCMU_BASE + PRCM_HDMICLK_MGT));
561 writel(PRCMU_DSI_LP_CLOCK_SETTING, (_PRCMU_BASE + PRCM_TVCLK_MGT));
562 writel(PRCMU_DPI_CLOCK_SETTING, (_PRCMU_BASE + PRCM_LCDCLK_MGT));
563
564 /* Release the HW semaphore. */
565 writel(0, (_PRCMU_BASE + PRCM_SEM));
566
567 spin_unlock_irqrestore(&clk_mgt_lock, flags);
568
569 return 0;
570}
571
572/**
573 * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
574 */
575void prcmu_enable_spi2(void)
576{
577 u32 reg;
578 unsigned long flags;
579
580 spin_lock_irqsave(&gpiocr_lock, flags);
581 reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
582 writel(reg | PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
583 spin_unlock_irqrestore(&gpiocr_lock, flags);
584}
585
586/**
587 * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
588 */
589void prcmu_disable_spi2(void)
590{
591 u32 reg;
592 unsigned long flags;
593
594 spin_lock_irqsave(&gpiocr_lock, flags);
595 reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
596 writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
597 spin_unlock_irqrestore(&gpiocr_lock, flags);
598}
599
600bool prcmu_has_arm_maxopp(void)
601{
602 return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
603 PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
604}
605
606bool prcmu_is_u8400(void)
607{
608 return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0;
609}
610
611/**
612 * prcmu_get_boot_status - PRCMU boot status checking
613 * Returns: the current PRCMU boot status
614 */
615int prcmu_get_boot_status(void)
616{
617 return readb(tcdm_base + PRCM_BOOT_STATUS);
618}
619
620/**
 621 * prcmu_set_rc_a2p - run a power state sequence
622 * @val: Value to be set, i.e. transition requested
623 * Returns: 0 on success, -EINVAL on invalid argument
624 *
625 * This function is used to run the following power state sequences -
626 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
627 */
628int prcmu_set_rc_a2p(enum romcode_write val)
629{
630 if (val < RDY_2_DS || val > RDY_2_XP70_RST)
631 return -EINVAL;
632 writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
633 return 0;
634}
635
636/**
637 * prcmu_get_rc_p2a - This function is used to get power state sequences
638 * Returns: the power transition that has last happened
639 *
 640 * This function can return the following transitions:
641 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
642 */
643enum romcode_read prcmu_get_rc_p2a(void)
644{
645 return readb(tcdm_base + PRCM_ROMCODE_P2A);
646}
647
648/**
 649 * prcmu_get_xp70_current_state - Return the current XP70 power mode
 650 * Returns: the current AP (ARM) power mode: init,
651 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
652 */
653enum ap_pwrst prcmu_get_xp70_current_state(void)
654{
655 return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
656}
657
658/**
659 * prcmu_config_clkout - Configure one of the programmable clock outputs.
660 * @clkout: The CLKOUT number (0 or 1).
661 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
662 * @div: The divider to be applied.
663 *
664 * Configures one of the programmable clock outputs (CLKOUTs).
665 * @div should be in the range [1,63] to request a configuration, or 0 to
666 * inform that the configuration is no longer requested.
667 */
668int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
669{
670 static int requests[2];
671 int r = 0;
672 unsigned long flags;
673 u32 val;
674 u32 bits;
675 u32 mask;
676 u32 div_mask;
677
678 BUG_ON(clkout > 1);
679 BUG_ON(div > 63);
680 BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));
681
682 if (!div && !requests[clkout])
683 return -EINVAL;
684
685 switch (clkout) {
686 case 0:
687 div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
688 mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
689 bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
690 (div << PRCM_CLKOCR_CLKODIV0_SHIFT));
691 break;
692 case 1:
693 div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
694 mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
695 PRCM_CLKOCR_CLK1TYPE);
696 bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
697 (div << PRCM_CLKOCR_CLKODIV1_SHIFT));
698 break;
699 }
700 bits &= mask;
701
702 spin_lock_irqsave(&clkout_lock, flags);
703
704 val = readl(_PRCMU_BASE + PRCM_CLKOCR);
705 if (val & div_mask) {
706 if (div) {
707 if ((val & mask) != bits) {
708 r = -EBUSY;
709 goto unlock_and_return;
710 }
711 } else {
712 if ((val & mask & ~div_mask) != bits) {
713 r = -EINVAL;
714 goto unlock_and_return;
715 }
716 }
717 }
718 writel((bits | (val & ~mask)), (_PRCMU_BASE + PRCM_CLKOCR));
719 requests[clkout] += (div ? 1 : -1);
720
721unlock_and_return:
722 spin_unlock_irqrestore(&clkout_lock, flags);
723
724 return r;
725}
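
/*
 * A minimal usage sketch for prcmu_config_clkout() above (hypothetical
 * consumer code, not part of this driver). It requests CLKOUT0 from the
 * CLK009 source with a divider of 8 and later drops the request by passing
 * div = 0, as the kernel-doc describes. PRCMU_CLKSRC_CLK009 is the only
 * source constant visible here; the others live in the PRCMU header.
 */
static int example_clkout0_request(void)
{
	int ret;

	/* Request CLKOUT0: source CLK009, divide by 8 (valid range 1..63). */
	ret = prcmu_config_clkout(0, PRCMU_CLKSRC_CLK009, 8);
	if (ret)
		return ret;

	/* ... use the clock output ... */

	/* Drop the request; div = 0 means "no longer requested". */
	return prcmu_config_clkout(0, PRCMU_CLKSRC_CLK009, 0);
}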
726
727int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
728{
729 unsigned long flags;
730
731 BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));
732
733 spin_lock_irqsave(&mb0_transfer.lock, flags);
734
735 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
736 cpu_relax();
737
738 writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
739 writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
740 writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
741 writeb((keep_ulp_clk ? 1 : 0),
742 (tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
743 writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
744 writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
745
746 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
747
748 return 0;
749}
750
751/* This function should only be called while mb0_transfer.lock is held. */
752static void config_wakeups(void)
753{
754 const u8 header[2] = {
755 MB0H_CONFIG_WAKEUPS_EXE,
756 MB0H_CONFIG_WAKEUPS_SLEEP
757 };
758 static u32 last_dbb_events;
759 static u32 last_abb_events;
760 u32 dbb_events;
761 u32 abb_events;
762 unsigned int i;
763
764 dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
765 dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);
766
767 abb_events = mb0_transfer.req.abb_events;
768
769 if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
770 return;
771
772 for (i = 0; i < 2; i++) {
773 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
774 cpu_relax();
775 writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
776 writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
777 writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
778 writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
779 }
780 last_dbb_events = dbb_events;
781 last_abb_events = abb_events;
782}
783
784void prcmu_enable_wakeups(u32 wakeups)
785{
786 unsigned long flags;
787 u32 bits;
788 int i;
789
790 BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
791
792 for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
793 if (wakeups & BIT(i))
794 bits |= prcmu_wakeup_bit[i];
795 }
796
797 spin_lock_irqsave(&mb0_transfer.lock, flags);
798
799 mb0_transfer.req.dbb_wakeups = bits;
800 config_wakeups();
801
802 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
803}
804
805void prcmu_config_abb_event_readout(u32 abb_events)
806{
807 unsigned long flags;
808
809 spin_lock_irqsave(&mb0_transfer.lock, flags);
810
811 mb0_transfer.req.abb_events = abb_events;
812 config_wakeups();
813
814 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
815}
816
817void prcmu_get_abb_event_buffer(void __iomem **buf)
818{
819 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
820 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
821 else
822 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
823}
824
825/**
826 * prcmu_set_arm_opp - set the appropriate ARM OPP
827 * @opp: The new ARM operating point to which transition is to be made
828 * Returns: 0 on success, non-zero on failure
829 *
 830 * This function sets the operating point of the ARM.
831 */
832int prcmu_set_arm_opp(u8 opp)
833{
834 int r;
835
836 if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
837 return -EINVAL;
838
839 r = 0;
840
841 mutex_lock(&mb1_transfer.lock);
842
843 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
844 cpu_relax();
845
846 writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
847 writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
848 writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
849
850 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
851 wait_for_completion(&mb1_transfer.work);
852
853 if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
854 (mb1_transfer.ack.arm_opp != opp))
855 r = -EIO;
856
857 mutex_unlock(&mb1_transfer.lock);
858
859 return r;
860}
861
862/**
863 * prcmu_get_arm_opp - get the current ARM OPP
864 *
865 * Returns: the current ARM OPP
866 */
867int prcmu_get_arm_opp(void)
868{
869 return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
870}
871
872/**
873 * prcmu_get_ddr_opp - get the current DDR OPP
874 *
875 * Returns: the current DDR OPP
876 */
877int prcmu_get_ddr_opp(void)
878{
879 return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
880}
881
882/**
 883 * prcmu_set_ddr_opp - set the appropriate DDR OPP
884 * @opp: The new DDR operating point to which transition is to be made
885 * Returns: 0 on success, non-zero on failure
886 *
887 * This function sets the operating point of the DDR.
888 */
889int prcmu_set_ddr_opp(u8 opp)
890{
891 if (opp < DDR_100_OPP || opp > DDR_25_OPP)
892 return -EINVAL;
893 /* Changing the DDR OPP can hang the hardware pre-v21 */
894 if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
895 writeb(opp, (_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW));
896
897 return 0;
898}
899/**
 900 * prcmu_set_ape_opp - set the appropriate APE OPP
901 * @opp: The new APE operating point to which transition is to be made
902 * Returns: 0 on success, non-zero on failure
903 *
904 * This function sets the operating point of the APE.
905 */
906int prcmu_set_ape_opp(u8 opp)
907{
908 int r = 0;
909
910 mutex_lock(&mb1_transfer.lock);
911
912 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
913 cpu_relax();
914
915 writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
916 writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
917 writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
918
919 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
920 wait_for_completion(&mb1_transfer.work);
921
922 if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
923 (mb1_transfer.ack.ape_opp != opp))
924 r = -EIO;
925
926 mutex_unlock(&mb1_transfer.lock);
927
928 return r;
929}
930
931/**
932 * prcmu_get_ape_opp - get the current APE OPP
933 *
934 * Returns: the current APE OPP
935 */
936int prcmu_get_ape_opp(void)
937{
938 return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
939}
940
941/**
942 * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
943 * @enable: true to request the higher voltage, false to drop a request.
944 *
945 * Calls to this function to enable and disable requests must be balanced.
946 */
947int prcmu_request_ape_opp_100_voltage(bool enable)
948{
949 int r = 0;
950 u8 header;
951 static unsigned int requests;
952
953 mutex_lock(&mb1_transfer.lock);
954
955 if (enable) {
956 if (0 != requests++)
957 goto unlock_and_return;
958 header = MB1H_REQUEST_APE_OPP_100_VOLT;
959 } else {
960 if (requests == 0) {
961 r = -EIO;
962 goto unlock_and_return;
963 } else if (1 != requests--) {
964 goto unlock_and_return;
965 }
966 header = MB1H_RELEASE_APE_OPP_100_VOLT;
967 }
968
969 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
970 cpu_relax();
971
972 writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
973
974 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
975 wait_for_completion(&mb1_transfer.work);
976
977 if ((mb1_transfer.ack.header != header) ||
978 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
979 r = -EIO;
980
981unlock_and_return:
982 mutex_unlock(&mb1_transfer.lock);
983
984 return r;
985}
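
/*
 * A minimal sketch of the balanced-call contract described in the kernel-doc
 * above (hypothetical consumer code, not part of this driver): every call
 * with enable == true must eventually be paired with a call passing false.
 */
static int example_ape_100_voltage_user(void)
{
	int ret;

	ret = prcmu_request_ape_opp_100_voltage(true);
	if (ret)
		return ret;

	/* ... do the work that needs the 100% APE OPP voltage ... */

	return prcmu_request_ape_opp_100_voltage(false);
}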
986
987/**
988 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
989 *
990 * This function releases the power state requirements of a USB wakeup.
991 */
992int prcmu_release_usb_wakeup_state(void)
993{
994 int r = 0;
995
996 mutex_lock(&mb1_transfer.lock);
997
998 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
999 cpu_relax();
1000
1001 writeb(MB1H_RELEASE_USB_WAKEUP,
1002 (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1003
1004 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1005 wait_for_completion(&mb1_transfer.work);
1006
1007 if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
1008 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
1009 r = -EIO;
1010
1011 mutex_unlock(&mb1_transfer.lock);
1012
1013 return r;
1014}
1015
1016/**
1017 * prcmu_set_epod - set the state of an EPOD (power domain)
1018 * @epod_id: The EPOD to set
1019 * @epod_state: The new EPOD state
1020 *
1021 * This function sets the state of an EPOD (power domain). It may not be called
1022 * from interrupt context.
1023 */
1024int prcmu_set_epod(u16 epod_id, u8 epod_state)
1025{
1026 int r = 0;
1027 bool ram_retention = false;
1028 int i;
1029
1030 /* check argument */
1031 BUG_ON(epod_id >= NUM_EPOD_ID);
1032
1033 /* set flag if retention is possible */
1034 switch (epod_id) {
1035 case EPOD_ID_SVAMMDSP:
1036 case EPOD_ID_SIAMMDSP:
1037 case EPOD_ID_ESRAM12:
1038 case EPOD_ID_ESRAM34:
1039 ram_retention = true;
1040 break;
1041 }
1042
1043 /* check argument */
1044 BUG_ON(epod_state > EPOD_STATE_ON);
1045 BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
1046
1047 /* get lock */
1048 mutex_lock(&mb2_transfer.lock);
1049
1050 /* wait for mailbox */
1051 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
1052 cpu_relax();
1053
1054 /* fill in mailbox */
1055 for (i = 0; i < NUM_EPOD_ID; i++)
1056 writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
1057 writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));
1058
1059 writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));
1060
1061 writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1062
1063 /*
1064 * The current firmware version does not handle errors correctly,
1065 * and we cannot recover if there is an error.
1066 * This is expected to change when the firmware is updated.
1067 */
1068 if (!wait_for_completion_timeout(&mb2_transfer.work,
1069 msecs_to_jiffies(20000))) {
1070 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1071 __func__);
1072 r = -EIO;
1073 goto unlock_and_return;
1074 }
1075
1076 if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
1077 r = -EIO;
1078
1079unlock_and_return:
1080 mutex_unlock(&mb2_transfer.lock);
1081 return r;
1082}
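
/*
 * A minimal usage sketch for prcmu_set_epod() above (hypothetical consumer
 * code, not part of this driver). EPOD_ID_ESRAM12 is one of the domains for
 * which RAM retention is allowed, so EPOD_STATE_RAMRET is a legal request;
 * the call may sleep and must not be made from interrupt context.
 */
static int example_esram12_power_cycle(void)
{
	int ret;

	ret = prcmu_set_epod(EPOD_ID_ESRAM12, EPOD_STATE_ON);
	if (ret)
		return ret;

	/* ... use the ESRAM12 bank ... */

	/* Request retention instead of keeping the bank fully powered. */
	return prcmu_set_epod(EPOD_ID_ESRAM12, EPOD_STATE_RAMRET);
}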
1083
1084/**
1085 * prcmu_configure_auto_pm - Configure autonomous power management.
1086 * @sleep: Configuration for ApSleep.
1087 * @idle: Configuration for ApIdle.
1088 */
1089void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
1090 struct prcmu_auto_pm_config *idle)
1091{
1092 u32 sleep_cfg;
1093 u32 idle_cfg;
1094 unsigned long flags;
1095
1096 BUG_ON((sleep == NULL) || (idle == NULL));
1097
1098 sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
1099 sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
1100 sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
1101 sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
1102 sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
1103 sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));
1104
1105 idle_cfg = (idle->sva_auto_pm_enable & 0xF);
1106 idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
1107 idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
1108 idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
1109 idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
1110 idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));
1111
1112 spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);
1113
1114 /*
1115 * The autonomous power management configuration is done through
1116 * fields in mailbox 2, but these fields are only used as shared
1117 * variables - i.e. there is no need to send a message.
1118 */
1119 writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
1120 writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));
1121
1122 mb2_transfer.auto_pm_enabled =
1123 ((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1124 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1125 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1126 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));
1127
1128 spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
1129}
1130EXPORT_SYMBOL(prcmu_configure_auto_pm);
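
/*
 * A minimal sketch of how a caller might fill in the two configurations for
 * prcmu_configure_auto_pm() (hypothetical code, not part of this driver).
 * The function above packs each struct into one 32-bit word per state:
 * bits 31:28 sva_auto_pm_enable, 27:24 sia_auto_pm_enable,
 * bits 23:16 sva_power_on, 15:8 sia_power_on,
 * bits 7:4 sva_policy, 3:0 sia_policy.
 * The zero power_on/policy values below are placeholders; real values come
 * from the platform header, which is not shown here.
 */
static void example_configure_auto_pm(void)
{
	struct prcmu_auto_pm_config sleep = {
		.sva_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sia_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sva_power_on = 0,	/* placeholder */
		.sia_power_on = 0,	/* placeholder */
		.sva_policy = 0,	/* placeholder */
		.sia_policy = 0,	/* placeholder */
	};
	struct prcmu_auto_pm_config idle = sleep;

	prcmu_configure_auto_pm(&sleep, &idle);
}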
1131
1132bool prcmu_is_auto_pm_enabled(void)
1133{
1134 return mb2_transfer.auto_pm_enabled;
1135}
1136
1137static int request_sysclk(bool enable)
1138{
1139 int r;
1140 unsigned long flags;
1141
1142 r = 0;
1143
1144 mutex_lock(&mb3_transfer.sysclk_lock);
1145
1146 spin_lock_irqsave(&mb3_transfer.lock, flags);
1147
1148 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
1149 cpu_relax();
1150
1151 writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));
1152
1153 writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
1154 writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1155
1156 spin_unlock_irqrestore(&mb3_transfer.lock, flags);
1157
1158 /*
1159 * The firmware only sends an ACK if we want to enable the
1160 * SysClk, and it succeeds.
1161 */
1162 if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
1163 msecs_to_jiffies(20000))) {
1164 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1165 __func__);
1166 r = -EIO;
1167 }
1168
1169 mutex_unlock(&mb3_transfer.sysclk_lock);
1170
1171 return r;
1172}
1173
1174static int request_timclk(bool enable)
1175{
1176 u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
1177
1178 if (!enable)
1179 val |= PRCM_TCR_STOP_TIMERS;
1180 writel(val, (_PRCMU_BASE + PRCM_TCR));
1181
1182 return 0;
1183}
1184
1185static int request_reg_clock(u8 clock, bool enable)
1186{
1187 u32 val;
1188 unsigned long flags;
1189
1190 spin_lock_irqsave(&clk_mgt_lock, flags);
1191
1192 /* Grab the HW semaphore. */
1193 while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1194 cpu_relax();
1195
1196 val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1197 if (enable) {
1198 val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
1199 } else {
1200 clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
1201 val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
1202 }
1203 writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1204
1205 /* Release the HW semaphore. */
1206 writel(0, (_PRCMU_BASE + PRCM_SEM));
1207
1208 spin_unlock_irqrestore(&clk_mgt_lock, flags);
1209
1210 return 0;
1211}
1212
1213/**
1214 * prcmu_request_clock() - Request that a clock be enabled or disabled.
1215 * @clock: The clock for which the request is made.
1216 * @enable: Whether the clock should be enabled (true) or disabled (false).
1217 *
1218 * This function should only be used by the clock implementation.
1219 * Do not use it from any other place!
1220 */
1221int prcmu_request_clock(u8 clock, bool enable)
1222{
1223 if (clock < PRCMU_NUM_REG_CLOCKS)
1224 return request_reg_clock(clock, enable);
1225 else if (clock == PRCMU_TIMCLK)
1226 return request_timclk(enable);
1227 else if (clock == PRCMU_SYSCLK)
1228 return request_sysclk(enable);
1229 else
1230 return -EINVAL;
1231}
1232
1233int prcmu_config_esram0_deep_sleep(u8 state)
1234{
1235 if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
1236 (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
1237 return -EINVAL;
1238
1239 mutex_lock(&mb4_transfer.lock);
1240
1241 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1242 cpu_relax();
1243
1244 writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1245 writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
1246 (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
1247 writeb(DDR_PWR_STATE_ON,
1248 (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
1249 writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));
1250
1251 writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1252 wait_for_completion(&mb4_transfer.work);
1253
1254 mutex_unlock(&mb4_transfer.lock);
1255
1256 return 0;
1257}
1258
1259int prcmu_config_hotdog(u8 threshold)
1260{
1261 mutex_lock(&mb4_transfer.lock);
1262
1263 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1264 cpu_relax();
1265
1266 writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
1267 writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1268
1269 writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1270 wait_for_completion(&mb4_transfer.work);
1271
1272 mutex_unlock(&mb4_transfer.lock);
1273
1274 return 0;
1275}
1276
1277int prcmu_config_hotmon(u8 low, u8 high)
1278{
1279 mutex_lock(&mb4_transfer.lock);
1280
1281 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1282 cpu_relax();
1283
1284 writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
1285 writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
1286 writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
1287 (tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
1288 writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1289
1290 writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1291 wait_for_completion(&mb4_transfer.work);
1292
1293 mutex_unlock(&mb4_transfer.lock);
1294
1295 return 0;
1296}
1297
1298static int config_hot_period(u16 val)
1299{
1300 mutex_lock(&mb4_transfer.lock);
1301
1302 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1303 cpu_relax();
1304
1305 writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
1306 writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1307
1308 writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1309 wait_for_completion(&mb4_transfer.work);
1310
1311 mutex_unlock(&mb4_transfer.lock);
1312
1313 return 0;
1314}
1315
1316int prcmu_start_temp_sense(u16 cycles32k)
1317{
1318 if (cycles32k == 0xFFFF)
1319 return -EINVAL;
1320
1321 return config_hot_period(cycles32k);
1322}
1323
1324int prcmu_stop_temp_sense(void)
1325{
1326 return config_hot_period(0xFFFF);
1327}
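
/*
 * A minimal sketch of the thermal-sensing calls above (hypothetical code,
 * not part of this driver): program low/high HOTMON thresholds, start the
 * periodic measurement with a 32 kHz cycle count, and stop it again. The
 * threshold and cycle values are illustrative only.
 */
static int example_temp_sense(void)
{
	int ret;

	ret = prcmu_config_hotmon(10, 85);	/* illustrative thresholds */
	if (ret)
		return ret;

	ret = prcmu_start_temp_sense(0x8000);	/* 32768 cycles of 32 kHz, ~1 s */
	if (ret)
		return ret;

	/* ... measurements run until they are stopped ... */

	return prcmu_stop_temp_sense();
}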
1328
1329/**
1330 * prcmu_set_clock_divider() - Configure the clock divider.
1331 * @clock: The clock for which the request is made.
1332 * @divider: The clock divider. (< 32)
1333 *
1334 * This function should only be used by the clock implementation.
1335 * Do not use it from any other place!
1336 */
1337int prcmu_set_clock_divider(u8 clock, u8 divider)
1338{
1339 u32 val;
1340 unsigned long flags;
1341
1342 if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
1343 return -EINVAL;
1344
1345 spin_lock_irqsave(&clk_mgt_lock, flags);
1346
1347 /* Grab the HW semaphore. */
1348 while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1349 cpu_relax();
1350
1351 val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1352 val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
1353 val |= (u32)divider;
1354 writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1355
1356 /* Release the HW semaphore. */
1357 writel(0, (_PRCMU_BASE + PRCM_SEM));
1358
1359 spin_unlock_irqrestore(&clk_mgt_lock, flags);
1360
1361 return 0;
1362}
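
/*
 * A minimal sketch of how the clock implementation is expected to drive the
 * two helpers above (hypothetical code, not part of this driver): gate a
 * register-managed clock on, optionally adjust its divider, and gate it off
 * again. PRCMU_UARTCLK is one of the PRCMU_NUM_REG_CLOCKS entries in
 * clk_mgt[]; the divider must stay in the range 1..31.
 */
static int example_uartclk_enable(void)
{
	int ret;

	ret = prcmu_request_clock(PRCMU_UARTCLK, true);
	if (ret)
		return ret;

	ret = prcmu_set_clock_divider(PRCMU_UARTCLK, 4);
	if (ret) {
		prcmu_request_clock(PRCMU_UARTCLK, false);
		return ret;
	}

	/* ... the clock is running ... */

	return prcmu_request_clock(PRCMU_UARTCLK, false);
}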
1363
1364/**
1365 * prcmu_abb_read() - Read register value(s) from the ABB.
1366 * @slave: The I2C slave address.
1367 * @reg: The (start) register address.
1368 * @value: The read out value(s).
1369 * @size: The number of registers to read.
1370 *
1371 * Reads register value(s) from the ABB.
1372 * @size has to be 1 for the current firmware version.
1373 */
1374int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
1375{
1376 int r;
1377
1378 if (size != 1)
1379 return -EINVAL;
1380
1381 mutex_lock(&mb5_transfer.lock);
1382
1383 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1384 cpu_relax();
1385
1386 writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1387 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1388 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1389 writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1390
1391 writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1392
1393 if (!wait_for_completion_timeout(&mb5_transfer.work,
1394 msecs_to_jiffies(20000))) {
1395 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1396 __func__);
1397 r = -EIO;
1398 } else {
1399 r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
1400 }
1401
1402 if (!r)
1403 *value = mb5_transfer.ack.value;
1404
1405 mutex_unlock(&mb5_transfer.lock);
1406
1407 return r;
1408}
1409
1410/**
1411 * prcmu_abb_write() - Write register value(s) to the ABB.
1412 * @slave: The I2C slave address.
1413 * @reg: The (start) register address.
1414 * @value: The value(s) to write.
1415 * @size: The number of registers to write.
1416 *
1417 * Writes register value(s) to the ABB.
1418 * @size has to be 1 for the current firmware version.
1419 */
1420int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
1421{
1422 int r;
1423
1424 if (size != 1)
1425 return -EINVAL;
1426
1427 mutex_lock(&mb5_transfer.lock);
1428
1429 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1430 cpu_relax();
1431
1432 writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1433 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1434 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1435 writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1436
1437 writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1438
1439 if (!wait_for_completion_timeout(&mb5_transfer.work,
1440 msecs_to_jiffies(20000))) {
1441 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1442 __func__);
1443 r = -EIO;
1444 } else {
1445 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
1446 }
1447
1448 mutex_unlock(&mb5_transfer.lock);
1449
1450 return r;
1451}
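
/*
 * A minimal read-modify-write sketch for the ABB accessors above
 * (hypothetical code, not part of this driver). The slave address, register
 * offset and bit mask are supplied by the caller; @size must be 1 with the
 * current firmware version.
 */
static int example_abb_set_bits(u8 slave, u8 reg, u8 bits)
{
	u8 val;
	int ret;

	ret = prcmu_abb_read(slave, reg, &val, 1);
	if (ret)
		return ret;

	val |= bits;

	return prcmu_abb_write(slave, reg, &val, 1);
}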
1452
1453/**
1454 * prcmu_ac_wake_req - should be called whenever the ARM wants to wake up the modem
1455 */
1456void prcmu_ac_wake_req(void)
1457{
1458 u32 val;
1459
1460 mutex_lock(&mb0_transfer.ac_wake_lock);
1461
1462 val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
1463 if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
1464 goto unlock_and_return;
1465
1466 atomic_set(&ac_wake_req_state, 1);
1467
1468 writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1469 (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
1470
1471 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1472 msecs_to_jiffies(20000))) {
1473 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1474 __func__);
1475 }
1476
1477unlock_and_return:
1478 mutex_unlock(&mb0_transfer.ac_wake_lock);
1479}
1480
1481/**
1482 * prcmu_ac_sleep_req - called when the ARM no longer needs to talk to the modem
1483 */
1484void prcmu_ac_sleep_req(void)
1485{
1486 u32 val;
1487
1488 mutex_lock(&mb0_transfer.ac_wake_lock);
1489
1490 val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
1491 if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
1492 goto unlock_and_return;
1493
1494 writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1495 (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
1496
1497 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1498 msecs_to_jiffies(20000))) {
1499 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1500 __func__);
1501 }
1502
1503 atomic_set(&ac_wake_req_state, 0);
1504
1505unlock_and_return:
1506 mutex_unlock(&mb0_transfer.ac_wake_lock);
1507}
1508
1509bool prcmu_is_ac_wake_requested(void)
1510{
1511 return (atomic_read(&ac_wake_req_state) != 0);
1512}
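
/*
 * A minimal sketch of the host-access pairing above (hypothetical code, not
 * part of this driver): keep the modem awake for the duration of an access
 * and release it afterwards.
 */
static void example_modem_access(void)
{
	prcmu_ac_wake_req();

	/* ... talk to the modem while host access is granted ... */

	prcmu_ac_sleep_req();
}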
1513
1514/**
1515 * prcmu_system_reset - System reset
1516 *
1517 * Saves the reset reason code and then sets the APE_SOFTRST register, which
1518 * fires an interrupt to the firmware.
1519 */
1520void prcmu_system_reset(u16 reset_code)
1521{
1522 writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
1523 writel(1, (_PRCMU_BASE + PRCM_APE_SOFTRST));
1524}
1525
1526/**
1527 * prcmu_modem_reset - ask the PRCMU to reset the modem
1528 */
1529void prcmu_modem_reset(void)
1530{
1531 mutex_lock(&mb1_transfer.lock);
1532
1533 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1534 cpu_relax();
1535
1536 writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1537 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1538 wait_for_completion(&mb1_transfer.work);
1539
1540 /*
1541	 * No need to check the return value from the PRCMU, as the modem should
1542	 * go into the reset state. That state is already managed by the upper layer.
1543 */
1544
1545 mutex_unlock(&mb1_transfer.lock);
1546}
1547
1548static void ack_dbb_wakeup(void)
1549{
1550 unsigned long flags;
1551
1552 spin_lock_irqsave(&mb0_transfer.lock, flags);
1553
1554 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
1555 cpu_relax();
1556
1557 writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
1558 writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1559
1560 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
1561}
1562
1563static inline void print_unknown_header_warning(u8 n, u8 header)
1564{
1565 pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
1566 header, n);
1567}
1568
1569static bool read_mailbox_0(void)
1570{
1571 bool r;
1572 u32 ev;
1573 unsigned int n;
1574 u8 header;
1575
1576 header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
1577 switch (header) {
1578 case MB0H_WAKEUP_EXE:
1579 case MB0H_WAKEUP_SLEEP:
1580 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
1581 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
1582 else
1583 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);
1584
1585 if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
1586 complete(&mb0_transfer.ac_wake_work);
1587 if (ev & WAKEUP_BIT_SYSCLK_OK)
1588 complete(&mb3_transfer.sysclk_work);
1589
1590 ev &= mb0_transfer.req.dbb_irqs;
1591
1592 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
1593 if (ev & prcmu_irq_bit[n])
1594 generic_handle_irq(IRQ_PRCMU_BASE + n);
1595 }
1596 r = true;
1597 break;
1598 default:
1599 print_unknown_header_warning(0, header);
1600 r = false;
1601 break;
1602 }
1603 writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1604 return r;
1605}
1606
1607static bool read_mailbox_1(void)
1608{
1609 mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
1610 mb1_transfer.ack.arm_opp = readb(tcdm_base +
1611 PRCM_ACK_MB1_CURRENT_ARM_OPP);
1612 mb1_transfer.ack.ape_opp = readb(tcdm_base +
1613 PRCM_ACK_MB1_CURRENT_APE_OPP);
1614 mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
1615 PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
1616 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1617 complete(&mb1_transfer.work);
1618 return false;
1619}
1620
1621static bool read_mailbox_2(void)
1622{
1623 mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
1624 writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1625 complete(&mb2_transfer.work);
1626 return false;
1627}
1628
1629static bool read_mailbox_3(void)
1630{
1631 writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1632 return false;
1633}
1634
1635static bool read_mailbox_4(void)
1636{
1637 u8 header;
1638 bool do_complete = true;
1639
1640 header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
1641 switch (header) {
1642 case MB4H_MEM_ST:
1643 case MB4H_HOTDOG:
1644 case MB4H_HOTMON:
1645 case MB4H_HOT_PERIOD:
1646 break;
1647 default:
1648 print_unknown_header_warning(4, header);
1649 do_complete = false;
1650 break;
1651 }
1652
1653 writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1654
1655 if (do_complete)
1656 complete(&mb4_transfer.work);
1657
1658 return false;
1659}
1660
1661static bool read_mailbox_5(void)
1662{
1663 mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
1664 mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
1665 writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1666 complete(&mb5_transfer.work);
1667 return false;
1668}
1669
1670static bool read_mailbox_6(void)
1671{
1672 writel(MBOX_BIT(6), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1673 return false;
1674}
1675
1676static bool read_mailbox_7(void)
1677{
1678 writel(MBOX_BIT(7), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
1679 return false;
1680}
1681
1682static bool (* const read_mailbox[NUM_MB])(void) = {
1683 read_mailbox_0,
1684 read_mailbox_1,
1685 read_mailbox_2,
1686 read_mailbox_3,
1687 read_mailbox_4,
1688 read_mailbox_5,
1689 read_mailbox_6,
1690 read_mailbox_7
1691};
1692
1693static irqreturn_t prcmu_irq_handler(int irq, void *data)
1694{
1695 u32 bits;
1696 u8 n;
1697 irqreturn_t r;
1698
1699 bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
1700 if (unlikely(!bits))
1701 return IRQ_NONE;
1702
1703 r = IRQ_HANDLED;
1704 for (n = 0; bits; n++) {
1705 if (bits & MBOX_BIT(n)) {
1706 bits -= MBOX_BIT(n);
1707 if (read_mailbox[n]())
1708 r = IRQ_WAKE_THREAD;
1709 }
1710 }
1711 return r;
1712}
1713
1714static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
1715{
1716 ack_dbb_wakeup();
1717 return IRQ_HANDLED;
1718}
1719
1720static void prcmu_mask_work(struct work_struct *work)
1721{
1722 unsigned long flags;
1723
1724 spin_lock_irqsave(&mb0_transfer.lock, flags);
1725
1726 config_wakeups();
1727
1728 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
1729}
1730
1731static void prcmu_irq_mask(struct irq_data *d)
1732{
1733 unsigned long flags;
1734
1735 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
1736
1737 mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
1738
1739 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
1740
1741 if (d->irq != IRQ_PRCMU_CA_SLEEP)
1742 schedule_work(&mb0_transfer.mask_work);
1743}
1744
1745static void prcmu_irq_unmask(struct irq_data *d)
1746{
1747 unsigned long flags;
1748
1749 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
1750
1751 mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
1752
1753 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
1754
1755 if (d->irq != IRQ_PRCMU_CA_SLEEP)
1756 schedule_work(&mb0_transfer.mask_work);
1757}
1758
1759static void noop(struct irq_data *d)
1760{
1761}
1762
1763static struct irq_chip prcmu_irq_chip = {
1764 .name = "prcmu",
1765 .irq_disable = prcmu_irq_mask,
1766 .irq_ack = noop,
1767 .irq_mask = prcmu_irq_mask,
1768 .irq_unmask = prcmu_irq_unmask,
1769};
1770
1771void __init prcmu_early_init(void)
1772{
1773 unsigned int i;
1774
1775 if (cpu_is_u8500v1()) {
1776 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
1777 } else if (cpu_is_u8500v2()) {
1778 void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
1779
1780 if (tcpm_base != NULL) {
1781 int version;
1782 version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
1783 prcmu_version.project_number = version & 0xFF;
1784 prcmu_version.api_version = (version >> 8) & 0xFF;
1785 prcmu_version.func_version = (version >> 16) & 0xFF;
1786 prcmu_version.errata = (version >> 24) & 0xFF;
1787 pr_info("PRCMU firmware version %d.%d.%d\n",
1788 (version >> 8) & 0xFF, (version >> 16) & 0xFF,
1789 (version >> 24) & 0xFF);
1790 iounmap(tcpm_base);
1791 }
1792
1793 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
1794 } else {
1795 pr_err("prcmu: Unsupported chip version\n");
1796 BUG();
1797 }
1798
1799 spin_lock_init(&mb0_transfer.lock);
1800 spin_lock_init(&mb0_transfer.dbb_irqs_lock);
1801 mutex_init(&mb0_transfer.ac_wake_lock);
1802 init_completion(&mb0_transfer.ac_wake_work);
1803 mutex_init(&mb1_transfer.lock);
1804 init_completion(&mb1_transfer.work);
1805 mutex_init(&mb2_transfer.lock);
1806 init_completion(&mb2_transfer.work);
1807 spin_lock_init(&mb2_transfer.auto_pm_lock);
1808 spin_lock_init(&mb3_transfer.lock);
1809 mutex_init(&mb3_transfer.sysclk_lock);
1810 init_completion(&mb3_transfer.sysclk_work);
1811 mutex_init(&mb4_transfer.lock);
1812 init_completion(&mb4_transfer.work);
1813 mutex_init(&mb5_transfer.lock);
1814 init_completion(&mb5_transfer.work);
1815
1816 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
1817
1818	/* Initialize irqs. */
1819 for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) {
1820 unsigned int irq;
1821
1822 irq = IRQ_PRCMU_BASE + i;
1823 irq_set_chip_and_handler(irq, &prcmu_irq_chip,
1824 handle_simple_irq);
1825 set_irq_flags(irq, IRQF_VALID);
1826 }
1827}
1828
1829/*
1830 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
1831 */
1832static struct regulator_consumer_supply db8500_vape_consumers[] = {
1833 REGULATOR_SUPPLY("v-ape", NULL),
1834 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
1835 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
1836 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
1837 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
1838 /* "v-mmc" changed to "vcore" in the mainline kernel */
1839 REGULATOR_SUPPLY("vcore", "sdi0"),
1840 REGULATOR_SUPPLY("vcore", "sdi1"),
1841 REGULATOR_SUPPLY("vcore", "sdi2"),
1842 REGULATOR_SUPPLY("vcore", "sdi3"),
1843 REGULATOR_SUPPLY("vcore", "sdi4"),
1844 REGULATOR_SUPPLY("v-dma", "dma40.0"),
1845 REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
1846 /* "v-uart" changed to "vcore" in the mainline kernel */
1847 REGULATOR_SUPPLY("vcore", "uart0"),
1848 REGULATOR_SUPPLY("vcore", "uart1"),
1849 REGULATOR_SUPPLY("vcore", "uart2"),
1850 REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
1851};
1852
1853static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
1854 /* CG2900 and CW1200 power to off-chip peripherals */
1855 REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
1856 REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"),
1857 REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
1858 /* AV8100 regulator */
1859 REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
1860};
1861
1862static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
1863 REGULATOR_SUPPLY("vsupply", "b2r2.0"),
1864 REGULATOR_SUPPLY("vsupply", "mcde.0"),
1865};
1866
1867static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
1868 [DB8500_REGULATOR_VAPE] = {
1869 .constraints = {
1870 .name = "db8500-vape",
1871 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1872 },
1873 .consumer_supplies = db8500_vape_consumers,
1874 .num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
1875 },
1876 [DB8500_REGULATOR_VARM] = {
1877 .constraints = {
1878 .name = "db8500-varm",
1879 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1880 },
1881 },
1882 [DB8500_REGULATOR_VMODEM] = {
1883 .constraints = {
1884 .name = "db8500-vmodem",
1885 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1886 },
1887 },
1888 [DB8500_REGULATOR_VPLL] = {
1889 .constraints = {
1890 .name = "db8500-vpll",
1891 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1892 },
1893 },
1894 [DB8500_REGULATOR_VSMPS1] = {
1895 .constraints = {
1896 .name = "db8500-vsmps1",
1897 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1898 },
1899 },
1900 [DB8500_REGULATOR_VSMPS2] = {
1901 .constraints = {
1902 .name = "db8500-vsmps2",
1903 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1904 },
1905 .consumer_supplies = db8500_vsmps2_consumers,
1906 .num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
1907 },
1908 [DB8500_REGULATOR_VSMPS3] = {
1909 .constraints = {
1910 .name = "db8500-vsmps3",
1911 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1912 },
1913 },
1914 [DB8500_REGULATOR_VRF1] = {
1915 .constraints = {
1916 .name = "db8500-vrf1",
1917 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1918 },
1919 },
1920 [DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
1921 .supply_regulator = "db8500-vape",
1922 .constraints = {
1923 .name = "db8500-sva-mmdsp",
1924 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1925 },
1926 },
1927 [DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
1928 .constraints = {
1929 /* "ret" means "retention" */
1930 .name = "db8500-sva-mmdsp-ret",
1931 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1932 },
1933 },
1934 [DB8500_REGULATOR_SWITCH_SVAPIPE] = {
1935 .supply_regulator = "db8500-vape",
1936 .constraints = {
1937 .name = "db8500-sva-pipe",
1938 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1939 },
1940 },
1941 [DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
1942 .supply_regulator = "db8500-vape",
1943 .constraints = {
1944 .name = "db8500-sia-mmdsp",
1945 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1946 },
1947 },
1948 [DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
1949 .constraints = {
1950 .name = "db8500-sia-mmdsp-ret",
1951 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1952 },
1953 },
1954 [DB8500_REGULATOR_SWITCH_SIAPIPE] = {
1955 .supply_regulator = "db8500-vape",
1956 .constraints = {
1957 .name = "db8500-sia-pipe",
1958 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1959 },
1960 },
1961 [DB8500_REGULATOR_SWITCH_SGA] = {
1962 .supply_regulator = "db8500-vape",
1963 .constraints = {
1964 .name = "db8500-sga",
1965 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1966 },
1967 },
1968 [DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
1969 .supply_regulator = "db8500-vape",
1970 .constraints = {
1971 .name = "db8500-b2r2-mcde",
1972 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1973 },
1974 .consumer_supplies = db8500_b2r2_mcde_consumers,
1975 .num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
1976 },
1977 [DB8500_REGULATOR_SWITCH_ESRAM12] = {
1978 .supply_regulator = "db8500-vape",
1979 .constraints = {
1980 .name = "db8500-esram12",
1981 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1982 },
1983 },
1984 [DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
1985 .constraints = {
1986 .name = "db8500-esram12-ret",
1987 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1988 },
1989 },
1990 [DB8500_REGULATOR_SWITCH_ESRAM34] = {
1991 .supply_regulator = "db8500-vape",
1992 .constraints = {
1993 .name = "db8500-esram34",
1994 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1995 },
1996 },
1997 [DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
1998 .constraints = {
1999 .name = "db8500-esram34-ret",
2000 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
2001 },
2002 },
2003};
2004
2005static struct mfd_cell db8500_prcmu_devs[] = {
2006 {
2007 .name = "db8500-prcmu-regulators",
2008 .mfd_data = &db8500_regulators,
2009 },
2010 {
2011 .name = "cpufreq-u8500",
2012 },
2013};
2014
2015/**
2016 * db8500_prcmu_probe - probe function for the DB8500 PRCMU driver
2017 *
2018 */
2019static int __init db8500_prcmu_probe(struct platform_device *pdev)
2020{
2021 int err = 0;
2022
2023 if (ux500_is_svp())
2024 return -ENODEV;
2025
2026 /* Clean up the mailbox interrupts after pre-kernel code. */
2027 writel(ALL_MBOX_BITS, (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
2028
2029 err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
2030 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
2031 if (err < 0) {
2032 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
2033 err = -EBUSY;
2034 goto no_irq_return;
2035 }
2036
2037 if (cpu_is_u8500v20_or_later())
2038 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
2039
2040 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
2041 ARRAY_SIZE(db8500_prcmu_devs), NULL,
2042 0);
2043
2044 if (err)
2045 pr_err("prcmu: Failed to add subdevices\n");
2046 else
2047 pr_info("DB8500 PRCMU initialized\n");
2048
2049no_irq_return:
2050 return err;
2051}
2052
2053static struct platform_driver db8500_prcmu_driver = {
2054 .driver = {
2055 .name = "db8500-prcmu",
2056 .owner = THIS_MODULE,
2057 },
2058};
2059
2060static int __init db8500_prcmu_init(void)
2061{
2062 return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
2063}
2064
2065arch_initcall(db8500_prcmu_init);
2066
2067MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
2068MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
2069MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index fb9770b39a32..2808bd125d13 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -117,7 +117,8 @@ static struct mfd_cell ds1wm_cell __initdata = {
117 .name = "ds1wm", 117 .name = "ds1wm",
118 .enable = ds1wm_enable, 118 .enable = ds1wm_enable,
119 .disable = ds1wm_disable, 119 .disable = ds1wm_disable,
120 .mfd_data = &ds1wm_pdata, 120 .platform_data = &ds1wm_pdata,
121 .pdata_size = sizeof(ds1wm_pdata),
121 .num_resources = 2, 122 .num_resources = 2,
122 .resources = ds1wm_resources, 123 .resources = ds1wm_resources,
123}; 124};
@@ -172,6 +173,8 @@ static int __init pasic3_probe(struct platform_device *pdev)
172 } 173 }
173 174
174 if (pdata && pdata->led_pdata) { 175 if (pdata && pdata->led_pdata) {
176 led_cell.platform_data = pdata->led_pdata;
177 led_cell.pdata_size = sizeof(struct pasic3_leds_machinfo);
175 ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0); 178 ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0);
176 if (ret < 0) 179 if (ret < 0)
177 dev_warn(dev, "failed to register LED device\n"); 180 dev_warn(dev, "failed to register LED device\n");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index fc4191137e90..5c2a06acb77f 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -86,7 +86,8 @@ static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
86 86
87 /* Add platform data */ 87 /* Add platform data */
88 pdata->modno = modno; 88 pdata->modno = modno;
89 cell->mfd_data = pdata; 89 cell->platform_data = pdata;
90 cell->pdata_size = sizeof(*pdata);
90 91
91 /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */ 92 /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
92 res->flags = IORESOURCE_MEM; 93 res->flags = IORESOURCE_MEM;
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 58cc5fdde016..e1e59c92f758 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -627,7 +627,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
627 goto out_dev; 627 goto out_dev;
628 } 628 }
629 629
630 if (pdata && pdata->regulator[0]) { 630 if (pdata) {
631 ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0], 631 ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
632 ARRAY_SIZE(regulator_devs), 632 ARRAY_SIZE(regulator_devs),
633 &regulator_resources[0], 0); 633 &regulator_resources[0], 0);
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 668634e89e81..7e4d44bf92ab 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -683,13 +683,14 @@ out:
683EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion); 683EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
684 684
685static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx, 685static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
686 const char *format, void *pdata) 686 const char *format, void *pdata, size_t pdata_size)
687{ 687{
688 char buf[30]; 688 char buf[30];
689 const char *name = mc13xxx_get_chipname(mc13xxx); 689 const char *name = mc13xxx_get_chipname(mc13xxx);
690 690
691 struct mfd_cell cell = { 691 struct mfd_cell cell = {
692 .mfd_data = pdata, 692 .platform_data = pdata,
693 .pdata_size = pdata_size,
693 }; 694 };
694 695
695 /* there is no asnprintf in the kernel :-( */ 696 /* there is no asnprintf in the kernel :-( */
@@ -705,7 +706,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
705 706
706static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format) 707static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
707{ 708{
708 return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL); 709 return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
709} 710}
710 711
711static int mc13xxx_probe(struct spi_device *spi) 712static int mc13xxx_probe(struct spi_device *spi)
@@ -764,7 +765,7 @@ err_revision:
764 765
765 if (pdata->flags & MC13XXX_USE_REGULATOR) { 766 if (pdata->flags & MC13XXX_USE_REGULATOR) {
766 mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator", 767 mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
767 &pdata->regulators); 768 &pdata->regulators, sizeof(pdata->regulators));
768 } 769 }
769 770
770 if (pdata->flags & MC13XXX_USE_RTC) 771 if (pdata->flags & MC13XXX_USE_RTC)
@@ -774,7 +775,8 @@ err_revision:
774 mc13xxx_add_subdevice(mc13xxx, "%s-ts"); 775 mc13xxx_add_subdevice(mc13xxx, "%s-ts");
775 776
776 if (pdata->flags & MC13XXX_USE_LED) 777 if (pdata->flags & MC13XXX_USE_LED)
777 mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led", pdata->leds); 778 mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
779 pdata->leds, sizeof(*pdata->leds));
778 780
779 return 0; 781 return 0;
780} 782}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index f4c8c844b913..0902523af62d 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -88,6 +88,13 @@ static int mfd_add_device(struct device *parent, int id,
88 88
89 pdev->dev.parent = parent; 89 pdev->dev.parent = parent;
90 90
91 if (cell->pdata_size) {
92 ret = platform_device_add_data(pdev,
93 cell->platform_data, cell->pdata_size);
94 if (ret)
95 goto fail_res;
96 }
97
91 ret = mfd_platform_add_cell(pdev, cell); 98 ret = mfd_platform_add_cell(pdev, cell);
92 if (ret) 99 if (ret)
93 goto fail_res; 100 goto fail_res;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 3ab9ffa00aad..855219526ccb 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -26,6 +26,7 @@
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28#include <plat/usb.h> 28#include <plat/usb.h>
29#include <linux/pm_runtime.h>
29 30
30#define USBHS_DRIVER_NAME "usbhs-omap" 31#define USBHS_DRIVER_NAME "usbhs-omap"
31#define OMAP_EHCI_DEVICE "ehci-omap" 32#define OMAP_EHCI_DEVICE "ehci-omap"
@@ -146,9 +147,6 @@
146 147
147 148
148struct usbhs_hcd_omap { 149struct usbhs_hcd_omap {
149 struct clk *usbhost_ick;
150 struct clk *usbhost_hs_fck;
151 struct clk *usbhost_fs_fck;
152 struct clk *xclk60mhsp1_ck; 150 struct clk *xclk60mhsp1_ck;
153 struct clk *xclk60mhsp2_ck; 151 struct clk *xclk60mhsp2_ck;
154 struct clk *utmi_p1_fck; 152 struct clk *utmi_p1_fck;
@@ -158,8 +156,6 @@ struct usbhs_hcd_omap {
158 struct clk *usbhost_p2_fck; 156 struct clk *usbhost_p2_fck;
159 struct clk *usbtll_p2_fck; 157 struct clk *usbtll_p2_fck;
160 struct clk *init_60m_fclk; 158 struct clk *init_60m_fclk;
161 struct clk *usbtll_fck;
162 struct clk *usbtll_ick;
163 159
164 void __iomem *uhh_base; 160 void __iomem *uhh_base;
165 void __iomem *tll_base; 161 void __iomem *tll_base;
@@ -281,6 +277,7 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
281 277
282 if (!ehci) { 278 if (!ehci) {
283 dev_err(dev, "omap_usbhs_alloc_child failed\n"); 279 dev_err(dev, "omap_usbhs_alloc_child failed\n");
280 ret = -ENOMEM;
284 goto err_end; 281 goto err_end;
285 } 282 }
286 283
@@ -304,13 +301,14 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
304 sizeof(*ohci_data), dev); 301 sizeof(*ohci_data), dev);
305 if (!ohci) { 302 if (!ohci) {
306 dev_err(dev, "omap_usbhs_alloc_child failed\n"); 303 dev_err(dev, "omap_usbhs_alloc_child failed\n");
304 ret = -ENOMEM;
307 goto err_ehci; 305 goto err_ehci;
308 } 306 }
309 307
310 return 0; 308 return 0;
311 309
312err_ehci: 310err_ehci:
313 platform_device_put(ehci); 311 platform_device_unregister(ehci);
314 312
315err_end: 313err_end:
316 return ret; 314 return ret;
@@ -351,46 +349,13 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
351 omap->platdata.ehci_data = pdata->ehci_data; 349 omap->platdata.ehci_data = pdata->ehci_data;
352 omap->platdata.ohci_data = pdata->ohci_data; 350 omap->platdata.ohci_data = pdata->ohci_data;
353 351
354 omap->usbhost_ick = clk_get(dev, "usbhost_ick"); 352 pm_runtime_enable(&pdev->dev);
355 if (IS_ERR(omap->usbhost_ick)) {
356 ret = PTR_ERR(omap->usbhost_ick);
357 dev_err(dev, "usbhost_ick failed error:%d\n", ret);
358 goto err_end;
359 }
360
361 omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
362 if (IS_ERR(omap->usbhost_hs_fck)) {
363 ret = PTR_ERR(omap->usbhost_hs_fck);
364 dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
365 goto err_usbhost_ick;
366 }
367
368 omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
369 if (IS_ERR(omap->usbhost_fs_fck)) {
370 ret = PTR_ERR(omap->usbhost_fs_fck);
371 dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
372 goto err_usbhost_hs_fck;
373 }
374
375 omap->usbtll_fck = clk_get(dev, "usbtll_fck");
376 if (IS_ERR(omap->usbtll_fck)) {
377 ret = PTR_ERR(omap->usbtll_fck);
378 dev_err(dev, "usbtll_fck failed error:%d\n", ret);
379 goto err_usbhost_fs_fck;
380 }
381
382 omap->usbtll_ick = clk_get(dev, "usbtll_ick");
383 if (IS_ERR(omap->usbtll_ick)) {
384 ret = PTR_ERR(omap->usbtll_ick);
385 dev_err(dev, "usbtll_ick failed error:%d\n", ret);
386 goto err_usbtll_fck;
387 }
388 353
389 omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); 354 omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
390 if (IS_ERR(omap->utmi_p1_fck)) { 355 if (IS_ERR(omap->utmi_p1_fck)) {
391 ret = PTR_ERR(omap->utmi_p1_fck); 356 ret = PTR_ERR(omap->utmi_p1_fck);
392 dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); 357 dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
393 goto err_usbtll_ick; 358 goto err_end;
394 } 359 }
395 360
396 omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); 361 omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -520,22 +485,8 @@ err_xclk60mhsp1_ck:
520err_utmi_p1_fck: 485err_utmi_p1_fck:
521 clk_put(omap->utmi_p1_fck); 486 clk_put(omap->utmi_p1_fck);
522 487
523err_usbtll_ick:
524 clk_put(omap->usbtll_ick);
525
526err_usbtll_fck:
527 clk_put(omap->usbtll_fck);
528
529err_usbhost_fs_fck:
530 clk_put(omap->usbhost_fs_fck);
531
532err_usbhost_hs_fck:
533 clk_put(omap->usbhost_hs_fck);
534
535err_usbhost_ick:
536 clk_put(omap->usbhost_ick);
537
538err_end: 488err_end:
489 pm_runtime_disable(&pdev->dev);
539 kfree(omap); 490 kfree(omap);
540 491
541end_probe: 492end_probe:
@@ -569,11 +520,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
569 clk_put(omap->utmi_p2_fck); 520 clk_put(omap->utmi_p2_fck);
570 clk_put(omap->xclk60mhsp1_ck); 521 clk_put(omap->xclk60mhsp1_ck);
571 clk_put(omap->utmi_p1_fck); 522 clk_put(omap->utmi_p1_fck);
572 clk_put(omap->usbtll_ick); 523 pm_runtime_disable(&pdev->dev);
573 clk_put(omap->usbtll_fck);
574 clk_put(omap->usbhost_fs_fck);
575 clk_put(omap->usbhost_hs_fck);
576 clk_put(omap->usbhost_ick);
577 kfree(omap); 524 kfree(omap);
578 525
579 return 0; 526 return 0;
@@ -693,7 +640,6 @@ static int usbhs_enable(struct device *dev)
693 struct usbhs_omap_platform_data *pdata = &omap->platdata; 640 struct usbhs_omap_platform_data *pdata = &omap->platdata;
694 unsigned long flags = 0; 641 unsigned long flags = 0;
695 int ret = 0; 642 int ret = 0;
696 unsigned long timeout;
697 unsigned reg; 643 unsigned reg;
698 644
699 dev_dbg(dev, "starting TI HSUSB Controller\n"); 645 dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -706,11 +652,7 @@ static int usbhs_enable(struct device *dev)
706 if (omap->count > 0) 652 if (omap->count > 0)
707 goto end_count; 653 goto end_count;
708 654
709 clk_enable(omap->usbhost_ick); 655 pm_runtime_get_sync(dev);
710 clk_enable(omap->usbhost_hs_fck);
711 clk_enable(omap->usbhost_fs_fck);
712 clk_enable(omap->usbtll_fck);
713 clk_enable(omap->usbtll_ick);
714 656
715 if (pdata->ehci_data->phy_reset) { 657 if (pdata->ehci_data->phy_reset) {
716 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) { 658 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -734,50 +676,6 @@ static int usbhs_enable(struct device *dev)
734 omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); 676 omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
735 dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); 677 dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
736 678
737 /* perform TLL soft reset, and wait until reset is complete */
738 usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
739 OMAP_USBTLL_SYSCONFIG_SOFTRESET);
740
741 /* Wait for TLL reset to complete */
742 timeout = jiffies + msecs_to_jiffies(1000);
743 while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
744 & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
745 cpu_relax();
746
747 if (time_after(jiffies, timeout)) {
748 dev_dbg(dev, "operation timed out\n");
749 ret = -EINVAL;
750 goto err_tll;
751 }
752 }
753
754 dev_dbg(dev, "TLL RESET DONE\n");
755
756 /* (1<<3) = no idle mode only for initial debugging */
757 usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
758 OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
759 OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
760 OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
761
762 /* Put UHH in NoIdle/NoStandby mode */
763 reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
764 if (is_omap_usbhs_rev1(omap)) {
765 reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
766 | OMAP_UHH_SYSCONFIG_SIDLEMODE
767 | OMAP_UHH_SYSCONFIG_CACTIVITY
768 | OMAP_UHH_SYSCONFIG_MIDLEMODE);
769 reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
770
771
772 } else if (is_omap_usbhs_rev2(omap)) {
773 reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
774 reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
775 reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
776 reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
777 }
778
779 usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
780
781 reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG); 679 reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
782 /* setup ULPI bypass and burst configurations */ 680 /* setup ULPI bypass and burst configurations */
783 reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN 681 reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -917,6 +815,8 @@ end_count:
917 return 0; 815 return 0;
918 816
919err_tll: 817err_tll:
818 pm_runtime_put_sync(dev);
819 spin_unlock_irqrestore(&omap->lock, flags);
920 if (pdata->ehci_data->phy_reset) { 820 if (pdata->ehci_data->phy_reset) {
921 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 821 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
922 gpio_free(pdata->ehci_data->reset_gpio_port[0]); 822 gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -924,13 +824,6 @@ err_tll:
924 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 824 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
925 gpio_free(pdata->ehci_data->reset_gpio_port[1]); 825 gpio_free(pdata->ehci_data->reset_gpio_port[1]);
926 } 826 }
927
928 clk_disable(omap->usbtll_ick);
929 clk_disable(omap->usbtll_fck);
930 clk_disable(omap->usbhost_fs_fck);
931 clk_disable(omap->usbhost_hs_fck);
932 clk_disable(omap->usbhost_ick);
933 spin_unlock_irqrestore(&omap->lock, flags);
934 return ret; 827 return ret;
935} 828}
936 829
@@ -994,6 +887,20 @@ static void usbhs_disable(struct device *dev)
994 dev_dbg(dev, "operation timed out\n"); 887 dev_dbg(dev, "operation timed out\n");
995 } 888 }
996 889
890 if (is_omap_usbhs_rev2(omap)) {
891 if (is_ehci_tll_mode(pdata->port_mode[0]))
892 clk_enable(omap->usbtll_p1_fck);
893 if (is_ehci_tll_mode(pdata->port_mode[1]))
894 clk_enable(omap->usbtll_p2_fck);
895 clk_disable(omap->utmi_p2_fck);
896 clk_disable(omap->utmi_p1_fck);
897 }
898
899 pm_runtime_put_sync(dev);
900
 901	/* The gpio_free might sleep; so unlock the spinlock */
902 spin_unlock_irqrestore(&omap->lock, flags);
903
997 if (pdata->ehci_data->phy_reset) { 904 if (pdata->ehci_data->phy_reset) {
998 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) 905 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
999 gpio_free(pdata->ehci_data->reset_gpio_port[0]); 906 gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -1001,14 +908,7 @@ static void usbhs_disable(struct device *dev)
1001 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) 908 if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
1002 gpio_free(pdata->ehci_data->reset_gpio_port[1]); 909 gpio_free(pdata->ehci_data->reset_gpio_port[1]);
1003 } 910 }
1004 911 return;
1005 clk_disable(omap->utmi_p2_fck);
1006 clk_disable(omap->utmi_p1_fck);
1007 clk_disable(omap->usbtll_ick);
1008 clk_disable(omap->usbtll_fck);
1009 clk_disable(omap->usbhost_fs_fck);
1010 clk_disable(omap->usbhost_hs_fck);
1011 clk_disable(omap->usbhost_ick);
1012 912
1013end_disble: 913end_disble:
1014 spin_unlock_irqrestore(&omap->lock, flags); 914 spin_unlock_irqrestore(&omap->lock, flags);
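The hunks above drop the five explicit usbhost/usbtll clk_get()/clk_enable() pairs in favour of runtime PM. A minimal sketch of that pattern, on a hypothetical platform driver rather than the omap-usb-host code itself:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver: probe()/remove() arm and disarm runtime PM, while
 * the start/stop paths take and drop a usage reference; the PM core (via
 * the platform's runtime PM implementation) then handles the clocks that
 * were managed by hand in the code removed above.
 */
static int example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);		/* replaces the clk_get() calls */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);		/* replaces the clk_put() calls */
	return 0;
}

static int example_start(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* replaces the clk_enable() block */
	/* ... program the controller ... */
	return 0;
}

static void example_stop(struct device *dev)
{
	/* ... quiesce the controller ... */
	pm_runtime_put_sync(dev);		/* replaces the clk_disable() block */
}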
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
new file mode 100644
index 000000000000..e873b15753d8
--- /dev/null
+++ b/drivers/mfd/pm8921-core.c
@@ -0,0 +1,212 @@
1/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/kernel.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/msm_ssbi.h>
21#include <linux/mfd/core.h>
22#include <linux/mfd/pm8xxx/pm8921.h>
23#include <linux/mfd/pm8xxx/core.h>
24
25#define REG_HWREV 0x002 /* PMIC4 revision */
26#define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */
27
28struct pm8921 {
29 struct device *dev;
30 struct pm_irq_chip *irq_chip;
31};
32
33static int pm8921_readb(const struct device *dev, u16 addr, u8 *val)
34{
35 const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
36 const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
37
38 return msm_ssbi_read(pmic->dev->parent, addr, val, 1);
39}
40
41static int pm8921_writeb(const struct device *dev, u16 addr, u8 val)
42{
43 const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
44 const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
45
46 return msm_ssbi_write(pmic->dev->parent, addr, &val, 1);
47}
48
49static int pm8921_read_buf(const struct device *dev, u16 addr, u8 *buf,
50 int cnt)
51{
52 const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
53 const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
54
55 return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt);
56}
57
58static int pm8921_write_buf(const struct device *dev, u16 addr, u8 *buf,
59 int cnt)
60{
61 const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
62 const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
63
64 return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt);
65}
66
67static int pm8921_read_irq_stat(const struct device *dev, int irq)
68{
69 const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
70 const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
71
72 return pm8xxx_get_irq_stat(pmic->irq_chip, irq);
73}
74
75static struct pm8xxx_drvdata pm8921_drvdata = {
76 .pmic_readb = pm8921_readb,
77 .pmic_writeb = pm8921_writeb,
78 .pmic_read_buf = pm8921_read_buf,
79 .pmic_write_buf = pm8921_write_buf,
80 .pmic_read_irq_stat = pm8921_read_irq_stat,
81};
82
83static int __devinit pm8921_add_subdevices(const struct pm8921_platform_data
84 *pdata,
85 struct pm8921 *pmic,
86 u32 rev)
87{
88 int ret = 0, irq_base = 0;
89 struct pm_irq_chip *irq_chip;
90
91 if (pdata->irq_pdata) {
92 pdata->irq_pdata->irq_cdata.nirqs = PM8921_NR_IRQS;
93 pdata->irq_pdata->irq_cdata.rev = rev;
94 irq_base = pdata->irq_pdata->irq_base;
95 irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata);
96
97 if (IS_ERR(irq_chip)) {
98 pr_err("Failed to init interrupts ret=%ld\n",
99 PTR_ERR(irq_chip));
100 return PTR_ERR(irq_chip);
101 }
102 pmic->irq_chip = irq_chip;
103 }
104 return ret;
105}
106
107static int __devinit pm8921_probe(struct platform_device *pdev)
108{
109 const struct pm8921_platform_data *pdata = pdev->dev.platform_data;
110 struct pm8921 *pmic;
111 int rc;
112 u8 val;
113 u32 rev;
114
115 if (!pdata) {
116 pr_err("missing platform data\n");
117 return -EINVAL;
118 }
119
120 pmic = kzalloc(sizeof(struct pm8921), GFP_KERNEL);
121 if (!pmic) {
122 pr_err("Cannot alloc pm8921 struct\n");
123 return -ENOMEM;
124 }
125
126 /* Read PMIC chip revision */
127 rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
128 if (rc) {
129 pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc);
130 goto err_read_rev;
131 }
132 pr_info("PMIC revision 1: %02X\n", val);
133 rev = val;
134
135 /* Read PMIC chip revision 2 */
136 rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val));
137 if (rc) {
138 pr_err("Failed to read hw rev 2 reg %d:rc=%d\n",
139 REG_HWREV_2, rc);
140 goto err_read_rev;
141 }
142 pr_info("PMIC revision 2: %02X\n", val);
143 rev |= val << BITS_PER_BYTE;
144
145 pmic->dev = &pdev->dev;
146 pm8921_drvdata.pm_chip_data = pmic;
147 platform_set_drvdata(pdev, &pm8921_drvdata);
148
149 rc = pm8921_add_subdevices(pdata, pmic, rev);
150 if (rc) {
151 pr_err("Cannot add subdevices rc=%d\n", rc);
152 goto err;
153 }
154
155 /* gpio might not work if no irq device is found */
156 WARN_ON(pmic->irq_chip == NULL);
157
158 return 0;
159
160err:
161 mfd_remove_devices(pmic->dev);
162 platform_set_drvdata(pdev, NULL);
163err_read_rev:
164 kfree(pmic);
165 return rc;
166}
167
168static int __devexit pm8921_remove(struct platform_device *pdev)
169{
170 struct pm8xxx_drvdata *drvdata;
171 struct pm8921 *pmic = NULL;
172
173 drvdata = platform_get_drvdata(pdev);
174 if (drvdata)
175 pmic = drvdata->pm_chip_data;
 176	if (pmic) {
 177		mfd_remove_devices(pmic->dev);
 178		if (pmic->irq_chip) {
 179			pm8xxx_irq_exit(pmic->irq_chip);
 180			pmic->irq_chip = NULL;
 181		}
	}
182 platform_set_drvdata(pdev, NULL);
183 kfree(pmic);
184
185 return 0;
186}
187
188static struct platform_driver pm8921_driver = {
189 .probe = pm8921_probe,
190 .remove = __devexit_p(pm8921_remove),
191 .driver = {
192 .name = "pm8921-core",
193 .owner = THIS_MODULE,
194 },
195};
196
197static int __init pm8921_init(void)
198{
199 return platform_driver_register(&pm8921_driver);
200}
201subsys_initcall(pm8921_init);
202
203static void __exit pm8921_exit(void)
204{
205 platform_driver_unregister(&pm8921_driver);
206}
207module_exit(pm8921_exit);
208
209MODULE_LICENSE("GPL v2");
210MODULE_DESCRIPTION("PMIC 8921 core driver");
211MODULE_VERSION("1.0");
212MODULE_ALIAS("platform:pm8921-core");
diff --git a/drivers/mfd/pm8xxx-irq.c b/drivers/mfd/pm8xxx-irq.c
new file mode 100644
index 000000000000..d452dd013081
--- /dev/null
+++ b/drivers/mfd/pm8xxx-irq.c
@@ -0,0 +1,371 @@
1/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/err.h>
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/kernel.h>
20#include <linux/mfd/pm8xxx/core.h>
21#include <linux/mfd/pm8xxx/irq.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24
25/* PMIC8xxx IRQ */
26
27#define SSBI_REG_ADDR_IRQ_BASE 0x1BB
28
29#define SSBI_REG_ADDR_IRQ_ROOT (SSBI_REG_ADDR_IRQ_BASE + 0)
30#define SSBI_REG_ADDR_IRQ_M_STATUS1 (SSBI_REG_ADDR_IRQ_BASE + 1)
31#define SSBI_REG_ADDR_IRQ_M_STATUS2 (SSBI_REG_ADDR_IRQ_BASE + 2)
32#define SSBI_REG_ADDR_IRQ_M_STATUS3 (SSBI_REG_ADDR_IRQ_BASE + 3)
33#define SSBI_REG_ADDR_IRQ_M_STATUS4 (SSBI_REG_ADDR_IRQ_BASE + 4)
34#define SSBI_REG_ADDR_IRQ_BLK_SEL (SSBI_REG_ADDR_IRQ_BASE + 5)
35#define SSBI_REG_ADDR_IRQ_IT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 6)
36#define SSBI_REG_ADDR_IRQ_CONFIG (SSBI_REG_ADDR_IRQ_BASE + 7)
37#define SSBI_REG_ADDR_IRQ_RT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 8)
38
39#define PM_IRQF_LVL_SEL 0x01 /* level select */
40#define PM_IRQF_MASK_FE 0x02 /* mask falling edge */
41#define PM_IRQF_MASK_RE 0x04 /* mask rising edge */
42#define PM_IRQF_CLR 0x08 /* clear interrupt */
43#define PM_IRQF_BITS_MASK 0x70
44#define PM_IRQF_BITS_SHIFT 4
45#define PM_IRQF_WRITE 0x80
46
47#define PM_IRQF_MASK_ALL (PM_IRQF_MASK_FE | \
48 PM_IRQF_MASK_RE)
49
50struct pm_irq_chip {
51 struct device *dev;
52 spinlock_t pm_irq_lock;
53 unsigned int devirq;
54 unsigned int irq_base;
55 unsigned int num_irqs;
56 unsigned int num_blocks;
57 unsigned int num_masters;
58 u8 config[0];
59};
60
61static int pm8xxx_read_root_irq(const struct pm_irq_chip *chip, u8 *rp)
62{
63 return pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_ROOT, rp);
64}
65
66static int pm8xxx_read_master_irq(const struct pm_irq_chip *chip, u8 m, u8 *bp)
67{
68 return pm8xxx_readb(chip->dev,
69 SSBI_REG_ADDR_IRQ_M_STATUS1 + m, bp);
70}
71
72static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, u8 bp, u8 *ip)
73{
74 int rc;
75
76 spin_lock(&chip->pm_irq_lock);
77 rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
78 if (rc) {
79 pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
80 goto bail;
81 }
82
83 rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_IT_STATUS, ip);
84 if (rc)
85 pr_err("Failed Reading Status rc=%d\n", rc);
86bail:
87 spin_unlock(&chip->pm_irq_lock);
88 return rc;
89}
90
91static int pm8xxx_config_irq(struct pm_irq_chip *chip, u8 bp, u8 cp)
92{
93 int rc;
94
95 spin_lock(&chip->pm_irq_lock);
96 rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
97 if (rc) {
98 pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
99 goto bail;
100 }
101
102 cp |= PM_IRQF_WRITE;
103 rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_CONFIG, cp);
104 if (rc)
105 pr_err("Failed Configuring IRQ rc=%d\n", rc);
106bail:
107 spin_unlock(&chip->pm_irq_lock);
108 return rc;
109}
110
111static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
112{
113 int pmirq, irq, i, ret = 0;
114 u8 bits;
115
116 ret = pm8xxx_read_block_irq(chip, block, &bits);
117 if (ret) {
 118		pr_err("Failed reading %d block ret=%d\n", block, ret);
119 return ret;
120 }
121 if (!bits) {
 122		pr_err("block bit set in master but no irqs: %d\n", block);
123 return 0;
124 }
125
126 /* Check IRQ bits */
127 for (i = 0; i < 8; i++) {
128 if (bits & (1 << i)) {
129 pmirq = block * 8 + i;
130 irq = pmirq + chip->irq_base;
131 generic_handle_irq(irq);
132 }
133 }
134 return 0;
135}
136
137static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
138{
139 u8 blockbits;
140 int block_number, i, ret = 0;
141
142 ret = pm8xxx_read_master_irq(chip, master, &blockbits);
143 if (ret) {
144 pr_err("Failed to read master %d ret=%d\n", master, ret);
145 return ret;
146 }
147 if (!blockbits) {
 148		pr_err("master bit set in root but no blocks: %d\n", master);
149 return 0;
150 }
151
152 for (i = 0; i < 8; i++)
153 if (blockbits & (1 << i)) {
154 block_number = master * 8 + i; /* block # */
155 ret |= pm8xxx_irq_block_handler(chip, block_number);
156 }
157 return ret;
158}
159
160static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc)
161{
162 struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
163 struct irq_chip *irq_chip = irq_desc_get_chip(desc);
164 u8 root;
165 int i, ret, masters = 0;
166
167 ret = pm8xxx_read_root_irq(chip, &root);
168 if (ret) {
169 pr_err("Can't read root status ret=%d\n", ret);
170 return;
171 }
172
173 /* on pm8xxx series masters start from bit 1 of the root */
174 masters = root >> 1;
175
176 /* Read allowed masters for blocks. */
177 for (i = 0; i < chip->num_masters; i++)
178 if (masters & (1 << i))
179 pm8xxx_irq_master_handler(chip, i);
180
181 irq_chip->irq_ack(&desc->irq_data);
182}
183
184static void pm8xxx_irq_mask_ack(struct irq_data *d)
185{
186 struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
187 unsigned int pmirq = d->irq - chip->irq_base;
188 int master, irq_bit;
189 u8 block, config;
190
191 block = pmirq / 8;
192 master = block / 8;
193 irq_bit = pmirq % 8;
194
195 config = chip->config[pmirq] | PM_IRQF_MASK_ALL | PM_IRQF_CLR;
196 pm8xxx_config_irq(chip, block, config);
197}
198
199static void pm8xxx_irq_unmask(struct irq_data *d)
200{
201 struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
202 unsigned int pmirq = d->irq - chip->irq_base;
203 int master, irq_bit;
204 u8 block, config;
205
206 block = pmirq / 8;
207 master = block / 8;
208 irq_bit = pmirq % 8;
209
210 config = chip->config[pmirq];
211 pm8xxx_config_irq(chip, block, config);
212}
213
214static int pm8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
215{
216 struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
217 unsigned int pmirq = d->irq - chip->irq_base;
218 int master, irq_bit;
219 u8 block, config;
220
221 block = pmirq / 8;
222 master = block / 8;
223 irq_bit = pmirq % 8;
224
225 chip->config[pmirq] = (irq_bit << PM_IRQF_BITS_SHIFT)
226 | PM_IRQF_MASK_ALL;
227 if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
228 if (flow_type & IRQF_TRIGGER_RISING)
229 chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
230 if (flow_type & IRQF_TRIGGER_FALLING)
231 chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
232 } else {
233 chip->config[pmirq] |= PM_IRQF_LVL_SEL;
234
235 if (flow_type & IRQF_TRIGGER_HIGH)
236 chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
237 else
238 chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
239 }
240
241 config = chip->config[pmirq] | PM_IRQF_CLR;
242 return pm8xxx_config_irq(chip, block, config);
243}
244
245static int pm8xxx_irq_set_wake(struct irq_data *d, unsigned int on)
246{
247 return 0;
248}
249
250static struct irq_chip pm8xxx_irq_chip = {
251 .name = "pm8xxx",
252 .irq_mask_ack = pm8xxx_irq_mask_ack,
253 .irq_unmask = pm8xxx_irq_unmask,
254 .irq_set_type = pm8xxx_irq_set_type,
255 .irq_set_wake = pm8xxx_irq_set_wake,
256 .flags = IRQCHIP_MASK_ON_SUSPEND,
257};
258
259/**
260 * pm8xxx_get_irq_stat - get the status of the irq line
261 * @chip: pointer to identify a pmic irq controller
262 * @irq: the irq number
263 *
264 * The pm8xxx gpio and mpp rely on the interrupt block to read
265 * the values on their pins. This function is to facilitate reading
266 * the status of a gpio or an mpp line. The caller has to convert the
267 * gpio number to irq number.
268 *
269 * RETURNS:
270 * an int indicating the value read on that line
271 */
272int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq)
273{
274 int pmirq, rc;
275 u8 block, bits, bit;
276 unsigned long flags;
277
278 if (chip == NULL || irq < chip->irq_base ||
279 irq >= chip->irq_base + chip->num_irqs)
280 return -EINVAL;
281
282 pmirq = irq - chip->irq_base;
283
284 block = pmirq / 8;
285 bit = pmirq % 8;
286
287 spin_lock_irqsave(&chip->pm_irq_lock, flags);
288
289 rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
290 if (rc) {
291 pr_err("Failed Selecting block irq=%d pmirq=%d blk=%d rc=%d\n",
292 irq, pmirq, block, rc);
293 goto bail_out;
294 }
295
296 rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
297 if (rc) {
298 pr_err("Failed Configuring irq=%d pmirq=%d blk=%d rc=%d\n",
299 irq, pmirq, block, rc);
300 goto bail_out;
301 }
302
303 rc = (bits & (1 << bit)) ? 1 : 0;
304
305bail_out:
306 spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
307
308 return rc;
309}
310EXPORT_SYMBOL_GPL(pm8xxx_get_irq_stat);
311
312struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev,
313 const struct pm8xxx_irq_platform_data *pdata)
314{
315 struct pm_irq_chip *chip;
316 int devirq, rc;
317 unsigned int pmirq;
318
319 if (!pdata) {
320 pr_err("No platform data\n");
321 return ERR_PTR(-EINVAL);
322 }
323
324 devirq = pdata->devirq;
325 if (devirq < 0) {
326 pr_err("missing devirq\n");
327 rc = devirq;
328 return ERR_PTR(-EINVAL);
329 }
330
331 chip = kzalloc(sizeof(struct pm_irq_chip)
332 + sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL);
333 if (!chip) {
334 pr_err("Cannot alloc pm_irq_chip struct\n");
 335		return ERR_PTR(-ENOMEM);
336 }
337
338 chip->dev = dev;
339 chip->devirq = devirq;
340 chip->irq_base = pdata->irq_base;
341 chip->num_irqs = pdata->irq_cdata.nirqs;
342 chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
343 chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
344 spin_lock_init(&chip->pm_irq_lock);
345
346 for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) {
347 irq_set_chip_and_handler(chip->irq_base + pmirq,
348 &pm8xxx_irq_chip,
349 handle_level_irq);
350 irq_set_chip_data(chip->irq_base + pmirq, chip);
351#ifdef CONFIG_ARM
352 set_irq_flags(chip->irq_base + pmirq, IRQF_VALID);
353#else
354 irq_set_noprobe(chip->irq_base + pmirq);
355#endif
356 }
357
358 irq_set_irq_type(devirq, pdata->irq_trigger_flag);
359 irq_set_handler_data(devirq, chip);
360 irq_set_chained_handler(devirq, pm8xxx_irq_handler);
 361	irq_set_irq_wake(devirq, 1);
362
363 return chip;
364}
365
366int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip)
367{
368 irq_set_chained_handler(chip->devirq, NULL);
369 kfree(chip);
370 return 0;
371}
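The chained handler above walks the hierarchy register by register: the root register carries one bit per master, each master status byte covers eight blocks, and each block's IT_STATUS byte covers eight interrupts, so pmirq = block * 8 + bit (for example, pmirq 75 sits in block 9, bit 3, under master 1). A hedged sketch of a pm8xxx_get_irq_stat() caller, along the lines of the gpio/mpp usage named in the kernel-doc (the gpio driver itself is not part of this patch):

#include <linux/mfd/pm8xxx/irq.h>

/* Hypothetical caller: assumes gpio n is wired to irq_base + n, which is
 * an assumption of this sketch, not something this patch defines.
 */
static int example_gpio_get_level(struct pm_irq_chip *chip,
				  unsigned int irq_base, unsigned int gpio)
{
	/* returns the RT_STATUS level (0 or 1), or -EINVAL if the irq is
	 * outside the chip's range */
	return pm8xxx_get_irq_stat(chip, irq_base + gpio);
}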
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 10dbe6374a89..809bd4a61089 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -61,12 +61,14 @@ static struct mfd_cell rdc321x_sb_cells[] = {
61 .name = "rdc321x-wdt", 61 .name = "rdc321x-wdt",
62 .resources = rdc321x_wdt_resource, 62 .resources = rdc321x_wdt_resource,
63 .num_resources = ARRAY_SIZE(rdc321x_wdt_resource), 63 .num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
64 .mfd_data = &rdc321x_wdt_pdata, 64 .platform_data = &rdc321x_wdt_pdata,
65 .pdata_size = sizeof(rdc321x_wdt_pdata),
65 }, { 66 }, {
66 .name = "rdc321x-gpio", 67 .name = "rdc321x-gpio",
67 .resources = rdc321x_gpio_resources, 68 .resources = rdc321x_gpio_resources,
68 .num_resources = ARRAY_SIZE(rdc321x_gpio_resources), 69 .num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
69 .mfd_data = &rdc321x_gpio_pdata, 70 .platform_data = &rdc321x_gpio_pdata,
71 .pdata_size = sizeof(rdc321x_gpio_pdata),
70 }, 72 },
71}; 73};
72 74
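The same .mfd_data to .platform_data plus .pdata_size conversion repeats for the MFD drivers below. With pdata_size set, mfd_add_devices() hands the data to each child platform device, so a cell driver reads it back through the usual platform-device path; a hedged sketch with a made-up driver and pdata type:

#include <linux/platform_device.h>

/* Hypothetical cell driver and platform data; only the retrieval pattern
 * is the point here.
 */
struct example_pdata {
	int some_setting;
};

static int example_cell_probe(struct platform_device *pdev)
{
	struct example_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -EINVAL;

	/* ... configure the device from pdata->some_setting ... */
	return 0;
}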
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 42830e692964..91ad21ef7721 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -170,7 +170,8 @@ static struct mfd_cell t7l66xb_cells[] = {
170 .name = "tmio-mmc", 170 .name = "tmio-mmc",
171 .enable = t7l66xb_mmc_enable, 171 .enable = t7l66xb_mmc_enable,
172 .disable = t7l66xb_mmc_disable, 172 .disable = t7l66xb_mmc_disable,
173 .mfd_data = &t7166xb_mmc_data, 173 .platform_data = &t7166xb_mmc_data,
174 .pdata_size = sizeof(t7166xb_mmc_data),
174 .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources), 175 .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
175 .resources = t7l66xb_mmc_resources, 176 .resources = t7l66xb_mmc_resources,
176 }, 177 },
@@ -382,7 +383,8 @@ static int t7l66xb_probe(struct platform_device *dev)
382 383
383 t7l66xb_attach_irq(dev); 384 t7l66xb_attach_irq(dev);
384 385
385 t7l66xb_cells[T7L66XB_CELL_NAND].mfd_data = pdata->nand_data; 386 t7l66xb_cells[T7L66XB_CELL_NAND].platform_data = pdata->nand_data;
387 t7l66xb_cells[T7L66XB_CELL_NAND].pdata_size = sizeof(*pdata->nand_data);
386 388
387 ret = mfd_add_devices(&dev->dev, dev->id, 389 ret = mfd_add_devices(&dev->dev, dev->id,
388 t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells), 390 t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index b006f7cee952..ad715bf49cac 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -131,7 +131,8 @@ static struct mfd_cell tc6387xb_cells[] = {
131 .name = "tmio-mmc", 131 .name = "tmio-mmc",
132 .enable = tc6387xb_mmc_enable, 132 .enable = tc6387xb_mmc_enable,
133 .disable = tc6387xb_mmc_disable, 133 .disable = tc6387xb_mmc_disable,
134 .mfd_data = &tc6387xb_mmc_data, 134 .platform_data = &tc6387xb_mmc_data,
135 .pdata_size = sizeof(tc6387xb_mmc_data),
135 .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources), 136 .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
136 .resources = tc6387xb_mmc_resources, 137 .resources = tc6387xb_mmc_resources,
137 }, 138 },
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index fc53ce287601..9612264f0e6d 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -393,7 +393,8 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
393 .name = "tmio-mmc", 393 .name = "tmio-mmc",
394 .enable = tc6393xb_mmc_enable, 394 .enable = tc6393xb_mmc_enable,
395 .resume = tc6393xb_mmc_resume, 395 .resume = tc6393xb_mmc_resume,
396 .mfd_data = &tc6393xb_mmc_data, 396 .platform_data = &tc6393xb_mmc_data,
397 .pdata_size = sizeof(tc6393xb_mmc_data),
397 .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources), 398 .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
398 .resources = tc6393xb_mmc_resources, 399 .resources = tc6393xb_mmc_resources,
399 }, 400 },
@@ -692,8 +693,11 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
692 goto err_setup; 693 goto err_setup;
693 } 694 }
694 695
695 tc6393xb_cells[TC6393XB_CELL_NAND].mfd_data = tcpd->nand_data; 696 tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
696 tc6393xb_cells[TC6393XB_CELL_FB].mfd_data = tcpd->fb_data; 697 tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
698 sizeof(*tcpd->nand_data);
699 tc6393xb_cells[TC6393XB_CELL_FB].platform_data = tcpd->fb_data;
700 tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data);
697 701
698 ret = mfd_add_devices(&dev->dev, dev->id, 702 ret = mfd_add_devices(&dev->dev, dev->id,
699 tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), 703 tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 94c6c8afad12..69272e4e3459 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -384,7 +384,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
384 .name = "timb-dma", 384 .name = "timb-dma",
385 .num_resources = ARRAY_SIZE(timberdale_dma_resources), 385 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
386 .resources = timberdale_dma_resources, 386 .resources = timberdale_dma_resources,
387 .mfd_data = &timb_dma_platform_data, 387 .platform_data = &timb_dma_platform_data,
388 .pdata_size = sizeof(timb_dma_platform_data),
388 }, 389 },
389 { 390 {
390 .name = "timb-uart", 391 .name = "timb-uart",
@@ -395,37 +396,43 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
395 .name = "xiic-i2c", 396 .name = "xiic-i2c",
396 .num_resources = ARRAY_SIZE(timberdale_xiic_resources), 397 .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
397 .resources = timberdale_xiic_resources, 398 .resources = timberdale_xiic_resources,
398 .mfd_data = &timberdale_xiic_platform_data, 399 .platform_data = &timberdale_xiic_platform_data,
400 .pdata_size = sizeof(timberdale_xiic_platform_data),
399 }, 401 },
400 { 402 {
401 .name = "timb-gpio", 403 .name = "timb-gpio",
402 .num_resources = ARRAY_SIZE(timberdale_gpio_resources), 404 .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
403 .resources = timberdale_gpio_resources, 405 .resources = timberdale_gpio_resources,
404 .mfd_data = &timberdale_gpio_platform_data, 406 .platform_data = &timberdale_gpio_platform_data,
407 .pdata_size = sizeof(timberdale_gpio_platform_data),
405 }, 408 },
406 { 409 {
407 .name = "timb-video", 410 .name = "timb-video",
408 .num_resources = ARRAY_SIZE(timberdale_video_resources), 411 .num_resources = ARRAY_SIZE(timberdale_video_resources),
409 .resources = timberdale_video_resources, 412 .resources = timberdale_video_resources,
410 .mfd_data = &timberdale_video_platform_data, 413 .platform_data = &timberdale_video_platform_data,
414 .pdata_size = sizeof(timberdale_video_platform_data),
411 }, 415 },
412 { 416 {
413 .name = "timb-radio", 417 .name = "timb-radio",
414 .num_resources = ARRAY_SIZE(timberdale_radio_resources), 418 .num_resources = ARRAY_SIZE(timberdale_radio_resources),
415 .resources = timberdale_radio_resources, 419 .resources = timberdale_radio_resources,
416 .mfd_data = &timberdale_radio_platform_data, 420 .platform_data = &timberdale_radio_platform_data,
421 .pdata_size = sizeof(timberdale_radio_platform_data),
417 }, 422 },
418 { 423 {
419 .name = "xilinx_spi", 424 .name = "xilinx_spi",
420 .num_resources = ARRAY_SIZE(timberdale_spi_resources), 425 .num_resources = ARRAY_SIZE(timberdale_spi_resources),
421 .resources = timberdale_spi_resources, 426 .resources = timberdale_spi_resources,
422 .mfd_data = &timberdale_xspi_platform_data, 427 .platform_data = &timberdale_xspi_platform_data,
428 .pdata_size = sizeof(timberdale_xspi_platform_data),
423 }, 429 },
424 { 430 {
425 .name = "ks8842", 431 .name = "ks8842",
426 .num_resources = ARRAY_SIZE(timberdale_eth_resources), 432 .num_resources = ARRAY_SIZE(timberdale_eth_resources),
427 .resources = timberdale_eth_resources, 433 .resources = timberdale_eth_resources,
428 .mfd_data = &timberdale_ks8842_platform_data, 434 .platform_data = &timberdale_ks8842_platform_data,
435 .pdata_size = sizeof(timberdale_ks8842_platform_data),
429 }, 436 },
430}; 437};
431 438
@@ -434,7 +441,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
434 .name = "timb-dma", 441 .name = "timb-dma",
435 .num_resources = ARRAY_SIZE(timberdale_dma_resources), 442 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
436 .resources = timberdale_dma_resources, 443 .resources = timberdale_dma_resources,
437 .mfd_data = &timb_dma_platform_data, 444 .platform_data = &timb_dma_platform_data,
445 .pdata_size = sizeof(timb_dma_platform_data),
438 }, 446 },
439 { 447 {
440 .name = "timb-uart", 448 .name = "timb-uart",
@@ -450,13 +458,15 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
450 .name = "xiic-i2c", 458 .name = "xiic-i2c",
451 .num_resources = ARRAY_SIZE(timberdale_xiic_resources), 459 .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
452 .resources = timberdale_xiic_resources, 460 .resources = timberdale_xiic_resources,
453 .mfd_data = &timberdale_xiic_platform_data, 461 .platform_data = &timberdale_xiic_platform_data,
462 .pdata_size = sizeof(timberdale_xiic_platform_data),
454 }, 463 },
455 { 464 {
456 .name = "timb-gpio", 465 .name = "timb-gpio",
457 .num_resources = ARRAY_SIZE(timberdale_gpio_resources), 466 .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
458 .resources = timberdale_gpio_resources, 467 .resources = timberdale_gpio_resources,
459 .mfd_data = &timberdale_gpio_platform_data, 468 .platform_data = &timberdale_gpio_platform_data,
469 .pdata_size = sizeof(timberdale_gpio_platform_data),
460 }, 470 },
461 { 471 {
462 .name = "timb-mlogicore", 472 .name = "timb-mlogicore",
@@ -467,25 +477,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
467 .name = "timb-video", 477 .name = "timb-video",
468 .num_resources = ARRAY_SIZE(timberdale_video_resources), 478 .num_resources = ARRAY_SIZE(timberdale_video_resources),
469 .resources = timberdale_video_resources, 479 .resources = timberdale_video_resources,
470 .mfd_data = &timberdale_video_platform_data, 480 .platform_data = &timberdale_video_platform_data,
481 .pdata_size = sizeof(timberdale_video_platform_data),
471 }, 482 },
472 { 483 {
473 .name = "timb-radio", 484 .name = "timb-radio",
474 .num_resources = ARRAY_SIZE(timberdale_radio_resources), 485 .num_resources = ARRAY_SIZE(timberdale_radio_resources),
475 .resources = timberdale_radio_resources, 486 .resources = timberdale_radio_resources,
476 .mfd_data = &timberdale_radio_platform_data, 487 .platform_data = &timberdale_radio_platform_data,
488 .pdata_size = sizeof(timberdale_radio_platform_data),
477 }, 489 },
478 { 490 {
479 .name = "xilinx_spi", 491 .name = "xilinx_spi",
480 .num_resources = ARRAY_SIZE(timberdale_spi_resources), 492 .num_resources = ARRAY_SIZE(timberdale_spi_resources),
481 .resources = timberdale_spi_resources, 493 .resources = timberdale_spi_resources,
482 .mfd_data = &timberdale_xspi_platform_data, 494 .platform_data = &timberdale_xspi_platform_data,
495 .pdata_size = sizeof(timberdale_xspi_platform_data),
483 }, 496 },
484 { 497 {
485 .name = "ks8842", 498 .name = "ks8842",
486 .num_resources = ARRAY_SIZE(timberdale_eth_resources), 499 .num_resources = ARRAY_SIZE(timberdale_eth_resources),
487 .resources = timberdale_eth_resources, 500 .resources = timberdale_eth_resources,
488 .mfd_data = &timberdale_ks8842_platform_data, 501 .platform_data = &timberdale_ks8842_platform_data,
502 .pdata_size = sizeof(timberdale_ks8842_platform_data),
489 }, 503 },
490}; 504};
491 505
@@ -494,7 +508,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
494 .name = "timb-dma", 508 .name = "timb-dma",
495 .num_resources = ARRAY_SIZE(timberdale_dma_resources), 509 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
496 .resources = timberdale_dma_resources, 510 .resources = timberdale_dma_resources,
497 .mfd_data = &timb_dma_platform_data, 511 .platform_data = &timb_dma_platform_data,
512 .pdata_size = sizeof(timb_dma_platform_data),
498 }, 513 },
499 { 514 {
500 .name = "timb-uart", 515 .name = "timb-uart",
@@ -505,31 +520,36 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
505 .name = "xiic-i2c", 520 .name = "xiic-i2c",
506 .num_resources = ARRAY_SIZE(timberdale_xiic_resources), 521 .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
507 .resources = timberdale_xiic_resources, 522 .resources = timberdale_xiic_resources,
508 .mfd_data = &timberdale_xiic_platform_data, 523 .platform_data = &timberdale_xiic_platform_data,
524 .pdata_size = sizeof(timberdale_xiic_platform_data),
509 }, 525 },
510 { 526 {
511 .name = "timb-gpio", 527 .name = "timb-gpio",
512 .num_resources = ARRAY_SIZE(timberdale_gpio_resources), 528 .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
513 .resources = timberdale_gpio_resources, 529 .resources = timberdale_gpio_resources,
514 .mfd_data = &timberdale_gpio_platform_data, 530 .platform_data = &timberdale_gpio_platform_data,
531 .pdata_size = sizeof(timberdale_gpio_platform_data),
515 }, 532 },
516 { 533 {
517 .name = "timb-video", 534 .name = "timb-video",
518 .num_resources = ARRAY_SIZE(timberdale_video_resources), 535 .num_resources = ARRAY_SIZE(timberdale_video_resources),
519 .resources = timberdale_video_resources, 536 .resources = timberdale_video_resources,
520 .mfd_data = &timberdale_video_platform_data, 537 .platform_data = &timberdale_video_platform_data,
538 .pdata_size = sizeof(timberdale_video_platform_data),
521 }, 539 },
522 { 540 {
523 .name = "timb-radio", 541 .name = "timb-radio",
524 .num_resources = ARRAY_SIZE(timberdale_radio_resources), 542 .num_resources = ARRAY_SIZE(timberdale_radio_resources),
525 .resources = timberdale_radio_resources, 543 .resources = timberdale_radio_resources,
526 .mfd_data = &timberdale_radio_platform_data, 544 .platform_data = &timberdale_radio_platform_data,
545 .pdata_size = sizeof(timberdale_radio_platform_data),
527 }, 546 },
528 { 547 {
529 .name = "xilinx_spi", 548 .name = "xilinx_spi",
530 .num_resources = ARRAY_SIZE(timberdale_spi_resources), 549 .num_resources = ARRAY_SIZE(timberdale_spi_resources),
531 .resources = timberdale_spi_resources, 550 .resources = timberdale_spi_resources,
532 .mfd_data = &timberdale_xspi_platform_data, 551 .platform_data = &timberdale_xspi_platform_data,
552 .pdata_size = sizeof(timberdale_xspi_platform_data),
533 }, 553 },
534}; 554};
535 555
@@ -538,7 +558,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
538 .name = "timb-dma", 558 .name = "timb-dma",
539 .num_resources = ARRAY_SIZE(timberdale_dma_resources), 559 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
540 .resources = timberdale_dma_resources, 560 .resources = timberdale_dma_resources,
541 .mfd_data = &timb_dma_platform_data, 561 .platform_data = &timb_dma_platform_data,
562 .pdata_size = sizeof(timb_dma_platform_data),
542 }, 563 },
543 { 564 {
544 .name = "timb-uart", 565 .name = "timb-uart",
@@ -549,37 +570,43 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
549 .name = "ocores-i2c", 570 .name = "ocores-i2c",
550 .num_resources = ARRAY_SIZE(timberdale_ocores_resources), 571 .num_resources = ARRAY_SIZE(timberdale_ocores_resources),
551 .resources = timberdale_ocores_resources, 572 .resources = timberdale_ocores_resources,
552 .mfd_data = &timberdale_ocores_platform_data, 573 .platform_data = &timberdale_ocores_platform_data,
574 .pdata_size = sizeof(timberdale_ocores_platform_data),
553 }, 575 },
554 { 576 {
555 .name = "timb-gpio", 577 .name = "timb-gpio",
556 .num_resources = ARRAY_SIZE(timberdale_gpio_resources), 578 .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
557 .resources = timberdale_gpio_resources, 579 .resources = timberdale_gpio_resources,
558 .mfd_data = &timberdale_gpio_platform_data, 580 .platform_data = &timberdale_gpio_platform_data,
581 .pdata_size = sizeof(timberdale_gpio_platform_data),
559 }, 582 },
560 { 583 {
561 .name = "timb-video", 584 .name = "timb-video",
562 .num_resources = ARRAY_SIZE(timberdale_video_resources), 585 .num_resources = ARRAY_SIZE(timberdale_video_resources),
563 .resources = timberdale_video_resources, 586 .resources = timberdale_video_resources,
564 .mfd_data = &timberdale_video_platform_data, 587 .platform_data = &timberdale_video_platform_data,
588 .pdata_size = sizeof(timberdale_video_platform_data),
565 }, 589 },
566 { 590 {
567 .name = "timb-radio", 591 .name = "timb-radio",
568 .num_resources = ARRAY_SIZE(timberdale_radio_resources), 592 .num_resources = ARRAY_SIZE(timberdale_radio_resources),
569 .resources = timberdale_radio_resources, 593 .resources = timberdale_radio_resources,
570 .mfd_data = &timberdale_radio_platform_data, 594 .platform_data = &timberdale_radio_platform_data,
595 .pdata_size = sizeof(timberdale_radio_platform_data),
571 }, 596 },
572 { 597 {
573 .name = "xilinx_spi", 598 .name = "xilinx_spi",
574 .num_resources = ARRAY_SIZE(timberdale_spi_resources), 599 .num_resources = ARRAY_SIZE(timberdale_spi_resources),
575 .resources = timberdale_spi_resources, 600 .resources = timberdale_spi_resources,
576 .mfd_data = &timberdale_xspi_platform_data, 601 .platform_data = &timberdale_xspi_platform_data,
602 .pdata_size = sizeof(timberdale_xspi_platform_data),
577 }, 603 },
578 { 604 {
579 .name = "ks8842", 605 .name = "ks8842",
580 .num_resources = ARRAY_SIZE(timberdale_eth_resources), 606 .num_resources = ARRAY_SIZE(timberdale_eth_resources),
581 .resources = timberdale_eth_resources, 607 .resources = timberdale_eth_resources,
582 .mfd_data = &timberdale_ks8842_platform_data, 608 .platform_data = &timberdale_ks8842_platform_data,
609 .pdata_size = sizeof(timberdale_ks8842_platform_data),
583 }, 610 },
584}; 611};
585 612
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 46d8205646b6..a293b978e27c 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -183,7 +183,8 @@ static int __devinit tps6105x_probe(struct i2c_client *client,
183 /* Set up and register the platform devices. */ 183 /* Set up and register the platform devices. */
184 for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) { 184 for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) {
185 /* One state holder for all drivers, this is simple */ 185 /* One state holder for all drivers, this is simple */
186 tps6105x_cells[i].mfd_data = tps6105x; 186 tps6105x_cells[i].platform_data = tps6105x;
187 tps6105x_cells[i].pdata_size = sizeof(*tps6105x);
187 } 188 }
188 189
189 ret = mfd_add_devices(&client->dev, 0, tps6105x_cells, 190 ret = mfd_add_devices(&client->dev, 0, tps6105x_cells,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b600808690c1..bba26d96c240 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -270,8 +270,8 @@ static void tps6586x_gpio_set(struct gpio_chip *chip, unsigned offset,
270{ 270{
271 struct tps6586x *tps6586x = container_of(chip, struct tps6586x, gpio); 271 struct tps6586x *tps6586x = container_of(chip, struct tps6586x, gpio);
272 272
273 __tps6586x_write(tps6586x->client, TPS6586X_GPIOSET2, 273 tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET2,
274 value << offset); 274 value << offset, 1 << offset);
275} 275}
276 276
277static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset, 277static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
new file mode 100644
index 000000000000..2bfad5c86cc7
--- /dev/null
+++ b/drivers/mfd/tps65910-irq.c
@@ -0,0 +1,218 @@
1/*
2 * tps65910-irq.c -- TI TPS6591x
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/bug.h>
20#include <linux/device.h>
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/gpio.h>
24#include <linux/mfd/tps65910.h>
25
26static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
27 int irq)
28{
29 return (irq - tps65910->irq_base);
30}
31
32/*
33 * This is a threaded IRQ handler so can access I2C/SPI. Since all
34 * interrupts are clear on read the IRQ line will be reasserted and
35 * the physical IRQ will be handled again if another interrupt is
36 * asserted while we run - in the normal course of events this is a
37 * rare occurrence so we save I2C/SPI reads. We're also assuming that
38 * it's rare to get lots of interrupts firing simultaneously so try to
39 * minimise I/O.
40 */
41static irqreturn_t tps65910_irq(int irq, void *irq_data)
42{
43 struct tps65910 *tps65910 = irq_data;
44 u32 irq_sts;
45 u32 irq_mask;
46 u8 reg;
47 int i;
48
49 tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
50 irq_sts = reg;
51 tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
52 irq_sts |= reg << 8;
53 switch (tps65910_chip_id(tps65910)) {
54 case TPS65911:
55 tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
56 irq_sts |= reg << 16;
57 }
58
59 tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
60 irq_mask = reg;
61 tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
62 irq_mask |= reg << 8;
63 switch (tps65910_chip_id(tps65910)) {
64 case TPS65911:
65 tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
66 irq_mask |= reg << 16;
67 }
68
69 irq_sts &= ~irq_mask;
70
71 if (!irq_sts)
72 return IRQ_NONE;
73
74 for (i = 0; i < tps65910->irq_num; i++) {
75
76 if (!(irq_sts & (1 << i)))
77 continue;
78
79 handle_nested_irq(tps65910->irq_base + i);
80 }
81
82 /* Write the STS register back to clear IRQs we handled */
83 reg = irq_sts & 0xFF;
84 irq_sts >>= 8;
85 tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
86 reg = irq_sts & 0xFF;
87 tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
88 switch (tps65910_chip_id(tps65910)) {
89 case TPS65911:
90 reg = irq_sts >> 8;
91 tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
92 }
93
94 return IRQ_HANDLED;
95}
96
97static void tps65910_irq_lock(struct irq_data *data)
98{
99 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
100
101 mutex_lock(&tps65910->irq_lock);
102}
103
104static void tps65910_irq_sync_unlock(struct irq_data *data)
105{
106 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
107 u32 reg_mask;
108 u8 reg;
109
110 tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
111 reg_mask = reg;
112 tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
113 reg_mask |= reg << 8;
114 switch (tps65910_chip_id(tps65910)) {
115 case TPS65911:
116 tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
117 reg_mask |= reg << 16;
118 }
119
120 if (tps65910->irq_mask != reg_mask) {
121 reg = tps65910->irq_mask & 0xFF;
122 tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
123 reg = tps65910->irq_mask >> 8 & 0xFF;
124 tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
125 switch (tps65910_chip_id(tps65910)) {
126 case TPS65911:
127 reg = tps65910->irq_mask >> 16;
128 tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
129 }
130 }
131 mutex_unlock(&tps65910->irq_lock);
132}
133
134static void tps65910_irq_enable(struct irq_data *data)
135{
136 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
137
 138	tps65910->irq_mask &= ~(1 << irq_to_tps65910_irq(tps65910, data->irq));
139}
140
141static void tps65910_irq_disable(struct irq_data *data)
142{
143 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
144
 145	tps65910->irq_mask |= (1 << irq_to_tps65910_irq(tps65910, data->irq));
146}
147
148static struct irq_chip tps65910_irq_chip = {
149 .name = "tps65910",
150 .irq_bus_lock = tps65910_irq_lock,
151 .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
152 .irq_disable = tps65910_irq_disable,
153 .irq_enable = tps65910_irq_enable,
154};
155
156int tps65910_irq_init(struct tps65910 *tps65910, int irq,
157 struct tps65910_platform_data *pdata)
158{
159 int ret, cur_irq;
160 int flags = IRQF_ONESHOT;
161
162 if (!irq) {
163 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
164 return -EINVAL;
165 }
166
167 if (!pdata || !pdata->irq_base) {
168 dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
169 return -EINVAL;
170 }
171
172 tps65910->irq_mask = 0xFFFFFF;
173
174 mutex_init(&tps65910->irq_lock);
175 tps65910->chip_irq = irq;
176 tps65910->irq_base = pdata->irq_base;
177
178 switch (tps65910_chip_id(tps65910)) {
179 case TPS65910:
 180		tps65910->irq_num = TPS65910_NUM_IRQ;
		break;
181 case TPS65911:
182 tps65910->irq_num = TPS65911_NUM_IRQ;
183 }
184
185 /* Register with genirq */
186 for (cur_irq = tps65910->irq_base;
187 cur_irq < tps65910->irq_num + tps65910->irq_base;
188 cur_irq++) {
189 irq_set_chip_data(cur_irq, tps65910);
190 irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
191 handle_edge_irq);
192 irq_set_nested_thread(cur_irq, 1);
193
194 /* ARM needs us to explicitly flag the IRQ as valid
195 * and will set them noprobe when we do so. */
196#ifdef CONFIG_ARM
197 set_irq_flags(cur_irq, IRQF_VALID);
198#else
199 irq_set_noprobe(cur_irq);
200#endif
201 }
202
203 ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
204 "tps65910", tps65910);
205
206 irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
207
208 if (ret != 0)
209 dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
210
211 return ret;
212}
213
214int tps65910_irq_exit(struct tps65910 *tps65910)
215{
216 free_irq(tps65910->chip_irq, tps65910);
217 return 0;
218}
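Every PMIC interrupt is set up above as a nested threaded irq (irq_set_nested_thread()), so a consumer has to register a threaded handler with no primary handler. A hedged sketch; the irq offset is purely illustrative:

#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>

/* Hypothetical consumer of one of the irqs demultiplexed by tps65910_irq().
 * EXAMPLE_IRQ_OFFSET stands in for a real offset into the irq range; it is
 * not defined by this patch.
 */
#define EXAMPLE_IRQ_OFFSET	0

static int example_request(struct tps65910 *tps65910, irq_handler_t thread_fn,
			   void *dev_id)
{
	int irq = tps65910->irq_base + EXAMPLE_IRQ_OFFSET;

	/* NULL primary handler: a nested irq only ever runs the thread */
	return request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT,
				    "example", dev_id);
}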
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
new file mode 100644
index 000000000000..2229e66d80db
--- /dev/null
+++ b/drivers/mfd/tps65910.c
@@ -0,0 +1,229 @@
1/*
2 * tps65910.c -- TI TPS6591x
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/i2c.h>
21#include <linux/gpio.h>
22#include <linux/mfd/core.h>
23#include <linux/mfd/tps65910.h>
24
25static struct mfd_cell tps65910s[] = {
26 {
27 .name = "tps65910-pmic",
28 },
29 {
30 .name = "tps65910-rtc",
31 },
32 {
33 .name = "tps65910-power",
34 },
35};
36
37
38static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
39 int bytes, void *dest)
40{
41 struct i2c_client *i2c = tps65910->i2c_client;
42 struct i2c_msg xfer[2];
43 int ret;
44
45 /* Write register */
46 xfer[0].addr = i2c->addr;
47 xfer[0].flags = 0;
48 xfer[0].len = 1;
49 xfer[0].buf = &reg;
50
51 /* Read data */
52 xfer[1].addr = i2c->addr;
53 xfer[1].flags = I2C_M_RD;
54 xfer[1].len = bytes;
55 xfer[1].buf = dest;
56
57 ret = i2c_transfer(i2c->adapter, xfer, 2);
58 if (ret == 2)
59 ret = 0;
60 else if (ret >= 0)
61 ret = -EIO;
62
63 return ret;
64}
65
66static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
67 int bytes, void *src)
68{
69 struct i2c_client *i2c = tps65910->i2c_client;
70 /* we add 1 byte for device register */
71 u8 msg[TPS65910_MAX_REGISTER + 1];
72 int ret;
73
74 if (bytes > TPS65910_MAX_REGISTER)
75 return -EINVAL;
76
77 msg[0] = reg;
78 memcpy(&msg[1], src, bytes);
79
80 ret = i2c_master_send(i2c, msg, bytes + 1);
81 if (ret < 0)
82 return ret;
83 if (ret != bytes + 1)
84 return -EIO;
85 return 0;
86}
87
88int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
89{
90 u8 data;
91 int err;
92
93 mutex_lock(&tps65910->io_mutex);
94 err = tps65910_i2c_read(tps65910, reg, 1, &data);
95 if (err) {
96 dev_err(tps65910->dev, "read from reg %x failed\n", reg);
97 goto out;
98 }
99
100 data |= mask;
101 err = tps65910_i2c_write(tps65910, reg, 1, &data);
102 if (err)
103 dev_err(tps65910->dev, "write to reg %x failed\n", reg);
104
105out:
106 mutex_unlock(&tps65910->io_mutex);
107 return err;
108}
109EXPORT_SYMBOL_GPL(tps65910_set_bits);
110
111int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
112{
113 u8 data;
114 int err;
115
116 mutex_lock(&tps65910->io_mutex);
117 err = tps65910_i2c_read(tps65910, reg, 1, &data);
118 if (err) {
119 dev_err(tps65910->dev, "read from reg %x failed\n", reg);
120 goto out;
121 }
122
 123	data &= ~mask;
124 err = tps65910_i2c_write(tps65910, reg, 1, &data);
125 if (err)
126 dev_err(tps65910->dev, "write to reg %x failed\n", reg);
127
128out:
129 mutex_unlock(&tps65910->io_mutex);
130 return err;
131}
132EXPORT_SYMBOL_GPL(tps65910_clear_bits);
133
134static int tps65910_i2c_probe(struct i2c_client *i2c,
135 const struct i2c_device_id *id)
136{
137 struct tps65910 *tps65910;
138 struct tps65910_board *pmic_plat_data;
139 struct tps65910_platform_data *init_data;
140 int ret = 0;
141
142 pmic_plat_data = dev_get_platdata(&i2c->dev);
143 if (!pmic_plat_data)
144 return -EINVAL;
145
146 init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
147 if (init_data == NULL)
148 return -ENOMEM;
149
150 init_data->irq = pmic_plat_data->irq;
151 init_data->irq_base = pmic_plat_data->irq;
152
153 tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
154 if (tps65910 == NULL)
155 return -ENOMEM;
156
157 i2c_set_clientdata(i2c, tps65910);
158 tps65910->dev = &i2c->dev;
159 tps65910->i2c_client = i2c;
160 tps65910->id = id->driver_data;
161 tps65910->read = tps65910_i2c_read;
162 tps65910->write = tps65910_i2c_write;
163 mutex_init(&tps65910->io_mutex);
164
165 ret = mfd_add_devices(tps65910->dev, -1,
166 tps65910s, ARRAY_SIZE(tps65910s),
167 NULL, 0);
168 if (ret < 0)
169 goto err;
170
171 tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
172
173 ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
174 if (ret < 0)
175 goto err;
176
177 return ret;
178
179err:
180 mfd_remove_devices(tps65910->dev);
181 kfree(tps65910);
182 return ret;
183}
184
185static int tps65910_i2c_remove(struct i2c_client *i2c)
186{
187 struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
188
189 mfd_remove_devices(tps65910->dev);
190 kfree(tps65910);
191
192 return 0;
193}
194
195static const struct i2c_device_id tps65910_i2c_id[] = {
196 { "tps65910", TPS65910 },
197 { "tps65911", TPS65911 },
198 { }
199};
200MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id);
201
202
203static struct i2c_driver tps65910_i2c_driver = {
204 .driver = {
205 .name = "tps65910",
206 .owner = THIS_MODULE,
207 },
208 .probe = tps65910_i2c_probe,
209 .remove = tps65910_i2c_remove,
210 .id_table = tps65910_i2c_id,
211};
212
213static int __init tps65910_i2c_init(void)
214{
215 return i2c_add_driver(&tps65910_i2c_driver);
216}
217/* init early so consumer devices can complete system boot */
218subsys_initcall(tps65910_i2c_init);
219
220static void __exit tps65910_i2c_exit(void)
221{
222 i2c_del_driver(&tps65910_i2c_driver);
223}
224module_exit(tps65910_i2c_exit);
225
226MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
227MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
228MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
229MODULE_LICENSE("GPL");
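tps65910_set_bits() and tps65910_clear_bits() are the read-modify-write helpers the cells are expected to use. A hedged sketch of a caller, reusing the parent-drvdata lookup the comparator driver below relies on; the register/mask choice is illustrative only:

#include <linux/device.h>
#include <linux/mfd/tps65910.h>

/* Hypothetical helper in a tps65910 cell driver: the shared struct tps65910
 * is the parent's drvdata, set by i2c_set_clientdata() above.
 */
static int example_mask_first_source(struct device *cell_dev)
{
	struct tps65910 *tps65910 = dev_get_drvdata(cell_dev->parent);

	/* set bit 0 of INT_MSK, i.e. mask the first interrupt source */
	return tps65910_set_bits(tps65910, TPS65910_INT_MSK, 0x01);
}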
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
new file mode 100644
index 000000000000..3d2dc56a3d40
--- /dev/null
+++ b/drivers/mfd/tps65911-comparator.c
@@ -0,0 +1,188 @@
1/*
 2 * tps65911-comparator.c -- TI TPS6591x
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/platform_device.h>
21#include <linux/debugfs.h>
22#include <linux/gpio.h>
23#include <linux/mfd/tps65910.h>
24
25#define COMP 0
26#define COMP1 1
27#define COMP2 2
28
 29/* Comparator 1 voltage selection table in millivolts */
30static const u16 COMP_VSEL_TABLE[] = {
31 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
32 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
33 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450,
34 3500,
35};
36
37struct comparator {
38 const char *name;
39 int reg;
40 int uV_max;
41 const u16 *vsel_table;
42};
43
44static struct comparator tps_comparators[] = {
45 {
46 .name = "COMP1",
47 .reg = TPS65911_VMBCH,
48 .uV_max = 3500,
49 .vsel_table = COMP_VSEL_TABLE,
50 },
51 {
52 .name = "COMP2",
53 .reg = TPS65911_VMBCH2,
54 .uV_max = 3500,
55 .vsel_table = COMP_VSEL_TABLE,
56 },
57};
58
59static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage)
60{
61 struct comparator tps_comp = tps_comparators[id];
62 int curr_voltage = 0;
63 int ret;
64 u8 index = 0, val;
65
66 if (id == COMP)
67 return 0;
68
69 while (curr_voltage < tps_comp.uV_max) {
70 curr_voltage = tps_comp.vsel_table[index];
71 if (curr_voltage >= voltage)
72 break;
73 else if (curr_voltage < voltage)
 74			index++;
75 }
76
77 if (curr_voltage > tps_comp.uV_max)
78 return -EINVAL;
79
80 val = index << 1;
81 ret = tps65910->write(tps65910, tps_comp.reg, 1, &val);
82
83 return ret;
84}
85
86static int comp_threshold_get(struct tps65910 *tps65910, int id)
87{
88 struct comparator tps_comp = tps_comparators[id];
89 int ret;
90 u8 val;
91
92 if (id == COMP)
93 return 0;
94
95 ret = tps65910->read(tps65910, tps_comp.reg, 1, &val);
96 if (ret < 0)
97 return ret;
98
99 val >>= 1;
100 return tps_comp.vsel_table[val];
101}
102
103static ssize_t comp_threshold_show(struct device *dev,
104 struct device_attribute *attr, char *buf)
105{
106 struct tps65910 *tps65910 = dev_get_drvdata(dev->parent);
107 struct attribute comp_attr = attr->attr;
108 int id, uVolt;
109
110 if (!strcmp(comp_attr.name, "comp1_threshold"))
111 id = COMP1;
112 else if (!strcmp(comp_attr.name, "comp2_threshold"))
113 id = COMP2;
114 else
115 return -EINVAL;
116
117 uVolt = comp_threshold_get(tps65910, id);
118
119 return sprintf(buf, "%d\n", uVolt);
120}
121
122static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL);
123static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
124
125static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
126{
127 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
128 struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
129 int ret;
130
131 ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
132 if (ret < 0) {
133 dev_err(&pdev->dev, "cannot set COMP1 threshold\n");
134 return ret;
135 }
136
137 ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold);
138 if (ret < 0) {
139 dev_err(&pdev->dev, "cannot set COMP2 threshold\n");
140 return ret;
141 }
142
143 /* Create sysfs entry */
144 ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold);
145 if (ret < 0)
146 dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n");
147
148 ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold);
149 if (ret < 0)
150 dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n");
151
152 return ret;
153}
154
155static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
156{
157 struct tps65910 *tps65910;
158
159 tps65910 = dev_get_drvdata(pdev->dev.parent);
160
161 return 0;
162}
163
164static struct platform_driver tps65911_comparator_driver = {
165 .driver = {
166 .name = "tps65911-comparator",
167 .owner = THIS_MODULE,
168 },
169 .probe = tps65911_comparator_probe,
170 .remove = __devexit_p(tps65911_comparator_remove),
171};
172
173static int __init tps65911_comparator_init(void)
174{
175 return platform_driver_register(&tps65911_comparator_driver);
176}
177subsys_initcall(tps65911_comparator_init);
178
179static void __exit tps65911_comparator_exit(void)
180{
181 platform_driver_unregister(&tps65911_comparator_driver);
182}
183module_exit(tps65911_comparator_exit);
184
185MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
186MODULE_DESCRIPTION("TPS65911 comparator driver");
187MODULE_LICENSE("GPL v2");
188MODULE_ALIAS("platform:tps65911-comparator");
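
For illustration, a minimal userspace sketch (C) that reads the comparator thresholds the new driver exposes through the comp1_threshold/comp2_threshold sysfs attributes. The sysfs path below is an assumption; the actual location depends on how the tps65911-comparator platform device is registered on a given board.

    #include <stdio.h>

    /* Assumed sysfs location; adjust to the real platform-device path. */
    #define COMP1_ATTR "/sys/devices/platform/tps65911-comparator/comp1_threshold"

    int main(void)
    {
            FILE *f = fopen(COMP1_ATTR, "r");
            int mv;

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* comp_threshold_show() prints the selected table entry, e.g. 3300 */
            if (fscanf(f, "%d", &mv) == 1)
                    printf("COMP1 threshold: %d mV\n", mv);
            fclose(f);
            return 0;
    }
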
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 960b5bed7f52..b8f2a4e7f6e7 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -198,6 +198,7 @@
198#define TWL6030_BASEADD_GASGAUGE 0x00C0 198#define TWL6030_BASEADD_GASGAUGE 0x00C0
199#define TWL6030_BASEADD_PIH 0x00D0 199#define TWL6030_BASEADD_PIH 0x00D0
200#define TWL6030_BASEADD_CHARGER 0x00E0 200#define TWL6030_BASEADD_CHARGER 0x00E0
201#define TWL6025_BASEADD_CHARGER 0x00DA
201 202
202/* subchip/slave 2 0x4A - DFT */ 203/* subchip/slave 2 0x4A - DFT */
203#define TWL6030_BASEADD_DIEID 0x00C0 204#define TWL6030_BASEADD_DIEID 0x00C0
@@ -229,6 +230,9 @@
229/* is driver active, bound to a chip? */ 230/* is driver active, bound to a chip? */
230static bool inuse; 231static bool inuse;
231 232
233/* TWL IDCODE Register value */
234static u32 twl_idcode;
235
232static unsigned int twl_id; 236static unsigned int twl_id;
233unsigned int twl_rev(void) 237unsigned int twl_rev(void)
234{ 238{
@@ -328,6 +332,7 @@ static struct twl_mapping twl6030_map[] = {
328 332
329 { SUB_CHIP_ID0, TWL6030_BASEADD_RTC }, 333 { SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
330 { SUB_CHIP_ID0, TWL6030_BASEADD_MEM }, 334 { SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
335 { SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
331}; 336};
332 337
333/*----------------------------------------------------------------------*/ 338/*----------------------------------------------------------------------*/
@@ -487,6 +492,58 @@ EXPORT_SYMBOL(twl_i2c_read_u8);
487 492
488/*----------------------------------------------------------------------*/ 493/*----------------------------------------------------------------------*/
489 494
495/**
496 * twl_read_idcode_register - API to read the IDCODE register.
497 *
498 * Unlocks the IDCODE register and reads the 32-bit value.
499 */
500static int twl_read_idcode_register(void)
501{
502 int err;
503
504 err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, TWL_EEPROM_R_UNLOCK,
505 REG_UNLOCK_TEST_REG);
506 if (err) {
507 pr_err("TWL4030 Unable to unlock IDCODE registers -%d\n", err);
508 goto fail;
509 }
510
511 err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_idcode),
512 REG_IDCODE_7_0, 4);
513 if (err) {
514 pr_err("TWL4030: unable to read IDCODE -%d\n", err);
515 goto fail;
516 }
517
518 err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, 0x0, REG_UNLOCK_TEST_REG);
519 if (err)
520 pr_err("TWL4030 Unable to relock IDCODE registers -%d\n", err);
521fail:
522 return err;
523}
524
525/**
526 * twl_get_type - API to get TWL Si type.
527 *
528 * API to get the TWL Si type from the IDCODE value.
529 */
530int twl_get_type(void)
531{
532 return TWL_SIL_TYPE(twl_idcode);
533}
534EXPORT_SYMBOL_GPL(twl_get_type);
535
536/**
537 * twl_get_version - API to get TWL Si version.
538 *
539 * API to get the TWL Si version from the IDCODE value.
540 */
541int twl_get_version(void)
542{
543 return TWL_SIL_REV(twl_idcode);
544}
545EXPORT_SYMBOL_GPL(twl_get_version);
546
490static struct device * 547static struct device *
491add_numbered_child(unsigned chip, const char *name, int num, 548add_numbered_child(unsigned chip, const char *name, int num,
492 void *pdata, unsigned pdata_len, 549 void *pdata, unsigned pdata_len,
@@ -549,7 +606,7 @@ static inline struct device *add_child(unsigned chip, const char *name,
549static struct device * 606static struct device *
550add_regulator_linked(int num, struct regulator_init_data *pdata, 607add_regulator_linked(int num, struct regulator_init_data *pdata,
551 struct regulator_consumer_supply *consumers, 608 struct regulator_consumer_supply *consumers,
552 unsigned num_consumers) 609 unsigned num_consumers, unsigned long features)
553{ 610{
554 unsigned sub_chip_id; 611 unsigned sub_chip_id;
555 /* regulator framework demands init_data ... */ 612 /* regulator framework demands init_data ... */
@@ -561,6 +618,8 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
561 pdata->num_consumer_supplies = num_consumers; 618 pdata->num_consumer_supplies = num_consumers;
562 } 619 }
563 620
621 pdata->driver_data = (void *)features;
622
564 /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */ 623 /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
565 sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid; 624 sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid;
566 return add_numbered_child(sub_chip_id, "twl_reg", num, 625 return add_numbered_child(sub_chip_id, "twl_reg", num,
@@ -568,9 +627,10 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
568} 627}
569 628
570static struct device * 629static struct device *
571add_regulator(int num, struct regulator_init_data *pdata) 630add_regulator(int num, struct regulator_init_data *pdata,
631 unsigned long features)
572{ 632{
573 return add_regulator_linked(num, pdata, NULL, 0); 633 return add_regulator_linked(num, pdata, NULL, 0, features);
574} 634}
575 635
576/* 636/*
@@ -650,17 +710,20 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
650 }; 710 };
651 711
652 child = add_regulator_linked(TWL4030_REG_VUSB1V5, 712 child = add_regulator_linked(TWL4030_REG_VUSB1V5,
653 &usb_fixed, &usb1v5, 1); 713 &usb_fixed, &usb1v5, 1,
714 features);
654 if (IS_ERR(child)) 715 if (IS_ERR(child))
655 return PTR_ERR(child); 716 return PTR_ERR(child);
656 717
657 child = add_regulator_linked(TWL4030_REG_VUSB1V8, 718 child = add_regulator_linked(TWL4030_REG_VUSB1V8,
658 &usb_fixed, &usb1v8, 1); 719 &usb_fixed, &usb1v8, 1,
720 features);
659 if (IS_ERR(child)) 721 if (IS_ERR(child))
660 return PTR_ERR(child); 722 return PTR_ERR(child);
661 723
662 child = add_regulator_linked(TWL4030_REG_VUSB3V1, 724 child = add_regulator_linked(TWL4030_REG_VUSB3V1,
663 &usb_fixed, &usb3v1, 1); 725 &usb_fixed, &usb3v1, 1,
726 features);
664 if (IS_ERR(child)) 727 if (IS_ERR(child))
665 return PTR_ERR(child); 728 return PTR_ERR(child);
666 729
@@ -685,9 +748,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
685 } 748 }
686 if (twl_has_usb() && pdata->usb && twl_class_is_6030()) { 749 if (twl_has_usb() && pdata->usb && twl_class_is_6030()) {
687 750
688 static struct regulator_consumer_supply usb3v3 = { 751 static struct regulator_consumer_supply usb3v3;
689 .supply = "vusb", 752 int regulator;
690 };
691 753
692 if (twl_has_regulator()) { 754 if (twl_has_regulator()) {
693 /* this is a template that gets copied */ 755 /* this is a template that gets copied */
@@ -700,12 +762,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
700 | REGULATOR_CHANGE_STATUS, 762 | REGULATOR_CHANGE_STATUS,
701 }; 763 };
702 764
703 child = add_regulator_linked(TWL6030_REG_VUSB, 765 if (features & TWL6025_SUBCLASS) {
704 &usb_fixed, &usb3v3, 1); 766 usb3v3.supply = "ldousb";
767 regulator = TWL6025_REG_LDOUSB;
768 } else {
769 usb3v3.supply = "vusb";
770 regulator = TWL6030_REG_VUSB;
771 }
772 child = add_regulator_linked(regulator, &usb_fixed,
773 &usb3v3, 1,
774 features);
705 if (IS_ERR(child)) 775 if (IS_ERR(child))
706 return PTR_ERR(child); 776 return PTR_ERR(child);
707 } 777 }
708 778
779 pdata->usb->features = features;
780
709 child = add_child(0, "twl6030_usb", 781 child = add_child(0, "twl6030_usb",
710 pdata->usb, sizeof(*pdata->usb), 782 pdata->usb, sizeof(*pdata->usb),
711 true, 783 true,
@@ -718,7 +790,16 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
718 /* we need to connect regulators to this transceiver */ 790 /* we need to connect regulators to this transceiver */
719 if (twl_has_regulator() && child) 791 if (twl_has_regulator() && child)
720 usb3v3.dev = child; 792 usb3v3.dev = child;
793 } else if (twl_has_regulator() && twl_class_is_6030()) {
794 if (features & TWL6025_SUBCLASS)
795 child = add_regulator(TWL6025_REG_LDOUSB,
796 pdata->ldousb, features);
797 else
798 child = add_regulator(TWL6030_REG_VUSB,
799 pdata->vusb, features);
721 800
801 if (IS_ERR(child))
802 return PTR_ERR(child);
722 } 803 }
723 804
724 if (twl_has_watchdog() && twl_class_is_4030()) { 805 if (twl_has_watchdog() && twl_class_is_4030()) {
@@ -755,46 +836,55 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
755 836
756 /* twl4030 regulators */ 837 /* twl4030 regulators */
757 if (twl_has_regulator() && twl_class_is_4030()) { 838 if (twl_has_regulator() && twl_class_is_4030()) {
758 child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); 839 child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1,
840 features);
759 if (IS_ERR(child)) 841 if (IS_ERR(child))
760 return PTR_ERR(child); 842 return PTR_ERR(child);
761 843
762 child = add_regulator(TWL4030_REG_VIO, pdata->vio); 844 child = add_regulator(TWL4030_REG_VIO, pdata->vio,
845 features);
763 if (IS_ERR(child)) 846 if (IS_ERR(child))
764 return PTR_ERR(child); 847 return PTR_ERR(child);
765 848
766 child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1); 849 child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1,
850 features);
767 if (IS_ERR(child)) 851 if (IS_ERR(child))
768 return PTR_ERR(child); 852 return PTR_ERR(child);
769 853
770 child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2); 854 child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2,
855 features);
771 if (IS_ERR(child)) 856 if (IS_ERR(child))
772 return PTR_ERR(child); 857 return PTR_ERR(child);
773 858
774 child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1); 859 child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1,
860 features);
775 if (IS_ERR(child)) 861 if (IS_ERR(child))
776 return PTR_ERR(child); 862 return PTR_ERR(child);
777 863
778 child = add_regulator(TWL4030_REG_VDAC, pdata->vdac); 864 child = add_regulator(TWL4030_REG_VDAC, pdata->vdac,
865 features);
779 if (IS_ERR(child)) 866 if (IS_ERR(child))
780 return PTR_ERR(child); 867 return PTR_ERR(child);
781 868
782 child = add_regulator((features & TWL4030_VAUX2) 869 child = add_regulator((features & TWL4030_VAUX2)
783 ? TWL4030_REG_VAUX2_4030 870 ? TWL4030_REG_VAUX2_4030
784 : TWL4030_REG_VAUX2, 871 : TWL4030_REG_VAUX2,
785 pdata->vaux2); 872 pdata->vaux2, features);
786 if (IS_ERR(child)) 873 if (IS_ERR(child))
787 return PTR_ERR(child); 874 return PTR_ERR(child);
788 875
789 child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1); 876 child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1,
877 features);
790 if (IS_ERR(child)) 878 if (IS_ERR(child))
791 return PTR_ERR(child); 879 return PTR_ERR(child);
792 880
793 child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2); 881 child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2,
882 features);
794 if (IS_ERR(child)) 883 if (IS_ERR(child))
795 return PTR_ERR(child); 884 return PTR_ERR(child);
796 885
797 child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig); 886 child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig,
887 features);
798 if (IS_ERR(child)) 888 if (IS_ERR(child))
799 return PTR_ERR(child); 889 return PTR_ERR(child);
800 } 890 }
@@ -802,72 +892,152 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
802 /* maybe add LDOs that are omitted on cost-reduced parts */ 892 /* maybe add LDOs that are omitted on cost-reduced parts */
803 if (twl_has_regulator() && !(features & TPS_SUBSET) 893 if (twl_has_regulator() && !(features & TPS_SUBSET)
804 && twl_class_is_4030()) { 894 && twl_class_is_4030()) {
805 child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); 895 child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2,
896 features);
806 if (IS_ERR(child)) 897 if (IS_ERR(child))
807 return PTR_ERR(child); 898 return PTR_ERR(child);
808 899
809 child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2); 900 child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2,
901 features);
810 if (IS_ERR(child)) 902 if (IS_ERR(child))
811 return PTR_ERR(child); 903 return PTR_ERR(child);
812 904
813 child = add_regulator(TWL4030_REG_VSIM, pdata->vsim); 905 child = add_regulator(TWL4030_REG_VSIM, pdata->vsim,
906 features);
814 if (IS_ERR(child)) 907 if (IS_ERR(child))
815 return PTR_ERR(child); 908 return PTR_ERR(child);
816 909
817 child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1); 910 child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1,
911 features);
818 if (IS_ERR(child)) 912 if (IS_ERR(child))
819 return PTR_ERR(child); 913 return PTR_ERR(child);
820 914
821 child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3); 915 child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3,
916 features);
822 if (IS_ERR(child)) 917 if (IS_ERR(child))
823 return PTR_ERR(child); 918 return PTR_ERR(child);
824 919
825 child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4); 920 child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4,
921 features);
826 if (IS_ERR(child)) 922 if (IS_ERR(child))
827 return PTR_ERR(child); 923 return PTR_ERR(child);
828 } 924 }
829 925
830 /* twl6030 regulators */ 926 /* twl6030 regulators */
927 if (twl_has_regulator() && twl_class_is_6030() &&
928 !(features & TWL6025_SUBCLASS)) {
929 child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc,
930 features);
931 if (IS_ERR(child))
932 return PTR_ERR(child);
933
934 child = add_regulator(TWL6030_REG_VPP, pdata->vpp,
935 features);
936 if (IS_ERR(child))
937 return PTR_ERR(child);
938
939 child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim,
940 features);
941 if (IS_ERR(child))
942 return PTR_ERR(child);
943
944 child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio,
945 features);
946 if (IS_ERR(child))
947 return PTR_ERR(child);
948
949 child = add_regulator(TWL6030_REG_VDAC, pdata->vdac,
950 features);
951 if (IS_ERR(child))
952 return PTR_ERR(child);
953
954 child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1,
955 features);
956 if (IS_ERR(child))
957 return PTR_ERR(child);
958
959 child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2,
960 features);
961 if (IS_ERR(child))
962 return PTR_ERR(child);
963
964 child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3,
965 features);
966 if (IS_ERR(child))
967 return PTR_ERR(child);
968
969 child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg,
970 features);
971 if (IS_ERR(child))
972 return PTR_ERR(child);
973 }
974
975 /* 6030 and 6025 share this regulator */
831 if (twl_has_regulator() && twl_class_is_6030()) { 976 if (twl_has_regulator() && twl_class_is_6030()) {
832 child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc); 977 child = add_regulator(TWL6030_REG_VANA, pdata->vana,
978 features);
833 if (IS_ERR(child)) 979 if (IS_ERR(child))
834 return PTR_ERR(child); 980 return PTR_ERR(child);
981 }
835 982
836 child = add_regulator(TWL6030_REG_VPP, pdata->vpp); 983 /* twl6025 regulators */
984 if (twl_has_regulator() && twl_class_is_6030() &&
985 (features & TWL6025_SUBCLASS)) {
986 child = add_regulator(TWL6025_REG_LDO5, pdata->ldo5,
987 features);
837 if (IS_ERR(child)) 988 if (IS_ERR(child))
838 return PTR_ERR(child); 989 return PTR_ERR(child);
839 990
840 child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim); 991 child = add_regulator(TWL6025_REG_LDO1, pdata->ldo1,
992 features);
841 if (IS_ERR(child)) 993 if (IS_ERR(child))
842 return PTR_ERR(child); 994 return PTR_ERR(child);
843 995
844 child = add_regulator(TWL6030_REG_VANA, pdata->vana); 996 child = add_regulator(TWL6025_REG_LDO7, pdata->ldo7,
997 features);
845 if (IS_ERR(child)) 998 if (IS_ERR(child))
846 return PTR_ERR(child); 999 return PTR_ERR(child);
847 1000
848 child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio); 1001 child = add_regulator(TWL6025_REG_LDO6, pdata->ldo6,
1002 features);
849 if (IS_ERR(child)) 1003 if (IS_ERR(child))
850 return PTR_ERR(child); 1004 return PTR_ERR(child);
851 1005
852 child = add_regulator(TWL6030_REG_VDAC, pdata->vdac); 1006 child = add_regulator(TWL6025_REG_LDOLN, pdata->ldoln,
1007 features);
853 if (IS_ERR(child)) 1008 if (IS_ERR(child))
854 return PTR_ERR(child); 1009 return PTR_ERR(child);
855 1010
856 child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1); 1011 child = add_regulator(TWL6025_REG_LDO2, pdata->ldo2,
1012 features);
857 if (IS_ERR(child)) 1013 if (IS_ERR(child))
858 return PTR_ERR(child); 1014 return PTR_ERR(child);
859 1015
860 child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2); 1016 child = add_regulator(TWL6025_REG_LDO4, pdata->ldo4,
1017 features);
861 if (IS_ERR(child)) 1018 if (IS_ERR(child))
862 return PTR_ERR(child); 1019 return PTR_ERR(child);
863 1020
864 child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3); 1021 child = add_regulator(TWL6025_REG_LDO3, pdata->ldo3,
1022 features);
865 if (IS_ERR(child)) 1023 if (IS_ERR(child))
866 return PTR_ERR(child); 1024 return PTR_ERR(child);
867 1025
868 child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg); 1026 child = add_regulator(TWL6025_REG_SMPS3, pdata->smps3,
1027 features);
869 if (IS_ERR(child)) 1028 if (IS_ERR(child))
870 return PTR_ERR(child); 1029 return PTR_ERR(child);
1030
1031 child = add_regulator(TWL6025_REG_SMPS4, pdata->smps4,
1032 features);
1033 if (IS_ERR(child))
1034 return PTR_ERR(child);
1035
1036 child = add_regulator(TWL6025_REG_VIO, pdata->vio6025,
1037 features);
1038 if (IS_ERR(child))
1039 return PTR_ERR(child);
1040
871 } 1041 }
872 1042
873 if (twl_has_bci() && pdata->bci && 1043 if (twl_has_bci() && pdata->bci &&
@@ -1014,6 +1184,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1014 unsigned i; 1184 unsigned i;
1015 struct twl4030_platform_data *pdata = client->dev.platform_data; 1185 struct twl4030_platform_data *pdata = client->dev.platform_data;
1016 u8 temp; 1186 u8 temp;
1187 int ret = 0;
1017 1188
1018 if (!pdata) { 1189 if (!pdata) {
1019 dev_dbg(&client->dev, "no platform data?\n"); 1190 dev_dbg(&client->dev, "no platform data?\n");
@@ -1060,6 +1231,12 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1060 /* setup clock framework */ 1231 /* setup clock framework */
1061 clocks_init(&client->dev, pdata->clock); 1232 clocks_init(&client->dev, pdata->clock);
1062 1233
1234 /* read TWL IDCODE Register */
1235 if (twl_id == TWL4030_CLASS_ID) {
1236 ret = twl_read_idcode_register();
1237 WARN(ret < 0, "Error: reading twl_idcode register value\n");
1238 }
1239
1063 /* load power event scripts */ 1240 /* load power event scripts */
1064 if (twl_has_power() && pdata->power) 1241 if (twl_has_power() && pdata->power)
1065 twl4030_power_init(pdata->power); 1242 twl4030_power_init(pdata->power);
@@ -1108,6 +1285,7 @@ static const struct i2c_device_id twl_ids[] = {
1108 { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ 1285 { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */
1109 { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ 1286 { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */
1110 { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */ 1287 { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
1288 { "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */
1111 { /* end of list */ }, 1289 { /* end of list */ },
1112}; 1290};
1113MODULE_DEVICE_TABLE(i2c, twl_ids); 1291MODULE_DEVICE_TABLE(i2c, twl_ids);
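
For illustration, a sketch of how a TWL client might use the twl_get_type()/twl_get_version() helpers exported above to key a quirk off the silicon revision read from IDCODE. The header path and the two constants are assumptions; the real silicon-type and revision values live in the TWL headers and are not shown in this hunk.

    #include <linux/kernel.h>
    #include <linux/i2c/twl.h>	/* assumed location of the twl_get_*() declarations */

    #define MY_TWL_SIL_TYPE		0x09002f	/* placeholder silicon type */
    #define MY_TWL_SIL_REV_CUTOFF	0x10		/* placeholder revision */

    static void my_twl_client_apply_quirks(void)
    {
            /* Both helpers decode the IDCODE value cached at probe time. */
            if (twl_get_type() == MY_TWL_SIL_TYPE &&
                twl_get_version() < MY_TWL_SIL_REV_CUTOFF)
                    pr_info("applying early-silicon workaround\n");
    }
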
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index c02fded316c9..2bf4136464c1 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * MFD driver for twl4030 codec submodule 2 * MFD driver for twl4030 codec submodule
3 * 3 *
4 * Author: Peter Ujfalusi <peter.ujfalusi@nokia.com> 4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 * 5 *
6 * Copyright: (C) 2009 Nokia Corporation 6 * Copyright: (C) 2009 Nokia Corporation
7 * 7 *
@@ -208,13 +208,15 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
208 if (pdata->audio) { 208 if (pdata->audio) {
209 cell = &codec->cells[childs]; 209 cell = &codec->cells[childs];
210 cell->name = "twl4030-codec"; 210 cell->name = "twl4030-codec";
211 cell->mfd_data = pdata->audio; 211 cell->platform_data = pdata->audio;
212 cell->pdata_size = sizeof(*pdata->audio);
212 childs++; 213 childs++;
213 } 214 }
214 if (pdata->vibra) { 215 if (pdata->vibra) {
215 cell = &codec->cells[childs]; 216 cell = &codec->cells[childs];
216 cell->name = "twl4030-vibra"; 217 cell->name = "twl4030-vibra";
217 cell->mfd_data = pdata->vibra; 218 cell->platform_data = pdata->vibra;
219 cell->pdata_size = sizeof(*pdata->vibra);
218 childs++; 220 childs++;
219 } 221 }
220 222
@@ -270,6 +272,6 @@ static void __devexit twl4030_codec_exit(void)
270} 272}
271module_exit(twl4030_codec_exit); 273module_exit(twl4030_codec_exit);
272 274
273MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>"); 275MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
274MODULE_LICENSE("GPL"); 276MODULE_LICENSE("GPL");
275 277
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 2c0d4d16491a..a764676f0922 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -120,7 +120,7 @@ static u8 res_config_addrs[] = {
120 [RES_HFCLKOUT] = 0x8b, 120 [RES_HFCLKOUT] = 0x8b,
121 [RES_32KCLKOUT] = 0x8e, 121 [RES_32KCLKOUT] = 0x8e,
122 [RES_RESET] = 0x91, 122 [RES_RESET] = 0x91,
123 [RES_Main_Ref] = 0x94, 123 [RES_MAIN_REF] = 0x94,
124}; 124};
125 125
126static int __init twl4030_write_script_byte(u8 address, u8 byte) 126static int __init twl4030_write_script_byte(u8 address, u8 byte)
@@ -448,7 +448,7 @@ static int __init load_twl4030_script(struct twl4030_script *tscript,
448 goto out; 448 goto out;
449 } 449 }
450 if (tscript->flags & TWL4030_SLEEP_SCRIPT) { 450 if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
451 if (order) 451 if (!order)
452 pr_warning("TWL4030: Bad order of scripts (sleep "\ 452 pr_warning("TWL4030: Bad order of scripts (sleep "\
453 "script before wakeup) Leads to boot"\ 453 "script before wakeup) Leads to boot"\
454 "failure on some boards\n"); 454 "failure on some boards\n");
@@ -485,9 +485,9 @@ int twl4030_remove_script(u8 flags)
485 return err; 485 return err;
486 } 486 }
487 if (flags & TWL4030_WAKEUP12_SCRIPT) { 487 if (flags & TWL4030_WAKEUP12_SCRIPT) {
488 if (err)
489 err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT, 488 err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
490 R_SEQ_ADD_S2A12); 489 R_SEQ_ADD_S2A12);
490 if (err)
491 return err; 491 return err;
492 } 492 }
493 if (flags & TWL4030_WAKEUP3_SCRIPT) { 493 if (flags & TWL4030_WAKEUP3_SCRIPT) {
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index dfbae34e1804..eb3b5f88e566 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -76,8 +76,8 @@ static int twl6030_interrupt_mapping[24] = {
76 USBOTG_INTR_OFFSET, /* Bit 18 ID */ 76 USBOTG_INTR_OFFSET, /* Bit 18 ID */
77 USB_PRES_INTR_OFFSET, /* Bit 19 VBUS */ 77 USB_PRES_INTR_OFFSET, /* Bit 19 VBUS */
78 CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */ 78 CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */
79 CHARGER_INTR_OFFSET, /* Bit 21 EXT_CHRG */ 79 CHARGERFAULT_INTR_OFFSET, /* Bit 21 EXT_CHRG */
80 CHARGER_INTR_OFFSET, /* Bit 22 INT_CHRG */ 80 CHARGERFAULT_INTR_OFFSET, /* Bit 22 INT_CHRG */
81 RSV_INTR_OFFSET, /* Bit 23 Reserved */ 81 RSV_INTR_OFFSET, /* Bit 23 Reserved */
82}; 82};
83/*----------------------------------------------------------------------*/ 83/*----------------------------------------------------------------------*/
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index 04914f2836c0..d97a86945174 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -153,7 +153,6 @@ out:
153 */ 153 */
154static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume) 154static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume)
155{ 155{
156 u16 val;
157 int r; 156 int r;
158 157
159 if (volume > WL1273_MAX_VOLUME) 158 if (volume > WL1273_MAX_VOLUME)
@@ -217,7 +216,8 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
217 216
218 cell = &core->cells[children]; 217 cell = &core->cells[children];
219 cell->name = "wl1273_fm_radio"; 218 cell->name = "wl1273_fm_radio";
220 cell->mfd_data = &core; 219 cell->platform_data = &core;
220 cell->pdata_size = sizeof(core);
221 children++; 221 children++;
222 222
223 core->read = wl1273_fm_read_reg; 223 core->read = wl1273_fm_read_reg;
@@ -231,7 +231,8 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
231 231
232 dev_dbg(&client->dev, "%s: Have codec.\n", __func__); 232 dev_dbg(&client->dev, "%s: Have codec.\n", __func__);
233 cell->name = "wl1273-codec"; 233 cell->name = "wl1273-codec";
234 cell->mfd_data = &core; 234 cell->platform_data = &core;
235 cell->pdata_size = sizeof(core);
235 children++; 236 children++;
236 } 237 }
237 238
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 3fe9a58fe6c7..265f75fc6a25 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1442,7 +1442,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1442 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 1442 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
1443 int rev; 1443 int rev;
1444 enum wm831x_parent parent; 1444 enum wm831x_parent parent;
1445 int ret; 1445 int ret, i;
1446 1446
1447 mutex_init(&wm831x->io_lock); 1447 mutex_init(&wm831x->io_lock);
1448 mutex_init(&wm831x->key_lock); 1448 mutex_init(&wm831x->key_lock);
@@ -1581,6 +1581,17 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1581 } 1581 }
1582 } 1582 }
1583 1583
1584 if (pdata) {
1585 for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
1586 if (!pdata->gpio_defaults[i])
1587 continue;
1588
1589 wm831x_reg_write(wm831x,
1590 WM831X_GPIO1_CONTROL + i,
1591 pdata->gpio_defaults[i] & 0xffff);
1592 }
1593 }
1594
1584 ret = wm831x_irq_init(wm831x, irq); 1595 ret = wm831x_irq_init(wm831x, irq);
1585 if (ret != 0) 1596 if (ret != 0)
1586 goto err; 1597 goto err;
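
For illustration, a board-file sketch of the gpio_defaults platform data consumed by the loop added above: entries left at 0 are skipped, and only the low 16 bits of a non-zero entry are written to the matching WM831X_GPIOn_CONTROL register. The header path and the register values are placeholders, not recommended settings.

    #include <linux/mfd/wm831x/pdata.h>	/* assumed header for struct wm831x_pdata */

    static struct wm831x_pdata board_wm831x_pdata = {
            .gpio_defaults = {
                    [0] = 0x8001,	/* placeholder GPIO1 control word */
                    [3] = 0x0003,	/* placeholder GPIO4 control word */
                    /* entries that remain 0 keep the hardware defaults */
            },
    };
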
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 23e66af89dea..42b928ec891e 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -515,12 +515,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
515 0xffff); 515 0xffff);
516 } 516 }
517 517
518 if (!irq) {
519 dev_warn(wm831x->dev,
520 "No interrupt specified - functionality limited\n");
521 return 0;
522 }
523
524 if (!pdata || !pdata->irq_base) { 518 if (!pdata || !pdata->irq_base) {
525 dev_err(wm831x->dev, 519 dev_err(wm831x->dev,
526 "No interrupt base specified, no interrupts\n"); 520 "No interrupt base specified, no interrupts\n");
@@ -567,15 +561,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
567#endif 561#endif
568 } 562 }
569 563
570 ret = request_threaded_irq(irq, NULL, wm831x_irq_thread, 564 if (irq) {
571 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 565 ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
572 "wm831x", wm831x); 566 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
573 if (ret != 0) { 567 "wm831x", wm831x);
574 dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n", 568 if (ret != 0) {
575 irq, ret); 569 dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n",
576 return ret; 570 irq, ret);
571 return ret;
572 }
573 } else {
574 dev_warn(wm831x->dev,
575 "No interrupt specified - functionality limited\n");
577 } 576 }
578 577
578
579
579 /* Enable top level interrupts, we mask at secondary level */ 580 /* Enable top level interrupts, we mask at secondary level */
580 wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0); 581 wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0);
581 582
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 3a6e78cb0384..597f82edacaa 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -245,7 +245,8 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
245{ 245{
246 struct mfd_cell cell = { 246 struct mfd_cell cell = {
247 .name = "wm8400-codec", 247 .name = "wm8400-codec",
248 .mfd_data = wm8400, 248 .platform_data = wm8400,
249 .pdata_size = sizeof(*wm8400),
249 }; 250 };
250 251
251 return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0); 252 return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0);
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 74f16f167b8e..b0c56313dbbb 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -285,33 +285,26 @@ static void hw_break_val_write(void)
285static int check_and_rewind_pc(char *put_str, char *arg) 285static int check_and_rewind_pc(char *put_str, char *arg)
286{ 286{
287 unsigned long addr = lookup_addr(arg); 287 unsigned long addr = lookup_addr(arg);
288 unsigned long ip;
288 int offset = 0; 289 int offset = 0;
289 290
290 kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, 291 kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
291 NUMREGBYTES); 292 NUMREGBYTES);
292 gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); 293 gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
293 v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); 294 ip = instruction_pointer(&kgdbts_regs);
294#ifdef CONFIG_X86 295 v2printk("Stopped at IP: %lx\n", ip);
295 /* On x86 a breakpoint stop requires it to be decremented */ 296#ifdef GDB_ADJUSTS_BREAK_OFFSET
296 if (addr + 1 == kgdbts_regs.ip) 297 /* On some arches, a breakpoint stop requires it to be decremented */
297 offset = -1; 298 if (addr + BREAK_INSTR_SIZE == ip)
298#elif defined(CONFIG_SUPERH) 299 offset = -BREAK_INSTR_SIZE;
299 /* On SUPERH a breakpoint stop requires it to be decremented */
300 if (addr + 2 == kgdbts_regs.pc)
301 offset = -2;
302#endif 300#endif
303 if (strcmp(arg, "silent") && 301 if (strcmp(arg, "silent") && ip + offset != addr) {
304 instruction_pointer(&kgdbts_regs) + offset != addr) {
305 eprintk("kgdbts: BP mismatch %lx expected %lx\n", 302 eprintk("kgdbts: BP mismatch %lx expected %lx\n",
306 instruction_pointer(&kgdbts_regs) + offset, addr); 303 ip + offset, addr);
307 return 1; 304 return 1;
308 } 305 }
309#ifdef CONFIG_X86 306 /* Readjust the instruction pointer if needed */
310 /* On x86 adjust the instruction pointer if needed */ 307 instruction_pointer_set(&kgdbts_regs, ip + offset);
311 kgdbts_regs.ip += offset;
312#elif defined(CONFIG_SUPERH)
313 kgdbts_regs.pc += offset;
314#endif
315 return 0; 308 return 0;
316} 309}
317 310
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 61d233a7c118..71da5641e258 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -31,7 +31,11 @@
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
33#include <linux/string_helpers.h> 33#include <linux/string_helpers.h>
34#include <linux/delay.h>
35#include <linux/capability.h>
36#include <linux/compat.h>
34 37
38#include <linux/mmc/ioctl.h>
35#include <linux/mmc/card.h> 39#include <linux/mmc/card.h>
36#include <linux/mmc/host.h> 40#include <linux/mmc/host.h>
37#include <linux/mmc/mmc.h> 41#include <linux/mmc/mmc.h>
@@ -48,6 +52,13 @@ MODULE_ALIAS("mmc:block");
48#endif 52#endif
49#define MODULE_PARAM_PREFIX "mmcblk." 53#define MODULE_PARAM_PREFIX "mmcblk."
50 54
55#define INAND_CMD38_ARG_EXT_CSD 113
56#define INAND_CMD38_ARG_ERASE 0x00
57#define INAND_CMD38_ARG_TRIM 0x01
58#define INAND_CMD38_ARG_SECERASE 0x80
59#define INAND_CMD38_ARG_SECTRIM1 0x81
60#define INAND_CMD38_ARG_SECTRIM2 0x88
61
51static DEFINE_MUTEX(block_mutex); 62static DEFINE_MUTEX(block_mutex);
52 63
53/* 64/*
@@ -64,6 +75,7 @@ static int max_devices;
64 75
65/* 256 minors, so at most 256 separate devices */ 76/* 256 minors, so at most 256 separate devices */
66static DECLARE_BITMAP(dev_use, 256); 77static DECLARE_BITMAP(dev_use, 256);
78static DECLARE_BITMAP(name_use, 256);
67 79
68/* 80/*
69 * There is one mmc_blk_data per slot. 81 * There is one mmc_blk_data per slot.
@@ -72,9 +84,24 @@ struct mmc_blk_data {
72 spinlock_t lock; 84 spinlock_t lock;
73 struct gendisk *disk; 85 struct gendisk *disk;
74 struct mmc_queue queue; 86 struct mmc_queue queue;
87 struct list_head part;
88
89 unsigned int flags;
90#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
91#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
75 92
76 unsigned int usage; 93 unsigned int usage;
77 unsigned int read_only; 94 unsigned int read_only;
95 unsigned int part_type;
96 unsigned int name_idx;
97
98 /*
 99 * Only set in the main mmc_blk_data associated
 100 * with the mmc_card via mmc_set_drvdata; keeps
 101 * track of the currently selected device partition.
102 */
103 unsigned int part_curr;
104 struct device_attribute force_ro;
78}; 105};
79 106
80static DEFINE_MUTEX(open_lock); 107static DEFINE_MUTEX(open_lock);
@@ -97,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
97 return md; 124 return md;
98} 125}
99 126
127static inline int mmc_get_devidx(struct gendisk *disk)
128{
129 int devmaj = MAJOR(disk_devt(disk));
130 int devidx = MINOR(disk_devt(disk)) / perdev_minors;
131
132 if (!devmaj)
133 devidx = disk->first_minor / perdev_minors;
134 return devidx;
135}
136
100static void mmc_blk_put(struct mmc_blk_data *md) 137static void mmc_blk_put(struct mmc_blk_data *md)
101{ 138{
102 mutex_lock(&open_lock); 139 mutex_lock(&open_lock);
103 md->usage--; 140 md->usage--;
104 if (md->usage == 0) { 141 if (md->usage == 0) {
105 int devmaj = MAJOR(disk_devt(md->disk)); 142 int devidx = mmc_get_devidx(md->disk);
106 int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
107
108 if (!devmaj)
109 devidx = md->disk->first_minor / perdev_minors;
110
111 blk_cleanup_queue(md->queue.queue); 143 blk_cleanup_queue(md->queue.queue);
112 144
113 __clear_bit(devidx, dev_use); 145 __clear_bit(devidx, dev_use);
@@ -118,6 +150,38 @@ static void mmc_blk_put(struct mmc_blk_data *md)
118 mutex_unlock(&open_lock); 150 mutex_unlock(&open_lock);
119} 151}
120 152
153static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
154 char *buf)
155{
156 int ret;
157 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
158
159 ret = snprintf(buf, PAGE_SIZE, "%d",
160 get_disk_ro(dev_to_disk(dev)) ^
161 md->read_only);
162 mmc_blk_put(md);
163 return ret;
164}
165
166static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
167 const char *buf, size_t count)
168{
169 int ret;
170 char *end;
171 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
172 unsigned long set = simple_strtoul(buf, &end, 0);
173 if (end == buf) {
174 ret = -EINVAL;
175 goto out;
176 }
177
178 set_disk_ro(dev_to_disk(dev), set || md->read_only);
179 ret = count;
180out:
181 mmc_blk_put(md);
182 return ret;
183}
184
121static int mmc_blk_open(struct block_device *bdev, fmode_t mode) 185static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
122{ 186{
123 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); 187 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -158,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
158 return 0; 222 return 0;
159} 223}
160 224
225struct mmc_blk_ioc_data {
226 struct mmc_ioc_cmd ic;
227 unsigned char *buf;
228 u64 buf_bytes;
229};
230
231static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
232 struct mmc_ioc_cmd __user *user)
233{
234 struct mmc_blk_ioc_data *idata;
235 int err;
236
237 idata = kzalloc(sizeof(*idata), GFP_KERNEL);
238 if (!idata) {
239 err = -ENOMEM;
240 goto out;
241 }
242
243 if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
244 err = -EFAULT;
245 goto idata_err;
246 }
247
248 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
249 if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
250 err = -EOVERFLOW;
251 goto idata_err;
252 }
253
254 idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
255 if (!idata->buf) {
256 err = -ENOMEM;
257 goto idata_err;
258 }
259
260 if (copy_from_user(idata->buf, (void __user *)(unsigned long)
261 idata->ic.data_ptr, idata->buf_bytes)) {
262 err = -EFAULT;
263 goto copy_err;
264 }
265
266 return idata;
267
268copy_err:
269 kfree(idata->buf);
270idata_err:
271 kfree(idata);
272out:
273 return ERR_PTR(err);
274}
275
276static int mmc_blk_ioctl_cmd(struct block_device *bdev,
277 struct mmc_ioc_cmd __user *ic_ptr)
278{
279 struct mmc_blk_ioc_data *idata;
280 struct mmc_blk_data *md;
281 struct mmc_card *card;
282 struct mmc_command cmd = {0};
283 struct mmc_data data = {0};
284 struct mmc_request mrq = {0};
285 struct scatterlist sg;
286 int err;
287
288 /*
289 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
290 * whole block device, not on a partition. This prevents overspray
291 * between sibling partitions.
292 */
293 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
294 return -EPERM;
295
296 idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
297 if (IS_ERR(idata))
298 return PTR_ERR(idata);
299
300 cmd.opcode = idata->ic.opcode;
301 cmd.arg = idata->ic.arg;
302 cmd.flags = idata->ic.flags;
303
304 data.sg = &sg;
305 data.sg_len = 1;
306 data.blksz = idata->ic.blksz;
307 data.blocks = idata->ic.blocks;
308
309 sg_init_one(data.sg, idata->buf, idata->buf_bytes);
310
311 if (idata->ic.write_flag)
312 data.flags = MMC_DATA_WRITE;
313 else
314 data.flags = MMC_DATA_READ;
315
316 mrq.cmd = &cmd;
317 mrq.data = &data;
318
319 md = mmc_blk_get(bdev->bd_disk);
320 if (!md) {
321 err = -EINVAL;
322 goto cmd_done;
323 }
324
325 card = md->queue.card;
326 if (IS_ERR(card)) {
327 err = PTR_ERR(card);
328 goto cmd_done;
329 }
330
331 mmc_claim_host(card->host);
332
333 if (idata->ic.is_acmd) {
334 err = mmc_app_cmd(card->host, card);
335 if (err)
336 goto cmd_rel_host;
337 }
338
339 /* data.flags must already be set before doing this. */
340 mmc_set_data_timeout(&data, card);
341 /* Allow overriding the timeout_ns for empirical tuning. */
342 if (idata->ic.data_timeout_ns)
343 data.timeout_ns = idata->ic.data_timeout_ns;
344
345 if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
346 /*
347 * Pretend this is a data transfer and rely on the host driver
348 * to compute timeout. When all host drivers support
349 * cmd.cmd_timeout for R1B, this can be changed to:
350 *
351 * mrq.data = NULL;
352 * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
353 */
354 data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
355 }
356
357 mmc_wait_for_req(card->host, &mrq);
358
359 if (cmd.error) {
360 dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
361 __func__, cmd.error);
362 err = cmd.error;
363 goto cmd_rel_host;
364 }
365 if (data.error) {
366 dev_err(mmc_dev(card->host), "%s: data error %d\n",
367 __func__, data.error);
368 err = data.error;
369 goto cmd_rel_host;
370 }
371
372 /*
373 * According to the SD specs, some commands require a delay after
374 * issuing the command.
375 */
376 if (idata->ic.postsleep_min_us)
377 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
378
379 if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
380 err = -EFAULT;
381 goto cmd_rel_host;
382 }
383
384 if (!idata->ic.write_flag) {
385 if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
386 idata->buf, idata->buf_bytes)) {
387 err = -EFAULT;
388 goto cmd_rel_host;
389 }
390 }
391
392cmd_rel_host:
393 mmc_release_host(card->host);
394
395cmd_done:
396 mmc_blk_put(md);
397 kfree(idata->buf);
398 kfree(idata);
399 return err;
400}
401
402static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
403 unsigned int cmd, unsigned long arg)
404{
405 int ret = -EINVAL;
406 if (cmd == MMC_IOC_CMD)
407 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
408 return ret;
409}
410
411#ifdef CONFIG_COMPAT
412static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
413 unsigned int cmd, unsigned long arg)
414{
415 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
416}
417#endif
418
161static const struct block_device_operations mmc_bdops = { 419static const struct block_device_operations mmc_bdops = {
162 .open = mmc_blk_open, 420 .open = mmc_blk_open,
163 .release = mmc_blk_release, 421 .release = mmc_blk_release,
164 .getgeo = mmc_blk_getgeo, 422 .getgeo = mmc_blk_getgeo,
165 .owner = THIS_MODULE, 423 .owner = THIS_MODULE,
424 .ioctl = mmc_blk_ioctl,
425#ifdef CONFIG_COMPAT
426 .compat_ioctl = mmc_blk_compat_ioctl,
427#endif
166}; 428};
167 429
168struct mmc_blk_request { 430struct mmc_blk_request {
169 struct mmc_request mrq; 431 struct mmc_request mrq;
432 struct mmc_command sbc;
170 struct mmc_command cmd; 433 struct mmc_command cmd;
171 struct mmc_command stop; 434 struct mmc_command stop;
172 struct mmc_data data; 435 struct mmc_data data;
173}; 436};
174 437
438static inline int mmc_blk_part_switch(struct mmc_card *card,
439 struct mmc_blk_data *md)
440{
441 int ret;
442 struct mmc_blk_data *main_md = mmc_get_drvdata(card);
443 if (main_md->part_curr == md->part_type)
444 return 0;
445
446 if (mmc_card_mmc(card)) {
447 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
448 card->ext_csd.part_config |= md->part_type;
449
450 ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
451 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
452 card->ext_csd.part_time);
453 if (ret)
454 return ret;
455}
456
457 main_md->part_curr = md->part_type;
458 return 0;
459}
460
175static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) 461static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
176{ 462{
177 int err; 463 int err;
178 u32 result; 464 u32 result;
179 __be32 *blocks; 465 __be32 *blocks;
180 466
181 struct mmc_request mrq; 467 struct mmc_request mrq = {0};
182 struct mmc_command cmd; 468 struct mmc_command cmd = {0};
183 struct mmc_data data; 469 struct mmc_data data = {0};
184 unsigned int timeout_us; 470 unsigned int timeout_us;
185 471
186 struct scatterlist sg; 472 struct scatterlist sg;
187 473
188 memset(&cmd, 0, sizeof(struct mmc_command));
189
190 cmd.opcode = MMC_APP_CMD; 474 cmd.opcode = MMC_APP_CMD;
191 cmd.arg = card->rca << 16; 475 cmd.arg = card->rca << 16;
192 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 476 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -203,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
203 cmd.arg = 0; 487 cmd.arg = 0;
204 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; 488 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
205 489
206 memset(&data, 0, sizeof(struct mmc_data));
207
208 data.timeout_ns = card->csd.tacc_ns * 100; 490 data.timeout_ns = card->csd.tacc_ns * 100;
209 data.timeout_clks = card->csd.tacc_clks * 100; 491 data.timeout_clks = card->csd.tacc_clks * 100;
210 492
@@ -223,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
223 data.sg = &sg; 505 data.sg = &sg;
224 data.sg_len = 1; 506 data.sg_len = 1;
225 507
226 memset(&mrq, 0, sizeof(struct mmc_request));
227
228 mrq.cmd = &cmd; 508 mrq.cmd = &cmd;
229 mrq.data = &data; 509 mrq.data = &data;
230 510
@@ -247,10 +527,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
247 527
248static u32 get_card_status(struct mmc_card *card, struct request *req) 528static u32 get_card_status(struct mmc_card *card, struct request *req)
249{ 529{
250 struct mmc_command cmd; 530 struct mmc_command cmd = {0};
251 int err; 531 int err;
252 532
253 memset(&cmd, 0, sizeof(struct mmc_command));
254 cmd.opcode = MMC_SEND_STATUS; 533 cmd.opcode = MMC_SEND_STATUS;
255 if (!mmc_host_is_spi(card->host)) 534 if (!mmc_host_is_spi(card->host))
256 cmd.arg = card->rca << 16; 535 cmd.arg = card->rca << 16;
@@ -269,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
269 unsigned int from, nr, arg; 548 unsigned int from, nr, arg;
270 int err = 0; 549 int err = 0;
271 550
272 mmc_claim_host(card->host);
273
274 if (!mmc_can_erase(card)) { 551 if (!mmc_can_erase(card)) {
275 err = -EOPNOTSUPP; 552 err = -EOPNOTSUPP;
276 goto out; 553 goto out;
@@ -284,14 +561,22 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
284 else 561 else
285 arg = MMC_ERASE_ARG; 562 arg = MMC_ERASE_ARG;
286 563
564 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
565 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
566 INAND_CMD38_ARG_EXT_CSD,
567 arg == MMC_TRIM_ARG ?
568 INAND_CMD38_ARG_TRIM :
569 INAND_CMD38_ARG_ERASE,
570 0);
571 if (err)
572 goto out;
573 }
287 err = mmc_erase(card, from, nr, arg); 574 err = mmc_erase(card, from, nr, arg);
288out: 575out:
289 spin_lock_irq(&md->lock); 576 spin_lock_irq(&md->lock);
290 __blk_end_request(req, err, blk_rq_bytes(req)); 577 __blk_end_request(req, err, blk_rq_bytes(req));
291 spin_unlock_irq(&md->lock); 578 spin_unlock_irq(&md->lock);
292 579
293 mmc_release_host(card->host);
294
295 return err ? 0 : 1; 580 return err ? 0 : 1;
296} 581}
297 582
@@ -303,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
303 unsigned int from, nr, arg; 588 unsigned int from, nr, arg;
304 int err = 0; 589 int err = 0;
305 590
306 mmc_claim_host(card->host);
307
308 if (!mmc_can_secure_erase_trim(card)) { 591 if (!mmc_can_secure_erase_trim(card)) {
309 err = -EOPNOTSUPP; 592 err = -EOPNOTSUPP;
310 goto out; 593 goto out;
@@ -318,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
318 else 601 else
319 arg = MMC_SECURE_ERASE_ARG; 602 arg = MMC_SECURE_ERASE_ARG;
320 603
604 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
605 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
606 INAND_CMD38_ARG_EXT_CSD,
607 arg == MMC_SECURE_TRIM1_ARG ?
608 INAND_CMD38_ARG_SECTRIM1 :
609 INAND_CMD38_ARG_SECERASE,
610 0);
611 if (err)
612 goto out;
613 }
321 err = mmc_erase(card, from, nr, arg); 614 err = mmc_erase(card, from, nr, arg);
322 if (!err && arg == MMC_SECURE_TRIM1_ARG) 615 if (!err && arg == MMC_SECURE_TRIM1_ARG) {
616 if (card->quirks & MMC_QUIRK_INAND_CMD38) {
617 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
618 INAND_CMD38_ARG_EXT_CSD,
619 INAND_CMD38_ARG_SECTRIM2,
620 0);
621 if (err)
622 goto out;
623 }
323 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); 624 err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
625 }
324out: 626out:
325 spin_lock_irq(&md->lock); 627 spin_lock_irq(&md->lock);
326 __blk_end_request(req, err, blk_rq_bytes(req)); 628 __blk_end_request(req, err, blk_rq_bytes(req));
327 spin_unlock_irq(&md->lock); 629 spin_unlock_irq(&md->lock);
328 630
329 mmc_release_host(card->host);
330
331 return err ? 0 : 1; 631 return err ? 0 : 1;
332} 632}
333 633
634static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
635{
636 struct mmc_blk_data *md = mq->data;
637
638 /*
639 * No-op, only service this because we need REQ_FUA for reliable
640 * writes.
641 */
642 spin_lock_irq(&md->lock);
643 __blk_end_request_all(req, 0);
644 spin_unlock_irq(&md->lock);
645
646 return 1;
647}
648
649/*
 650 * Reformat the current write as a reliable write, supporting
 651 * both legacy and enhanced reliable-write MMC cards.
 652 * In each transfer we handle only as much as a single
 653 * reliable write can take, thus finishing the request in
 654 * partial completions.
655 */
656static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
657 struct mmc_card *card,
658 struct request *req)
659{
660 if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
661 /* Legacy mode imposes restrictions on transfers. */
662 if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
663 brq->data.blocks = 1;
664
665 if (brq->data.blocks > card->ext_csd.rel_sectors)
666 brq->data.blocks = card->ext_csd.rel_sectors;
667 else if (brq->data.blocks < card->ext_csd.rel_sectors)
668 brq->data.blocks = 1;
669 }
670}
671
334static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) 672static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
335{ 673{
336 struct mmc_blk_data *md = mq->data; 674 struct mmc_blk_data *md = mq->data;
@@ -338,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
338 struct mmc_blk_request brq; 676 struct mmc_blk_request brq;
339 int ret = 1, disable_multi = 0; 677 int ret = 1, disable_multi = 0;
340 678
341 mmc_claim_host(card->host); 679 /*
680 * Reliable writes are used to implement Forced Unit Access and
681 * REQ_META accesses, and are supported only on MMCs.
682 */
683 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
684 (req->cmd_flags & REQ_META)) &&
685 (rq_data_dir(req) == WRITE) &&
686 (md->flags & MMC_BLK_REL_WR);
342 687
343 do { 688 do {
344 struct mmc_command cmd; 689 struct mmc_command cmd = {0};
345 u32 readcmd, writecmd, status = 0; 690 u32 readcmd, writecmd, status = 0;
346 691
347 memset(&brq, 0, sizeof(struct mmc_blk_request)); 692 memset(&brq, 0, sizeof(struct mmc_blk_request));
@@ -374,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
374 if (disable_multi && brq.data.blocks > 1) 719 if (disable_multi && brq.data.blocks > 1)
375 brq.data.blocks = 1; 720 brq.data.blocks = 1;
376 721
377 if (brq.data.blocks > 1) { 722 if (brq.data.blocks > 1 || do_rel_wr) {
378 /* SPI multiblock writes terminate using a special 723 /* SPI multiblock writes terminate using a special
379 * token, not a STOP_TRANSMISSION request. 724 * token, not a STOP_TRANSMISSION request.
380 */ 725 */
381 if (!mmc_host_is_spi(card->host) 726 if (!mmc_host_is_spi(card->host) ||
382 || rq_data_dir(req) == READ) 727 rq_data_dir(req) == READ)
383 brq.mrq.stop = &brq.stop; 728 brq.mrq.stop = &brq.stop;
384 readcmd = MMC_READ_MULTIPLE_BLOCK; 729 readcmd = MMC_READ_MULTIPLE_BLOCK;
385 writecmd = MMC_WRITE_MULTIPLE_BLOCK; 730 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
396 brq.data.flags |= MMC_DATA_WRITE; 741 brq.data.flags |= MMC_DATA_WRITE;
397 } 742 }
398 743
744 if (do_rel_wr)
745 mmc_apply_rel_rw(&brq, card, req);
746
747 /*
748 * Pre-defined multi-block transfers are preferable to
 749 * open-ended ones (and necessary for reliable writes).
750 * However, it is not sufficient to just send CMD23,
751 * and avoid the final CMD12, as on an error condition
752 * CMD12 (stop) needs to be sent anyway. This, coupled
753 * with Auto-CMD23 enhancements provided by some
754 * hosts, means that the complexity of dealing
755 * with this is best left to the host. If CMD23 is
756 * supported by card and host, we'll fill sbc in and let
757 * the host deal with handling it correctly. This means
758 * that for hosts that don't expose MMC_CAP_CMD23, no
759 * change of behavior will be observed.
760 *
 761 * N.B.: Some MMC cards experience performance degradation.
762 * We'll avoid using CMD23-bounded multiblock writes for
763 * these, while retaining features like reliable writes.
764 */
765
766 if ((md->flags & MMC_BLK_CMD23) &&
767 mmc_op_multi(brq.cmd.opcode) &&
768 (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
769 brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
770 brq.sbc.arg = brq.data.blocks |
771 (do_rel_wr ? (1 << 31) : 0);
772 brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
773 brq.mrq.sbc = &brq.sbc;
774 }
775
399 mmc_set_data_timeout(&brq.data, card); 776 mmc_set_data_timeout(&brq.data, card);
400 777
401 brq.data.sg = mq->sg; 778 brq.data.sg = mq->sg;
@@ -431,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
431 * until later as we need to wait for the card to leave 808 * until later as we need to wait for the card to leave
432 * programming mode even when things go wrong. 809 * programming mode even when things go wrong.
433 */ 810 */
434 if (brq.cmd.error || brq.data.error || brq.stop.error) { 811 if (brq.sbc.error || brq.cmd.error ||
812 brq.data.error || brq.stop.error) {
435 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { 813 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
436 /* Redo read one sector at a time */ 814 /* Redo read one sector at a time */
437 printk(KERN_WARNING "%s: retrying using single " 815 printk(KERN_WARNING "%s: retrying using single "
@@ -442,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
442 status = get_card_status(card, req); 820 status = get_card_status(card, req);
443 } 821 }
444 822
823 if (brq.sbc.error) {
824 printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
825 "command, response %#x, card status %#x\n",
826 req->rq_disk->disk_name, brq.sbc.error,
827 brq.sbc.resp[0], status);
828 }
829
445 if (brq.cmd.error) { 830 if (brq.cmd.error) {
446 printk(KERN_ERR "%s: error %d sending read/write " 831 printk(KERN_ERR "%s: error %d sending read/write "
447 "command, response %#x, card status %#x\n", 832 "command, response %#x, card status %#x\n",
@@ -520,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
520 spin_unlock_irq(&md->lock); 905 spin_unlock_irq(&md->lock);
521 } while (ret); 906 } while (ret);
522 907
523 mmc_release_host(card->host);
524
525 return 1; 908 return 1;
526 909
527 cmd_err: 910 cmd_err:
@@ -548,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
548 spin_unlock_irq(&md->lock); 931 spin_unlock_irq(&md->lock);
549 } 932 }
550 933
551 mmc_release_host(card->host);
552
553 spin_lock_irq(&md->lock); 934 spin_lock_irq(&md->lock);
554 while (ret) 935 while (ret)
555 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); 936 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
@@ -560,14 +941,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
560 941
561static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) 942static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
562{ 943{
944 int ret;
945 struct mmc_blk_data *md = mq->data;
946 struct mmc_card *card = md->queue.card;
947
948 mmc_claim_host(card->host);
949 ret = mmc_blk_part_switch(card, md);
950 if (ret) {
951 ret = 0;
952 goto out;
953 }
954
563 if (req->cmd_flags & REQ_DISCARD) { 955 if (req->cmd_flags & REQ_DISCARD) {
564 if (req->cmd_flags & REQ_SECURE) 956 if (req->cmd_flags & REQ_SECURE)
565 return mmc_blk_issue_secdiscard_rq(mq, req); 957 ret = mmc_blk_issue_secdiscard_rq(mq, req);
566 else 958 else
567 return mmc_blk_issue_discard_rq(mq, req); 959 ret = mmc_blk_issue_discard_rq(mq, req);
960 } else if (req->cmd_flags & REQ_FLUSH) {
961 ret = mmc_blk_issue_flush(mq, req);
568 } else { 962 } else {
569 return mmc_blk_issue_rw_rq(mq, req); 963 ret = mmc_blk_issue_rw_rq(mq, req);
570 } 964 }
965
966out:
967 mmc_release_host(card->host);
968 return ret;
571} 969}
572 970
573static inline int mmc_blk_readonly(struct mmc_card *card) 971static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -576,7 +974,11 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
576 !(card->csd.cmdclass & CCC_BLOCK_WRITE); 974 !(card->csd.cmdclass & CCC_BLOCK_WRITE);
577} 975}
578 976
579static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) 977static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
978 struct device *parent,
979 sector_t size,
980 bool default_ro,
981 const char *subname)
580{ 982{
581 struct mmc_blk_data *md; 983 struct mmc_blk_data *md;
582 int devidx, ret; 984 int devidx, ret;
@@ -592,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
592 goto out; 994 goto out;
593 } 995 }
594 996
997 /*
998 * !subname implies we are creating main mmc_blk_data that will be
999 * associated with mmc_card with mmc_set_drvdata. Due to device
1000 * partitions, devidx will not coincide with a per-physical card
 1001	 * index anymore, so we keep track of a name index.
1002 */
1003 if (!subname) {
1004 md->name_idx = find_first_zero_bit(name_use, max_devices);
1005 __set_bit(md->name_idx, name_use);
1006 }
1007 else
1008 md->name_idx = ((struct mmc_blk_data *)
1009 dev_to_disk(parent)->private_data)->name_idx;
595 1010
596 /* 1011 /*
597 * Set the read-only status based on the supported commands 1012 * Set the read-only status based on the supported commands
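(The name-index scheme described in the comment above, condensed into a self-contained sketch; the name_use bitmap and max_devices bound do exist in this file, but the helper and the MAX_DEVICES value shown here are hypothetical and only illustrate the allocation pattern.)

	#include <linux/bitops.h>

	#define MAX_DEVICES 256				/* illustrative bound */
	static DECLARE_BITMAP(name_use, MAX_DEVICES);	/* one bit per mmcblk<N> base name */

	static int name_idx_get(void)
	{
		int idx = find_first_zero_bit(name_use, MAX_DEVICES);

		if (idx < MAX_DEVICES)
			__set_bit(idx, name_use);	/* reserve mmcblk<idx> */
		return idx;
	}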
@@ -606,6 +1021,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
606 } 1021 }
607 1022
608 spin_lock_init(&md->lock); 1023 spin_lock_init(&md->lock);
1024 INIT_LIST_HEAD(&md->part);
609 md->usage = 1; 1025 md->usage = 1;
610 1026
611 ret = mmc_init_queue(&md->queue, card, &md->lock); 1027 ret = mmc_init_queue(&md->queue, card, &md->lock);
@@ -620,8 +1036,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
620 md->disk->fops = &mmc_bdops; 1036 md->disk->fops = &mmc_bdops;
621 md->disk->private_data = md; 1037 md->disk->private_data = md;
622 md->disk->queue = md->queue.queue; 1038 md->disk->queue = md->queue.queue;
623 md->disk->driverfs_dev = &card->dev; 1039 md->disk->driverfs_dev = parent;
624 set_disk_ro(md->disk, md->read_only); 1040 set_disk_ro(md->disk, md->read_only || default_ro);
625 1041
626 /* 1042 /*
627 * As discussed on lkml, GENHD_FL_REMOVABLE should: 1043 * As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -636,32 +1052,107 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
636 */ 1052 */
637 1053
638 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 1054 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
639 "mmcblk%d", devidx); 1055 "mmcblk%d%s", md->name_idx, subname ? subname : "");
640 1056
641 blk_queue_logical_block_size(md->queue.queue, 512); 1057 blk_queue_logical_block_size(md->queue.queue, 512);
1058 set_capacity(md->disk, size);
1059
1060 if (mmc_host_cmd23(card->host)) {
1061 if (mmc_card_mmc(card) ||
1062 (mmc_card_sd(card) &&
1063 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1064 md->flags |= MMC_BLK_CMD23;
1065 }
1066
1067 if (mmc_card_mmc(card) &&
1068 md->flags & MMC_BLK_CMD23 &&
1069 ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1070 card->ext_csd.rel_sectors)) {
1071 md->flags |= MMC_BLK_REL_WR;
1072 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1073 }
1074
1075 return md;
1076
1077 err_putdisk:
1078 put_disk(md->disk);
1079 err_kfree:
1080 kfree(md);
1081 out:
1082 return ERR_PTR(ret);
1083}
1084
1085static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1086{
1087 sector_t size;
1088 struct mmc_blk_data *md;
642 1089
643 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 1090 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
644 /* 1091 /*
 645	 * The EXT_CSD sector count is in number of 512 byte 1092	 * The EXT_CSD sector count is in number of 512 byte
646 * sectors. 1093 * sectors.
647 */ 1094 */
648 set_capacity(md->disk, card->ext_csd.sectors); 1095 size = card->ext_csd.sectors;
649 } else { 1096 } else {
650 /* 1097 /*
651 * The CSD capacity field is in units of read_blkbits. 1098 * The CSD capacity field is in units of read_blkbits.
652 * set_capacity takes units of 512 bytes. 1099 * set_capacity takes units of 512 bytes.
653 */ 1100 */
654 set_capacity(md->disk, 1101 size = card->csd.capacity << (card->csd.read_blkbits - 9);
655 card->csd.capacity << (card->csd.read_blkbits - 9));
656 } 1102 }
1103
1104 md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
657 return md; 1105 return md;
1106}
658 1107
659 err_putdisk: 1108static int mmc_blk_alloc_part(struct mmc_card *card,
660 put_disk(md->disk); 1109 struct mmc_blk_data *md,
661 err_kfree: 1110 unsigned int part_type,
662 kfree(md); 1111 sector_t size,
663 out: 1112 bool default_ro,
664 return ERR_PTR(ret); 1113 const char *subname)
1114{
1115 char cap_str[10];
1116 struct mmc_blk_data *part_md;
1117
1118 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1119 subname);
1120 if (IS_ERR(part_md))
1121 return PTR_ERR(part_md);
1122 part_md->part_type = part_type;
1123 list_add(&part_md->part, &md->part);
1124
1125 string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1126 cap_str, sizeof(cap_str));
1127 printk(KERN_INFO "%s: %s %s partition %u %s\n",
1128 part_md->disk->disk_name, mmc_card_id(card),
1129 mmc_card_name(card), part_md->part_type, cap_str);
1130 return 0;
1131}
1132
1133static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1134{
1135 int ret = 0;
1136
1137 if (!mmc_card_mmc(card))
1138 return 0;
1139
1140 if (card->ext_csd.boot_size) {
1141 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
1142 card->ext_csd.boot_size >> 9,
1143 true,
1144 "boot0");
1145 if (ret)
1146 return ret;
1147 ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
1148 card->ext_csd.boot_size >> 9,
1149 true,
1150 "boot1");
1151 if (ret)
1152 return ret;
1153 }
1154
1155 return ret;
665} 1156}
666 1157
667static int 1158static int
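(A worked example of the capacity conversion above; the numbers are chosen only for illustration.)

	/* e.g. a 1 GiB card reporting its CSD capacity in 1 KiB read blocks:
	 *   card->csd.capacity     = 1048576   (read blocks)
	 *   card->csd.read_blkbits = 10        (1 KiB per block)
	 * size = 1048576 << (10 - 9) = 2097152 sectors of 512 bytes = 1 GiB
	 */
	size = card->csd.capacity << (card->csd.read_blkbits - 9);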
@@ -682,9 +1173,81 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
682 return 0; 1173 return 0;
683} 1174}
684 1175
1176static void mmc_blk_remove_req(struct mmc_blk_data *md)
1177{
1178 if (md) {
1179 if (md->disk->flags & GENHD_FL_UP) {
1180 device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1181
1182 /* Stop new requests from getting into the queue */
1183 del_gendisk(md->disk);
1184 }
1185
1186 /* Then flush out any already in there */
1187 mmc_cleanup_queue(&md->queue);
1188 mmc_blk_put(md);
1189 }
1190}
1191
1192static void mmc_blk_remove_parts(struct mmc_card *card,
1193 struct mmc_blk_data *md)
1194{
1195 struct list_head *pos, *q;
1196 struct mmc_blk_data *part_md;
1197
1198 __clear_bit(md->name_idx, name_use);
1199 list_for_each_safe(pos, q, &md->part) {
1200 part_md = list_entry(pos, struct mmc_blk_data, part);
1201 list_del(pos);
1202 mmc_blk_remove_req(part_md);
1203 }
1204}
1205
1206static int mmc_add_disk(struct mmc_blk_data *md)
1207{
1208 int ret;
1209
1210 add_disk(md->disk);
1211 md->force_ro.show = force_ro_show;
1212 md->force_ro.store = force_ro_store;
1213 sysfs_attr_init(&md->force_ro.attr);
1214 md->force_ro.attr.name = "force_ro";
1215 md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1216 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1217 if (ret)
1218 del_gendisk(md->disk);
1219
1220 return ret;
1221}
1222
1223static const struct mmc_fixup blk_fixups[] =
1224{
1225 MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1226 MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1227 MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1228 MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1229 MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
1230
1231 /*
1232 * Some MMC cards experience performance degradation with CMD23
1233 * instead of CMD12-bounded multiblock transfers. For now we'll
 1234	 * blacklist what's bad...
1235 * - Certain Toshiba cards.
1236 *
1237 * N.B. This doesn't affect SD cards.
1238 */
1239 MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1240 MMC_QUIRK_BLK_NO_CMD23),
1241 MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1242 MMC_QUIRK_BLK_NO_CMD23),
1243 MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
1244 MMC_QUIRK_BLK_NO_CMD23),
1245 END_FIXUP
1246};
1247
685static int mmc_blk_probe(struct mmc_card *card) 1248static int mmc_blk_probe(struct mmc_card *card)
686{ 1249{
687 struct mmc_blk_data *md; 1250 struct mmc_blk_data *md, *part_md;
688 int err; 1251 int err;
689 char cap_str[10]; 1252 char cap_str[10];
690 1253
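(The quirk table above is applied by mmc_fixup_device() during probe, which sets bits in card->quirks; the condensed check below shows how such a bit would then gate CMD23 use for a transfer. The condition is simplified for illustration.)

	mmc_fixup_device(card, blk_fixups);	/* done once in mmc_blk_probe() */

	/* later, when building a request (condition simplified): */
	use_cmd23 = (md->flags & MMC_BLK_CMD23) &&
		    !(card->quirks & MMC_QUIRK_BLK_NO_CMD23);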
@@ -708,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card)
708 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 1271 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
709 cap_str, md->read_only ? "(ro)" : ""); 1272 cap_str, md->read_only ? "(ro)" : "");
710 1273
1274 if (mmc_blk_alloc_parts(card, md))
1275 goto out;
1276
711 mmc_set_drvdata(card, md); 1277 mmc_set_drvdata(card, md);
712 add_disk(md->disk); 1278 mmc_fixup_device(card, blk_fixups);
1279
1280 if (mmc_add_disk(md))
1281 goto out;
1282
1283 list_for_each_entry(part_md, &md->part, part) {
1284 if (mmc_add_disk(part_md))
1285 goto out;
1286 }
713 return 0; 1287 return 0;
714 1288
715 out: 1289 out:
716 mmc_cleanup_queue(&md->queue); 1290 mmc_blk_remove_parts(card, md);
717 mmc_blk_put(md); 1291 mmc_blk_remove_req(md);
718
719 return err; 1292 return err;
720} 1293}
721 1294
@@ -723,36 +1296,43 @@ static void mmc_blk_remove(struct mmc_card *card)
723{ 1296{
724 struct mmc_blk_data *md = mmc_get_drvdata(card); 1297 struct mmc_blk_data *md = mmc_get_drvdata(card);
725 1298
726 if (md) { 1299 mmc_blk_remove_parts(card, md);
727 /* Stop new requests from getting into the queue */ 1300 mmc_blk_remove_req(md);
728 del_gendisk(md->disk);
729
730 /* Then flush out any already in there */
731 mmc_cleanup_queue(&md->queue);
732
733 mmc_blk_put(md);
734 }
735 mmc_set_drvdata(card, NULL); 1301 mmc_set_drvdata(card, NULL);
736} 1302}
737 1303
738#ifdef CONFIG_PM 1304#ifdef CONFIG_PM
739static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) 1305static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
740{ 1306{
1307 struct mmc_blk_data *part_md;
741 struct mmc_blk_data *md = mmc_get_drvdata(card); 1308 struct mmc_blk_data *md = mmc_get_drvdata(card);
742 1309
743 if (md) { 1310 if (md) {
744 mmc_queue_suspend(&md->queue); 1311 mmc_queue_suspend(&md->queue);
1312 list_for_each_entry(part_md, &md->part, part) {
1313 mmc_queue_suspend(&part_md->queue);
1314 }
745 } 1315 }
746 return 0; 1316 return 0;
747} 1317}
748 1318
749static int mmc_blk_resume(struct mmc_card *card) 1319static int mmc_blk_resume(struct mmc_card *card)
750{ 1320{
1321 struct mmc_blk_data *part_md;
751 struct mmc_blk_data *md = mmc_get_drvdata(card); 1322 struct mmc_blk_data *md = mmc_get_drvdata(card);
752 1323
753 if (md) { 1324 if (md) {
754 mmc_blk_set_blksize(md, card); 1325 mmc_blk_set_blksize(md, card);
1326
1327 /*
1328 * Resume involves the card going into idle state,
1329 * so current partition is always the main one.
1330 */
1331 md->part_curr = md->part_type;
755 mmc_queue_resume(&md->queue); 1332 mmc_queue_resume(&md->queue);
1333 list_for_each_entry(part_md, &md->part, part) {
1334 mmc_queue_resume(&part_md->queue);
1335 }
756 } 1336 }
757 return 0; 1337 return 0;
758} 1338}
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index abc1a63bcc5e..233cdfae92f4 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -212,7 +212,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
212static int mmc_test_wait_busy(struct mmc_test_card *test) 212static int mmc_test_wait_busy(struct mmc_test_card *test)
213{ 213{
214 int ret, busy; 214 int ret, busy;
215 struct mmc_command cmd; 215 struct mmc_command cmd = {0};
216 216
217 busy = 0; 217 busy = 0;
218 do { 218 do {
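(The recurring change in this file swaps explicit memset() calls for {0} initializers; the two forms below have the same net effect for these command structures, which is why the remaining hunks are purely mechanical.)

	struct mmc_command a, b = {0};	/* b: every member starts out zero */

	memset(&a, 0, sizeof(a));	/* old style: same net effect for a */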
@@ -246,18 +246,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
246{ 246{
247 int ret; 247 int ret;
248 248
249 struct mmc_request mrq; 249 struct mmc_request mrq = {0};
250 struct mmc_command cmd; 250 struct mmc_command cmd = {0};
251 struct mmc_command stop; 251 struct mmc_command stop = {0};
252 struct mmc_data data; 252 struct mmc_data data = {0};
253 253
254 struct scatterlist sg; 254 struct scatterlist sg;
255 255
256 memset(&mrq, 0, sizeof(struct mmc_request));
257 memset(&cmd, 0, sizeof(struct mmc_command));
258 memset(&data, 0, sizeof(struct mmc_data));
259 memset(&stop, 0, sizeof(struct mmc_command));
260
261 mrq.cmd = &cmd; 256 mrq.cmd = &cmd;
262 mrq.data = &data; 257 mrq.data = &data;
263 mrq.stop = &stop; 258 mrq.stop = &stop;
@@ -731,15 +726,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
731 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, 726 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
732 unsigned blocks, unsigned blksz, int write) 727 unsigned blocks, unsigned blksz, int write)
733{ 728{
734 struct mmc_request mrq; 729 struct mmc_request mrq = {0};
735 struct mmc_command cmd; 730 struct mmc_command cmd = {0};
736 struct mmc_command stop; 731 struct mmc_command stop = {0};
737 struct mmc_data data; 732 struct mmc_data data = {0};
738
739 memset(&mrq, 0, sizeof(struct mmc_request));
740 memset(&cmd, 0, sizeof(struct mmc_command));
741 memset(&data, 0, sizeof(struct mmc_data));
742 memset(&stop, 0, sizeof(struct mmc_command));
743 733
744 mrq.cmd = &cmd; 734 mrq.cmd = &cmd;
745 mrq.data = &data; 735 mrq.data = &data;
@@ -761,18 +751,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
761static int mmc_test_broken_transfer(struct mmc_test_card *test, 751static int mmc_test_broken_transfer(struct mmc_test_card *test,
762 unsigned blocks, unsigned blksz, int write) 752 unsigned blocks, unsigned blksz, int write)
763{ 753{
764 struct mmc_request mrq; 754 struct mmc_request mrq = {0};
765 struct mmc_command cmd; 755 struct mmc_command cmd = {0};
766 struct mmc_command stop; 756 struct mmc_command stop = {0};
767 struct mmc_data data; 757 struct mmc_data data = {0};
768 758
769 struct scatterlist sg; 759 struct scatterlist sg;
770 760
771 memset(&mrq, 0, sizeof(struct mmc_request));
772 memset(&cmd, 0, sizeof(struct mmc_command));
773 memset(&data, 0, sizeof(struct mmc_data));
774 memset(&stop, 0, sizeof(struct mmc_command));
775
776 mrq.cmd = &cmd; 761 mrq.cmd = &cmd;
777 mrq.data = &data; 762 mrq.data = &data;
778 mrq.stop = &stop; 763 mrq.stop = &stop;
@@ -1401,8 +1386,9 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1401 */ 1386 */
1402static int mmc_test_area_fill(struct mmc_test_card *test) 1387static int mmc_test_area_fill(struct mmc_test_card *test)
1403{ 1388{
1404 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, 1389 struct mmc_test_area *t = &test->area;
1405 1, 0, 0); 1390
1391 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1406} 1392}
1407 1393
1408/* 1394/*
@@ -1415,7 +1401,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
1415 if (!mmc_can_erase(test->card)) 1401 if (!mmc_can_erase(test->card))
1416 return 0; 1402 return 0;
1417 1403
1418 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9, 1404 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1419 MMC_ERASE_ARG); 1405 MMC_ERASE_ARG);
1420} 1406}
1421 1407
@@ -1542,8 +1528,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1542static int mmc_test_best_performance(struct mmc_test_card *test, int write, 1528static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1543 int max_scatter) 1529 int max_scatter)
1544{ 1530{
1545 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, 1531 struct mmc_test_area *t = &test->area;
1546 write, max_scatter, 1); 1532
1533 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1534 max_scatter, 1);
1547} 1535}
1548 1536
1549/* 1537/*
@@ -1583,18 +1571,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1583 */ 1571 */
1584static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1572static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1585{ 1573{
1574 struct mmc_test_area *t = &test->area;
1586 unsigned long sz; 1575 unsigned long sz;
1587 unsigned int dev_addr; 1576 unsigned int dev_addr;
1588 int ret; 1577 int ret;
1589 1578
1590 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1579 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1591 dev_addr = test->area.dev_addr + (sz >> 9); 1580 dev_addr = t->dev_addr + (sz >> 9);
1592 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1581 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1593 if (ret) 1582 if (ret)
1594 return ret; 1583 return ret;
1595 } 1584 }
1596 sz = test->area.max_tfr; 1585 sz = t->max_tfr;
1597 dev_addr = test->area.dev_addr; 1586 dev_addr = t->dev_addr;
1598 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); 1587 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1599} 1588}
1600 1589
@@ -1603,6 +1592,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1603 */ 1592 */
1604static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1593static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1605{ 1594{
1595 struct mmc_test_area *t = &test->area;
1606 unsigned long sz; 1596 unsigned long sz;
1607 unsigned int dev_addr; 1597 unsigned int dev_addr;
1608 int ret; 1598 int ret;
@@ -1610,8 +1600,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1610 ret = mmc_test_area_erase(test); 1600 ret = mmc_test_area_erase(test);
1611 if (ret) 1601 if (ret)
1612 return ret; 1602 return ret;
1613 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1603 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1614 dev_addr = test->area.dev_addr + (sz >> 9); 1604 dev_addr = t->dev_addr + (sz >> 9);
1615 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1605 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1616 if (ret) 1606 if (ret)
1617 return ret; 1607 return ret;
@@ -1619,8 +1609,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1619 ret = mmc_test_area_erase(test); 1609 ret = mmc_test_area_erase(test);
1620 if (ret) 1610 if (ret)
1621 return ret; 1611 return ret;
1622 sz = test->area.max_tfr; 1612 sz = t->max_tfr;
1623 dev_addr = test->area.dev_addr; 1613 dev_addr = t->dev_addr;
1624 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); 1614 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1625} 1615}
1626 1616
@@ -1629,6 +1619,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1629 */ 1619 */
1630static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1620static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1631{ 1621{
1622 struct mmc_test_area *t = &test->area;
1632 unsigned long sz; 1623 unsigned long sz;
1633 unsigned int dev_addr; 1624 unsigned int dev_addr;
1634 struct timespec ts1, ts2; 1625 struct timespec ts1, ts2;
@@ -1640,8 +1631,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1640 if (!mmc_can_erase(test->card)) 1631 if (!mmc_can_erase(test->card))
1641 return RESULT_UNSUP_HOST; 1632 return RESULT_UNSUP_HOST;
1642 1633
1643 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { 1634 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1644 dev_addr = test->area.dev_addr + (sz >> 9); 1635 dev_addr = t->dev_addr + (sz >> 9);
1645 getnstimeofday(&ts1); 1636 getnstimeofday(&ts1);
1646 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1637 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1647 if (ret) 1638 if (ret)
@@ -1649,7 +1640,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1649 getnstimeofday(&ts2); 1640 getnstimeofday(&ts2);
1650 mmc_test_print_rate(test, sz, &ts1, &ts2); 1641 mmc_test_print_rate(test, sz, &ts1, &ts2);
1651 } 1642 }
1652 dev_addr = test->area.dev_addr; 1643 dev_addr = t->dev_addr;
1653 getnstimeofday(&ts1); 1644 getnstimeofday(&ts1);
1654 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); 1645 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1655 if (ret) 1646 if (ret)
@@ -1661,12 +1652,13 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1661 1652
1662static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) 1653static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1663{ 1654{
1655 struct mmc_test_area *t = &test->area;
1664 unsigned int dev_addr, i, cnt; 1656 unsigned int dev_addr, i, cnt;
1665 struct timespec ts1, ts2; 1657 struct timespec ts1, ts2;
1666 int ret; 1658 int ret;
1667 1659
1668 cnt = test->area.max_sz / sz; 1660 cnt = t->max_sz / sz;
1669 dev_addr = test->area.dev_addr; 1661 dev_addr = t->dev_addr;
1670 getnstimeofday(&ts1); 1662 getnstimeofday(&ts1);
1671 for (i = 0; i < cnt; i++) { 1663 for (i = 0; i < cnt; i++) {
1672 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); 1664 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
@@ -1684,20 +1676,22 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1684 */ 1676 */
1685static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1677static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1686{ 1678{
1679 struct mmc_test_area *t = &test->area;
1687 unsigned long sz; 1680 unsigned long sz;
1688 int ret; 1681 int ret;
1689 1682
1690 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1683 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1691 ret = mmc_test_seq_read_perf(test, sz); 1684 ret = mmc_test_seq_read_perf(test, sz);
1692 if (ret) 1685 if (ret)
1693 return ret; 1686 return ret;
1694 } 1687 }
1695 sz = test->area.max_tfr; 1688 sz = t->max_tfr;
1696 return mmc_test_seq_read_perf(test, sz); 1689 return mmc_test_seq_read_perf(test, sz);
1697} 1690}
1698 1691
1699static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) 1692static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1700{ 1693{
1694 struct mmc_test_area *t = &test->area;
1701 unsigned int dev_addr, i, cnt; 1695 unsigned int dev_addr, i, cnt;
1702 struct timespec ts1, ts2; 1696 struct timespec ts1, ts2;
1703 int ret; 1697 int ret;
@@ -1705,8 +1699,8 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1705 ret = mmc_test_area_erase(test); 1699 ret = mmc_test_area_erase(test);
1706 if (ret) 1700 if (ret)
1707 return ret; 1701 return ret;
1708 cnt = test->area.max_sz / sz; 1702 cnt = t->max_sz / sz;
1709 dev_addr = test->area.dev_addr; 1703 dev_addr = t->dev_addr;
1710 getnstimeofday(&ts1); 1704 getnstimeofday(&ts1);
1711 for (i = 0; i < cnt; i++) { 1705 for (i = 0; i < cnt; i++) {
1712 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); 1706 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
@@ -1724,15 +1718,16 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1724 */ 1718 */
1725static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1719static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1726{ 1720{
1721 struct mmc_test_area *t = &test->area;
1727 unsigned long sz; 1722 unsigned long sz;
1728 int ret; 1723 int ret;
1729 1724
1730 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1725 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1731 ret = mmc_test_seq_write_perf(test, sz); 1726 ret = mmc_test_seq_write_perf(test, sz);
1732 if (ret) 1727 if (ret)
1733 return ret; 1728 return ret;
1734 } 1729 }
1735 sz = test->area.max_tfr; 1730 sz = t->max_tfr;
1736 return mmc_test_seq_write_perf(test, sz); 1731 return mmc_test_seq_write_perf(test, sz);
1737} 1732}
1738 1733
@@ -1741,6 +1736,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1741 */ 1736 */
1742static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1737static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1743{ 1738{
1739 struct mmc_test_area *t = &test->area;
1744 unsigned long sz; 1740 unsigned long sz;
1745 unsigned int dev_addr, i, cnt; 1741 unsigned int dev_addr, i, cnt;
1746 struct timespec ts1, ts2; 1742 struct timespec ts1, ts2;
@@ -1752,15 +1748,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1752 if (!mmc_can_erase(test->card)) 1748 if (!mmc_can_erase(test->card))
1753 return RESULT_UNSUP_HOST; 1749 return RESULT_UNSUP_HOST;
1754 1750
1755 for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { 1751 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1756 ret = mmc_test_area_erase(test); 1752 ret = mmc_test_area_erase(test);
1757 if (ret) 1753 if (ret)
1758 return ret; 1754 return ret;
1759 ret = mmc_test_area_fill(test); 1755 ret = mmc_test_area_fill(test);
1760 if (ret) 1756 if (ret)
1761 return ret; 1757 return ret;
1762 cnt = test->area.max_sz / sz; 1758 cnt = t->max_sz / sz;
1763 dev_addr = test->area.dev_addr; 1759 dev_addr = t->dev_addr;
1764 getnstimeofday(&ts1); 1760 getnstimeofday(&ts1);
1765 for (i = 0; i < cnt; i++) { 1761 for (i = 0; i < cnt; i++) {
1766 ret = mmc_erase(test->card, dev_addr, sz >> 9, 1762 ret = mmc_erase(test->card, dev_addr, sz >> 9,
@@ -1823,11 +1819,12 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1823 1819
1824static int mmc_test_random_perf(struct mmc_test_card *test, int write) 1820static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1825{ 1821{
1822 struct mmc_test_area *t = &test->area;
1826 unsigned int next; 1823 unsigned int next;
1827 unsigned long sz; 1824 unsigned long sz;
1828 int ret; 1825 int ret;
1829 1826
1830 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { 1827 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1831 /* 1828 /*
1832 * When writing, try to get more consistent results by running 1829 * When writing, try to get more consistent results by running
1833 * the test twice with exactly the same I/O but outputting the 1830 * the test twice with exactly the same I/O but outputting the
@@ -1844,7 +1841,7 @@ static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1844 if (ret) 1841 if (ret)
1845 return ret; 1842 return ret;
1846 } 1843 }
1847 sz = test->area.max_tfr; 1844 sz = t->max_tfr;
1848 if (write) { 1845 if (write) {
1849 next = rnd_next; 1846 next = rnd_next;
1850 ret = mmc_test_rnd_perf(test, write, 0, sz); 1847 ret = mmc_test_rnd_perf(test, write, 0, sz);
@@ -1874,17 +1871,18 @@ static int mmc_test_random_write_perf(struct mmc_test_card *test)
1874static int mmc_test_seq_perf(struct mmc_test_card *test, int write, 1871static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1875 unsigned int tot_sz, int max_scatter) 1872 unsigned int tot_sz, int max_scatter)
1876{ 1873{
1874 struct mmc_test_area *t = &test->area;
1877 unsigned int dev_addr, i, cnt, sz, ssz; 1875 unsigned int dev_addr, i, cnt, sz, ssz;
1878 struct timespec ts1, ts2; 1876 struct timespec ts1, ts2;
1879 int ret; 1877 int ret;
1880 1878
1881 sz = test->area.max_tfr; 1879 sz = t->max_tfr;
1880
1882 /* 1881 /*
1883 * In the case of a maximally scattered transfer, the maximum transfer 1882 * In the case of a maximally scattered transfer, the maximum transfer
1884 * size is further limited by using PAGE_SIZE segments. 1883 * size is further limited by using PAGE_SIZE segments.
1885 */ 1884 */
1886 if (max_scatter) { 1885 if (max_scatter) {
1887 struct mmc_test_area *t = &test->area;
1888 unsigned long max_tfr; 1886 unsigned long max_tfr;
1889 1887
1890 if (t->max_seg_sz >= PAGE_SIZE) 1888 if (t->max_seg_sz >= PAGE_SIZE)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 2ae727568df9..c07322c2658c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -343,18 +343,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
343 */ 343 */
344void mmc_queue_bounce_pre(struct mmc_queue *mq) 344void mmc_queue_bounce_pre(struct mmc_queue *mq)
345{ 345{
346 unsigned long flags;
347
348 if (!mq->bounce_buf) 346 if (!mq->bounce_buf)
349 return; 347 return;
350 348
351 if (rq_data_dir(mq->req) != WRITE) 349 if (rq_data_dir(mq->req) != WRITE)
352 return; 350 return;
353 351
354 local_irq_save(flags);
355 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, 352 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
356 mq->bounce_buf, mq->sg[0].length); 353 mq->bounce_buf, mq->sg[0].length);
357 local_irq_restore(flags);
358} 354}
359 355
360/* 356/*
@@ -363,17 +359,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq)
363 */ 359 */
364void mmc_queue_bounce_post(struct mmc_queue *mq) 360void mmc_queue_bounce_post(struct mmc_queue *mq)
365{ 361{
366 unsigned long flags;
367
368 if (!mq->bounce_buf) 362 if (!mq->bounce_buf)
369 return; 363 return;
370 364
371 if (rq_data_dir(mq->req) != READ) 365 if (rq_data_dir(mq->req) != READ)
372 return; 366 return;
373 367
374 local_irq_save(flags);
375 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, 368 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
376 mq->bounce_buf, mq->sg[0].length); 369 mq->bounce_buf, mq->sg[0].length);
377 local_irq_restore(flags);
378} 370}
379 371
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index d6d62fd07ee9..393d817ed040 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -274,8 +274,12 @@ int mmc_add_card(struct mmc_card *card)
274 break; 274 break;
275 case MMC_TYPE_SD: 275 case MMC_TYPE_SD:
276 type = "SD"; 276 type = "SD";
277 if (mmc_card_blockaddr(card)) 277 if (mmc_card_blockaddr(card)) {
278 type = "SDHC"; 278 if (mmc_card_ext_capacity(card))
279 type = "SDXC";
280 else
281 type = "SDHC";
282 }
279 break; 283 break;
280 case MMC_TYPE_SDIO: 284 case MMC_TYPE_SDIO:
281 type = "SDIO"; 285 type = "SDIO";
@@ -299,7 +303,8 @@ int mmc_add_card(struct mmc_card *card)
299 } else { 303 } else {
300 printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", 304 printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
301 mmc_hostname(card->host), 305 mmc_hostname(card->host),
302 mmc_card_highspeed(card) ? "high speed " : "", 306 mmc_sd_card_uhs(card) ? "ultra high speed " :
307 (mmc_card_highspeed(card) ? "high speed " : ""),
303 mmc_card_ddr_mode(card) ? "DDR " : "", 308 mmc_card_ddr_mode(card) ? "DDR " : "",
304 type, card->rca); 309 type, card->rca);
305 } 310 }
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1f453acc8682..68091dda3f31 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -236,12 +236,10 @@ EXPORT_SYMBOL(mmc_wait_for_req);
236 */ 236 */
237int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) 237int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
238{ 238{
239 struct mmc_request mrq; 239 struct mmc_request mrq = {0};
240 240
241 WARN_ON(!host->claimed); 241 WARN_ON(!host->claimed);
242 242
243 memset(&mrq, 0, sizeof(struct mmc_request));
244
245 memset(cmd->resp, 0, sizeof(cmd->resp)); 243 memset(cmd->resp, 0, sizeof(cmd->resp));
246 cmd->retries = retries; 244 cmd->retries = retries;
247 245
@@ -720,22 +718,12 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
720} 718}
721 719
722/* 720/*
723 * Change data bus width and DDR mode of a host.
724 */
725void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
726 unsigned int ddr)
727{
728 host->ios.bus_width = width;
729 host->ios.ddr = ddr;
730 mmc_set_ios(host);
731}
732
733/*
734 * Change data bus width of a host. 721 * Change data bus width of a host.
735 */ 722 */
736void mmc_set_bus_width(struct mmc_host *host, unsigned int width) 723void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
737{ 724{
738 mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE); 725 host->ios.bus_width = width;
726 mmc_set_ios(host);
739} 727}
740 728
741/** 729/**
@@ -944,6 +932,38 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
944 return ocr; 932 return ocr;
945} 933}
946 934
935int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
936{
937 struct mmc_command cmd = {0};
938 int err = 0;
939
940 BUG_ON(!host);
941
942 /*
943 * Send CMD11 only if the request is to switch the card to
944 * 1.8V signalling.
945 */
946 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
947 cmd.opcode = SD_SWITCH_VOLTAGE;
948 cmd.arg = 0;
949 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
950
951 err = mmc_wait_for_cmd(host, &cmd, 0);
952 if (err)
953 return err;
954
955 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
956 return -EIO;
957 }
958
959 host->ios.signal_voltage = signal_voltage;
960
961 if (host->ops->start_signal_voltage_switch)
962 err = host->ops->start_signal_voltage_switch(host, &host->ios);
963
964 return err;
965}
966
947/* 967/*
948 * Select timing parameters for host. 968 * Select timing parameters for host.
949 */ 969 */
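(A usage sketch of the new helper as a UHS-capable caller might invoke it; the error handling is illustrative, and MMC_SIGNAL_VOLTAGE_180 is assumed to be defined alongside the _330/_120 constants referenced in this hunk.)

	/* ask for 1.8V signalling, sending CMD11 to the card first */
	err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
	if (err)
		return err;	/* card or host refused the switch */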
@@ -954,6 +974,15 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
954} 974}
955 975
956/* 976/*
977 * Select appropriate driver type for host.
978 */
979void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
980{
981 host->ios.drv_type = drv_type;
982 mmc_set_ios(host);
983}
984
985/*
957 * Apply power to the MMC stack. This is a two-stage process. 986 * Apply power to the MMC stack. This is a two-stage process.
958 * First, we enable power to the card without the clock running. 987 * First, we enable power to the card without the clock running.
959 * We then wait a bit for the power to stabilise. Finally, 988 * We then wait a bit for the power to stabilise. Finally,
@@ -1187,9 +1216,8 @@ void mmc_init_erase(struct mmc_card *card)
1187 } 1216 }
1188} 1217}
1189 1218
1190static void mmc_set_mmc_erase_timeout(struct mmc_card *card, 1219static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1191 struct mmc_command *cmd, 1220 unsigned int arg, unsigned int qty)
1192 unsigned int arg, unsigned int qty)
1193{ 1221{
1194 unsigned int erase_timeout; 1222 unsigned int erase_timeout;
1195 1223
@@ -1246,44 +1274,48 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,
1246 if (mmc_host_is_spi(card->host) && erase_timeout < 1000) 1274 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1247 erase_timeout = 1000; 1275 erase_timeout = 1000;
1248 1276
1249 cmd->erase_timeout = erase_timeout; 1277 return erase_timeout;
1250} 1278}
1251 1279
1252static void mmc_set_sd_erase_timeout(struct mmc_card *card, 1280static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1253 struct mmc_command *cmd, unsigned int arg, 1281 unsigned int arg,
1254 unsigned int qty) 1282 unsigned int qty)
1255{ 1283{
1284 unsigned int erase_timeout;
1285
1256 if (card->ssr.erase_timeout) { 1286 if (card->ssr.erase_timeout) {
1257 /* Erase timeout specified in SD Status Register (SSR) */ 1287 /* Erase timeout specified in SD Status Register (SSR) */
1258 cmd->erase_timeout = card->ssr.erase_timeout * qty + 1288 erase_timeout = card->ssr.erase_timeout * qty +
1259 card->ssr.erase_offset; 1289 card->ssr.erase_offset;
1260 } else { 1290 } else {
1261 /* 1291 /*
1262 * Erase timeout not specified in SD Status Register (SSR) so 1292 * Erase timeout not specified in SD Status Register (SSR) so
1263 * use 250ms per write block. 1293 * use 250ms per write block.
1264 */ 1294 */
1265 cmd->erase_timeout = 250 * qty; 1295 erase_timeout = 250 * qty;
1266 } 1296 }
1267 1297
1268 /* Must not be less than 1 second */ 1298 /* Must not be less than 1 second */
1269 if (cmd->erase_timeout < 1000) 1299 if (erase_timeout < 1000)
1270 cmd->erase_timeout = 1000; 1300 erase_timeout = 1000;
1301
1302 return erase_timeout;
1271} 1303}
1272 1304
1273static void mmc_set_erase_timeout(struct mmc_card *card, 1305static unsigned int mmc_erase_timeout(struct mmc_card *card,
1274 struct mmc_command *cmd, unsigned int arg, 1306 unsigned int arg,
1275 unsigned int qty) 1307 unsigned int qty)
1276{ 1308{
1277 if (mmc_card_sd(card)) 1309 if (mmc_card_sd(card))
1278 mmc_set_sd_erase_timeout(card, cmd, arg, qty); 1310 return mmc_sd_erase_timeout(card, arg, qty);
1279 else 1311 else
1280 mmc_set_mmc_erase_timeout(card, cmd, arg, qty); 1312 return mmc_mmc_erase_timeout(card, arg, qty);
1281} 1313}
1282 1314
1283static int mmc_do_erase(struct mmc_card *card, unsigned int from, 1315static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1284 unsigned int to, unsigned int arg) 1316 unsigned int to, unsigned int arg)
1285{ 1317{
1286 struct mmc_command cmd; 1318 struct mmc_command cmd = {0};
1287 unsigned int qty = 0; 1319 unsigned int qty = 0;
1288 int err; 1320 int err;
1289 1321
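(A worked example of the SD branch above for a card whose SSR does not report an erase timeout; the quantity is illustrative.)

	/* qty = 3 write blocks, no SSR timeout:
	 *   250 ms * 3 = 750 ms  ->  raised to the 1000 ms minimum
	 */
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, MMC_ERASE_ARG, 3);	/* == 1000 */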
@@ -1317,7 +1349,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1317 to <<= 9; 1349 to <<= 9;
1318 } 1350 }
1319 1351
1320 memset(&cmd, 0, sizeof(struct mmc_command));
1321 if (mmc_card_sd(card)) 1352 if (mmc_card_sd(card))
1322 cmd.opcode = SD_ERASE_WR_BLK_START; 1353 cmd.opcode = SD_ERASE_WR_BLK_START;
1323 else 1354 else
@@ -1351,7 +1382,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1351 cmd.opcode = MMC_ERASE; 1382 cmd.opcode = MMC_ERASE;
1352 cmd.arg = arg; 1383 cmd.arg = arg;
1353 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 1384 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1354 mmc_set_erase_timeout(card, &cmd, arg, qty); 1385 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1355 err = mmc_wait_for_cmd(card->host, &cmd, 0); 1386 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1356 if (err) { 1387 if (err) {
1357 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", 1388 printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n",
@@ -1487,12 +1518,11 @@ EXPORT_SYMBOL(mmc_erase_group_aligned);
1487 1518
1488int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) 1519int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1489{ 1520{
1490 struct mmc_command cmd; 1521 struct mmc_command cmd = {0};
1491 1522
1492 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) 1523 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1493 return 0; 1524 return 0;
1494 1525
1495 memset(&cmd, 0, sizeof(struct mmc_command));
1496 cmd.opcode = MMC_SET_BLOCKLEN; 1526 cmd.opcode = MMC_SET_BLOCKLEN;
1497 cmd.arg = blocklen; 1527 cmd.arg = blocklen;
1498 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; 1528 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -1578,7 +1608,7 @@ void mmc_rescan(struct work_struct *work)
1578 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1608 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1579 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) 1609 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
1580 break; 1610 break;
1581 if (freqs[i] < host->f_min) 1611 if (freqs[i] <= host->f_min)
1582 break; 1612 break;
1583 } 1613 }
1584 mmc_release_host(host); 1614 mmc_release_host(host);
@@ -1746,7 +1776,7 @@ int mmc_suspend_host(struct mmc_host *host)
1746 } 1776 }
1747 mmc_bus_put(host); 1777 mmc_bus_put(host);
1748 1778
1749 if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) 1779 if (!err && !mmc_card_keep_power(host))
1750 mmc_power_off(host); 1780 mmc_power_off(host);
1751 1781
1752 return err; 1782 return err;
@@ -1764,7 +1794,7 @@ int mmc_resume_host(struct mmc_host *host)
1764 1794
1765 mmc_bus_get(host); 1795 mmc_bus_get(host);
1766 if (host->bus_ops && !host->bus_dead) { 1796 if (host->bus_ops && !host->bus_dead) {
1767 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { 1797 if (!mmc_card_keep_power(host)) {
1768 mmc_power_up(host); 1798 mmc_power_up(host);
1769 mmc_select_voltage(host, host->ocr); 1799 mmc_select_voltage(host, host->ocr);
1770 /* 1800 /*
@@ -1789,6 +1819,7 @@ int mmc_resume_host(struct mmc_host *host)
1789 err = 0; 1819 err = 0;
1790 } 1820 }
1791 } 1821 }
1822 host->pm_flags &= ~MMC_PM_KEEP_POWER;
1792 mmc_bus_put(host); 1823 mmc_bus_put(host);
1793 1824
1794 return err; 1825 return err;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 20b1c0831eac..d9411ed2a39b 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -38,10 +38,11 @@ void mmc_ungate_clock(struct mmc_host *host);
38void mmc_set_ungated(struct mmc_host *host); 38void mmc_set_ungated(struct mmc_host *host);
39void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); 39void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
40void mmc_set_bus_width(struct mmc_host *host, unsigned int width); 40void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
41void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
42 unsigned int ddr);
43u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); 41u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
42int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage,
43 bool cmd11);
44void mmc_set_timing(struct mmc_host *host, unsigned int timing); 44void mmc_set_timing(struct mmc_host *host, unsigned int timing);
45void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
45 46
46static inline void mmc_delay(unsigned int ms) 47static inline void mmc_delay(unsigned int ms)
47{ 48{
@@ -61,8 +62,6 @@ int mmc_attach_mmc(struct mmc_host *host);
61int mmc_attach_sd(struct mmc_host *host); 62int mmc_attach_sd(struct mmc_host *host);
62int mmc_attach_sdio(struct mmc_host *host); 63int mmc_attach_sdio(struct mmc_host *host);
63 64
64void mmc_fixup_device(struct mmc_card *card);
65
66/* Module parameters */ 65/* Module parameters */
67extern int use_spi_crc; 66extern int use_spi_crc;
68 67
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 461e6a17fb90..b29d3e8fd3a2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -325,12 +325,12 @@ int mmc_add_host(struct mmc_host *host)
325 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && 325 WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
326 !host->ops->enable_sdio_irq); 326 !host->ops->enable_sdio_irq);
327 327
328 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
329
330 err = device_add(&host->class_dev); 328 err = device_add(&host->class_dev);
331 if (err) 329 if (err)
332 return err; 330 return err;
333 331
332 led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
333
334#ifdef CONFIG_DEBUG_FS 334#ifdef CONFIG_DEBUG_FS
335 mmc_add_host_debugfs(host); 335 mmc_add_host_debugfs(host);
336#endif 336#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 772d0d0a541b..2a7e43bc796d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -20,6 +20,7 @@
20#include "core.h" 20#include "core.h"
21#include "bus.h" 21#include "bus.h"
22#include "mmc_ops.h" 22#include "mmc_ops.h"
23#include "sd_ops.h"
23 24
24static const unsigned int tran_exp[] = { 25static const unsigned int tran_exp[] = {
25 10000, 100000, 1000000, 10000000, 26 10000, 100000, 1000000, 10000000,
@@ -173,14 +174,17 @@ static int mmc_decode_csd(struct mmc_card *card)
173} 174}
174 175
175/* 176/*
176 * Read and decode extended CSD. 177 * Read extended CSD.
177 */ 178 */
178static int mmc_read_ext_csd(struct mmc_card *card) 179static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
179{ 180{
180 int err; 181 int err;
181 u8 *ext_csd; 182 u8 *ext_csd;
182 183
183 BUG_ON(!card); 184 BUG_ON(!card);
185 BUG_ON(!new_ext_csd);
186
187 *new_ext_csd = NULL;
184 188
185 if (card->csd.mmca_vsn < CSD_SPEC_VER_4) 189 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
186 return 0; 190 return 0;
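(With fetch and decode now split, the caller owns the raw buffer; the sequence used later in mmc_init_card() reduces to roughly the following.)

	u8 *ext_csd = NULL;

	err = mmc_get_ext_csd(card, &ext_csd);		/* may leave ext_csd NULL on old cards */
	if (!err)
		err = mmc_read_ext_csd(card, ext_csd);	/* decode into card->ext_csd */
	mmc_free_ext_csd(ext_csd);			/* caller frees the raw buffer */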
@@ -198,12 +202,15 @@ static int mmc_read_ext_csd(struct mmc_card *card)
198 202
199 err = mmc_send_ext_csd(card, ext_csd); 203 err = mmc_send_ext_csd(card, ext_csd);
200 if (err) { 204 if (err) {
205 kfree(ext_csd);
206 *new_ext_csd = NULL;
207
201 /* If the host or the card can't do the switch, 208 /* If the host or the card can't do the switch,
202 * fail more gracefully. */ 209 * fail more gracefully. */
203 if ((err != -EINVAL) 210 if ((err != -EINVAL)
204 && (err != -ENOSYS) 211 && (err != -ENOSYS)
205 && (err != -EFAULT)) 212 && (err != -EFAULT))
206 goto out; 213 return err;
207 214
208 /* 215 /*
209 * High capacity cards should have this "magic" size 216 * High capacity cards should have this "magic" size
@@ -221,9 +228,23 @@ static int mmc_read_ext_csd(struct mmc_card *card)
221 mmc_hostname(card->host)); 228 mmc_hostname(card->host));
222 err = 0; 229 err = 0;
223 } 230 }
231 } else
232 *new_ext_csd = ext_csd;
224 233
225 goto out; 234 return err;
226 } 235}
236
237/*
238 * Decode extended CSD.
239 */
240static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
241{
242 int err = 0;
243
244 BUG_ON(!card);
245
246 if (!ext_csd)
247 return 0;
227 248
228 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ 249 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
229 if (card->csd.structure == 3) { 250 if (card->csd.structure == 3) {
@@ -288,6 +309,10 @@ static int mmc_read_ext_csd(struct mmc_card *card)
288 309
289 if (card->ext_csd.rev >= 3) { 310 if (card->ext_csd.rev >= 3) {
290 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; 311 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
312 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
313
314 /* EXT_CSD value is in units of 10ms, but we store in ms */
315 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
291 316
292 /* Sleep / awake timeout in 100ns units */ 317 /* Sleep / awake timeout in 100ns units */
293 if (sa_shift > 0 && sa_shift <= 0x17) 318 if (sa_shift > 0 && sa_shift <= 0x17)
@@ -299,6 +324,14 @@ static int mmc_read_ext_csd(struct mmc_card *card)
299 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; 324 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
300 card->ext_csd.hc_erase_size = 325 card->ext_csd.hc_erase_size =
301 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; 326 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
327
328 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
329
330 /*
331 * There are two boot regions of equal size, defined in
332 * multiples of 128K.
333 */
334 card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
302 } 335 }
303 336
304 if (card->ext_csd.rev >= 4) { 337 if (card->ext_csd.rev >= 4) {
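(Two unit conversions are introduced above; worked examples with illustrative register values.)

	/* EXT_CSD_PART_SWITCH_TIME = 3  ->  part_time = 10 * 3  = 30 ms  */
	/* EXT_CSD_BOOT_MULT        = 8  ->  boot_size = 8 << 17 = 1 MiB
	 *                                   (two boot areas of 8 * 128 KiB each) */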
@@ -350,14 +383,78 @@ static int mmc_read_ext_csd(struct mmc_card *card)
350 ext_csd[EXT_CSD_TRIM_MULT]; 383 ext_csd[EXT_CSD_TRIM_MULT];
351 } 384 }
352 385
386 if (card->ext_csd.rev >= 5)
387 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
388
353 if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) 389 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
354 card->erased_byte = 0xFF; 390 card->erased_byte = 0xFF;
355 else 391 else
356 card->erased_byte = 0x0; 392 card->erased_byte = 0x0;
357 393
358out: 394out:
395 return err;
396}
397
398static inline void mmc_free_ext_csd(u8 *ext_csd)
399{
359 kfree(ext_csd); 400 kfree(ext_csd);
401}
402
403
404static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
405 unsigned bus_width)
406{
407 u8 *bw_ext_csd;
408 int err;
409
410 err = mmc_get_ext_csd(card, &bw_ext_csd);
411 if (err)
412 return err;
413
414 if ((ext_csd == NULL || bw_ext_csd == NULL)) {
415 if (bus_width != MMC_BUS_WIDTH_1)
416 err = -EINVAL;
417 goto out;
418 }
360 419
420 if (bus_width == MMC_BUS_WIDTH_1)
421 goto out;
422
423 /* only compare read only fields */
424 err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
425 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
426 (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
427 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
428 (ext_csd[EXT_CSD_REV] ==
429 bw_ext_csd[EXT_CSD_REV]) &&
430 (ext_csd[EXT_CSD_STRUCTURE] ==
431 bw_ext_csd[EXT_CSD_STRUCTURE]) &&
432 (ext_csd[EXT_CSD_CARD_TYPE] ==
433 bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
434 (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
435 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
436 (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
437 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
438 (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
439 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
440 (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
441 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
442 (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
443 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
444 (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
445 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
446 (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
447 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
448 (ext_csd[EXT_CSD_TRIM_MULT] ==
449 bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
450 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
451 &bw_ext_csd[EXT_CSD_SEC_CNT],
452 4) != 0);
453 if (err)
454 err = -EINVAL;
455
456out:
457 mmc_free_ext_csd(bw_ext_csd);
361 return err; 458 return err;
362} 459}
363 460
@@ -422,6 +519,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
422 u32 cid[4]; 519 u32 cid[4];
423 unsigned int max_dtr; 520 unsigned int max_dtr;
424 u32 rocr; 521 u32 rocr;
522 u8 *ext_csd = NULL;
425 523
426 BUG_ON(!host); 524 BUG_ON(!host);
427 WARN_ON(!host->claimed); 525 WARN_ON(!host->claimed);
@@ -520,7 +618,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
520 /* 618 /*
521 * Fetch and process extended CSD. 619 * Fetch and process extended CSD.
522 */ 620 */
523 err = mmc_read_ext_csd(card); 621
622 err = mmc_get_ext_csd(card, &ext_csd);
623 if (err)
624 goto free_card;
625 err = mmc_read_ext_csd(card, ext_csd);
524 if (err) 626 if (err)
525 goto free_card; 627 goto free_card;
526 628
@@ -542,7 +644,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
542 */ 644 */
543 if (card->ext_csd.enhanced_area_en) { 645 if (card->ext_csd.enhanced_area_en) {
544 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 646 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
545 EXT_CSD_ERASE_GROUP_DEF, 1); 647 EXT_CSD_ERASE_GROUP_DEF, 1, 0);
546 648
547 if (err && err != -EBADMSG) 649 if (err && err != -EBADMSG)
548 goto free_card; 650 goto free_card;
@@ -568,12 +670,24 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
568 } 670 }
569 671
570 /* 672 /*
673 * Ensure eMMC user default partition is enabled
674 */
675 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
676 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
677 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
678 card->ext_csd.part_config,
679 card->ext_csd.part_time);
680 if (err && err != -EBADMSG)
681 goto free_card;
682 }
683
684 /*
571 * Activate high speed (if supported) 685 * Activate high speed (if supported)
572 */ 686 */
573 if ((card->ext_csd.hs_max_dtr != 0) && 687 if ((card->ext_csd.hs_max_dtr != 0) &&
574 (host->caps & MMC_CAP_MMC_HIGHSPEED)) { 688 (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
575 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 689 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
576 EXT_CSD_HS_TIMING, 1); 690 EXT_CSD_HS_TIMING, 1, 0);
577 if (err && err != -EBADMSG) 691 if (err && err != -EBADMSG)
578 goto free_card; 692 goto free_card;
579 693
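(The same five-argument mmc_switch() call is what partition switching in the block driver builds on; a hedged sketch of routing access to boot area 0, assuming the caller already holds the host. EXT_CSD_PART_CONFIG_ACC_BOOT0 comes from the block-driver side of this series.)

	u8 cfg = card->ext_csd.part_config;

	cfg &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
	cfg |= EXT_CSD_PART_CONFIG_ACC_BOOT0;	/* route data access to boot area 0 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
			 cfg, card->ext_csd.part_time);
	if (!err)
		card->ext_csd.part_config = cfg;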
@@ -606,10 +720,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
606 */ 720 */
607 if (mmc_card_highspeed(card)) { 721 if (mmc_card_highspeed(card)) {
608 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) 722 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
609 && (host->caps & (MMC_CAP_1_8V_DDR))) 723 && ((host->caps & (MMC_CAP_1_8V_DDR |
724 MMC_CAP_UHS_DDR50))
725 == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
610 ddr = MMC_1_8V_DDR_MODE; 726 ddr = MMC_1_8V_DDR_MODE;
611 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) 727 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
612 && (host->caps & (MMC_CAP_1_2V_DDR))) 728 && ((host->caps & (MMC_CAP_1_2V_DDR |
729 MMC_CAP_UHS_DDR50))
730 == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
613 ddr = MMC_1_2V_DDR_MODE; 731 ddr = MMC_1_2V_DDR_MODE;
614 } 732 }
615 733
@@ -640,18 +758,22 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
640 ddr = 0; /* no DDR for 1-bit width */ 758 ddr = 0; /* no DDR for 1-bit width */
641 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 759 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
642 EXT_CSD_BUS_WIDTH, 760 EXT_CSD_BUS_WIDTH,
643 ext_csd_bits[idx][0]); 761 ext_csd_bits[idx][0],
762 0);
644 if (!err) { 763 if (!err) {
645 mmc_set_bus_width_ddr(card->host, 764 mmc_set_bus_width(card->host, bus_width);
646 bus_width, MMC_SDR_MODE); 765
647 /* 766 /*
648 * If controller can't handle bus width test, 767 * If controller can't handle bus width test,
649 * use the highest bus width to maintain 768 * compare ext_csd previously read in 1 bit mode
650 * compatibility with previous MMC behavior. 769 * against ext_csd at new bus width
651 */ 770 */
652 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) 771 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
653 break; 772 err = mmc_compare_ext_csds(card,
654 err = mmc_bus_test(card, bus_width); 773 ext_csd,
774 bus_width);
775 else
776 err = mmc_bus_test(card, bus_width);
655 if (!err) 777 if (!err)
656 break; 778 break;
657 } 779 }
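(The verification step above picks one of two mechanisms depending on host capability; condensed, with error handling elided.)

	if (host->caps & MMC_CAP_BUS_WIDTH_TEST)
		err = mmc_bus_test(card, bus_width);			/* CMD19/CMD14 bus test */
	else
		err = mmc_compare_ext_csds(card, ext_csd, bus_width);	/* re-read EXT_CSD, compare RO fields */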
@@ -659,8 +781,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
659 781
660 if (!err && ddr) { 782 if (!err && ddr) {
661 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 783 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
662 EXT_CSD_BUS_WIDTH, 784 EXT_CSD_BUS_WIDTH,
663 ext_csd_bits[idx][1]); 785 ext_csd_bits[idx][1],
786 0);
664 } 787 }
665 if (err) { 788 if (err) {
666 printk(KERN_WARNING "%s: switch to bus width %d ddr %d " 789 printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
@@ -668,20 +791,43 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
668 1 << bus_width, ddr); 791 1 << bus_width, ddr);
669 goto free_card; 792 goto free_card;
670 } else if (ddr) { 793 } else if (ddr) {
794 /*
795 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
796 * signaling.
797 *
798 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
799 *
800 * 1.8V vccq at 3.3V core voltage (vcc) is not required
801 * in the JEDEC spec for DDR.
802 *
803 * Do not force change in vccq since we are obviously
804 * working and no change to vccq is needed.
805 *
806 * WARNING: eMMC rules are NOT the same as SD DDR
807 */
808 if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
809 err = mmc_set_signal_voltage(host,
810 MMC_SIGNAL_VOLTAGE_120, 0);
811 if (err)
812 goto err;
813 }
671 mmc_card_set_ddr_mode(card); 814 mmc_card_set_ddr_mode(card);
672 mmc_set_bus_width_ddr(card->host, bus_width, ddr); 815 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
816 mmc_set_bus_width(card->host, bus_width);
673 } 817 }
674 } 818 }
675 819
676 if (!oldcard) 820 if (!oldcard)
677 host->card = card; 821 host->card = card;
678 822
823 mmc_free_ext_csd(ext_csd);
679 return 0; 824 return 0;
680 825
681free_card: 826free_card:
682 if (!oldcard) 827 if (!oldcard)
683 mmc_remove_card(card); 828 mmc_remove_card(card);
684err: 829err:
830 mmc_free_ext_csd(ext_csd);
685 831
686 return err; 832 return err;
687} 833}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index f3b22bf89cc9..845ce7c533b9 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -23,12 +23,10 @@
23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) 23static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
24{ 24{
25 int err; 25 int err;
26 struct mmc_command cmd; 26 struct mmc_command cmd = {0};
27 27
28 BUG_ON(!host); 28 BUG_ON(!host);
29 29
30 memset(&cmd, 0, sizeof(struct mmc_command));
31
32 cmd.opcode = MMC_SELECT_CARD; 30 cmd.opcode = MMC_SELECT_CARD;
33 31
34 if (card) { 32 if (card) {
@@ -60,15 +58,13 @@ int mmc_deselect_cards(struct mmc_host *host)
60 58
61int mmc_card_sleepawake(struct mmc_host *host, int sleep) 59int mmc_card_sleepawake(struct mmc_host *host, int sleep)
62{ 60{
63 struct mmc_command cmd; 61 struct mmc_command cmd = {0};
64 struct mmc_card *card = host->card; 62 struct mmc_card *card = host->card;
65 int err; 63 int err;
66 64
67 if (sleep) 65 if (sleep)
68 mmc_deselect_cards(host); 66 mmc_deselect_cards(host);
69 67
70 memset(&cmd, 0, sizeof(struct mmc_command));
71
72 cmd.opcode = MMC_SLEEP_AWAKE; 68 cmd.opcode = MMC_SLEEP_AWAKE;
73 cmd.arg = card->rca << 16; 69 cmd.arg = card->rca << 16;
74 if (sleep) 70 if (sleep)
@@ -97,7 +93,7 @@ int mmc_card_sleepawake(struct mmc_host *host, int sleep)
97int mmc_go_idle(struct mmc_host *host) 93int mmc_go_idle(struct mmc_host *host)
98{ 94{
99 int err; 95 int err;
100 struct mmc_command cmd; 96 struct mmc_command cmd = {0};
101 97
102 /* 98 /*
103 * Non-SPI hosts need to prevent chipselect going active during 99 * Non-SPI hosts need to prevent chipselect going active during
@@ -113,8 +109,6 @@ int mmc_go_idle(struct mmc_host *host)
113 mmc_delay(1); 109 mmc_delay(1);
114 } 110 }
115 111
116 memset(&cmd, 0, sizeof(struct mmc_command));
117
118 cmd.opcode = MMC_GO_IDLE_STATE; 112 cmd.opcode = MMC_GO_IDLE_STATE;
119 cmd.arg = 0; 113 cmd.arg = 0;
120 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; 114 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
@@ -135,13 +129,11 @@ int mmc_go_idle(struct mmc_host *host)
135 129
136int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 130int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
137{ 131{
138 struct mmc_command cmd; 132 struct mmc_command cmd = {0};
139 int i, err = 0; 133 int i, err = 0;
140 134
141 BUG_ON(!host); 135 BUG_ON(!host);
142 136
143 memset(&cmd, 0, sizeof(struct mmc_command));
144
145 cmd.opcode = MMC_SEND_OP_COND; 137 cmd.opcode = MMC_SEND_OP_COND;
146 cmd.arg = mmc_host_is_spi(host) ? 0 : ocr; 138 cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
147 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; 139 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
@@ -178,13 +170,11 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
178int mmc_all_send_cid(struct mmc_host *host, u32 *cid) 170int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
179{ 171{
180 int err; 172 int err;
181 struct mmc_command cmd; 173 struct mmc_command cmd = {0};
182 174
183 BUG_ON(!host); 175 BUG_ON(!host);
184 BUG_ON(!cid); 176 BUG_ON(!cid);
185 177
186 memset(&cmd, 0, sizeof(struct mmc_command));
187
188 cmd.opcode = MMC_ALL_SEND_CID; 178 cmd.opcode = MMC_ALL_SEND_CID;
189 cmd.arg = 0; 179 cmd.arg = 0;
190 cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; 180 cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;
@@ -201,13 +191,11 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
201int mmc_set_relative_addr(struct mmc_card *card) 191int mmc_set_relative_addr(struct mmc_card *card)
202{ 192{
203 int err; 193 int err;
204 struct mmc_command cmd; 194 struct mmc_command cmd = {0};
205 195
206 BUG_ON(!card); 196 BUG_ON(!card);
207 BUG_ON(!card->host); 197 BUG_ON(!card->host);
208 198
209 memset(&cmd, 0, sizeof(struct mmc_command));
210
211 cmd.opcode = MMC_SET_RELATIVE_ADDR; 199 cmd.opcode = MMC_SET_RELATIVE_ADDR;
212 cmd.arg = card->rca << 16; 200 cmd.arg = card->rca << 16;
213 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 201 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
@@ -223,13 +211,11 @@ static int
223mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) 211mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
224{ 212{
225 int err; 213 int err;
226 struct mmc_command cmd; 214 struct mmc_command cmd = {0};
227 215
228 BUG_ON(!host); 216 BUG_ON(!host);
229 BUG_ON(!cxd); 217 BUG_ON(!cxd);
230 218
231 memset(&cmd, 0, sizeof(struct mmc_command));
232
233 cmd.opcode = opcode; 219 cmd.opcode = opcode;
234 cmd.arg = arg; 220 cmd.arg = arg;
235 cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; 221 cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
@@ -247,9 +233,9 @@ static int
247mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, 233mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
248 u32 opcode, void *buf, unsigned len) 234 u32 opcode, void *buf, unsigned len)
249{ 235{
250 struct mmc_request mrq; 236 struct mmc_request mrq = {0};
251 struct mmc_command cmd; 237 struct mmc_command cmd = {0};
252 struct mmc_data data; 238 struct mmc_data data = {0};
253 struct scatterlist sg; 239 struct scatterlist sg;
254 void *data_buf; 240 void *data_buf;
255 241
@@ -260,10 +246,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
260 if (data_buf == NULL) 246 if (data_buf == NULL)
261 return -ENOMEM; 247 return -ENOMEM;
262 248
263 memset(&mrq, 0, sizeof(struct mmc_request));
264 memset(&cmd, 0, sizeof(struct mmc_command));
265 memset(&data, 0, sizeof(struct mmc_data));
266
267 mrq.cmd = &cmd; 249 mrq.cmd = &cmd;
268 mrq.data = &data; 250 mrq.data = &data;
269 251
@@ -355,11 +337,9 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
355 337
356int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) 338int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
357{ 339{
358 struct mmc_command cmd; 340 struct mmc_command cmd = {0};
359 int err; 341 int err;
360 342
361 memset(&cmd, 0, sizeof(struct mmc_command));
362
363 cmd.opcode = MMC_SPI_READ_OCR; 343 cmd.opcode = MMC_SPI_READ_OCR;
364 cmd.arg = highcap ? (1 << 30) : 0; 344 cmd.arg = highcap ? (1 << 30) : 0;
365 cmd.flags = MMC_RSP_SPI_R3; 345 cmd.flags = MMC_RSP_SPI_R3;
@@ -372,11 +352,9 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
372 352
373int mmc_spi_set_crc(struct mmc_host *host, int use_crc) 353int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
374{ 354{
375 struct mmc_command cmd; 355 struct mmc_command cmd = {0};
376 int err; 356 int err;
377 357
378 memset(&cmd, 0, sizeof(struct mmc_command));
379
380 cmd.opcode = MMC_SPI_CRC_ON_OFF; 358 cmd.opcode = MMC_SPI_CRC_ON_OFF;
381 cmd.flags = MMC_RSP_SPI_R1; 359 cmd.flags = MMC_RSP_SPI_R1;
382 cmd.arg = use_crc; 360 cmd.arg = use_crc;
@@ -387,23 +365,34 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
387 return err; 365 return err;
388} 366}
389 367
390int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) 368/**
369 * mmc_switch - modify EXT_CSD register
370 * @card: the MMC card associated with the data transfer
371 * @set: cmd set values
372 * @index: EXT_CSD register index
373 * @value: value to program into EXT_CSD register
374 * @timeout_ms: timeout (ms) for operation performed by register write,
375 * timeout of zero implies maximum possible timeout
376 *
377 * Modifies the EXT_CSD register for selected card.
378 */
379int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
380 unsigned int timeout_ms)
391{ 381{
392 int err; 382 int err;
393 struct mmc_command cmd; 383 struct mmc_command cmd = {0};
394 u32 status; 384 u32 status;
395 385
396 BUG_ON(!card); 386 BUG_ON(!card);
397 BUG_ON(!card->host); 387 BUG_ON(!card->host);
398 388
399 memset(&cmd, 0, sizeof(struct mmc_command));
400
401 cmd.opcode = MMC_SWITCH; 389 cmd.opcode = MMC_SWITCH;
402 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | 390 cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
403 (index << 16) | 391 (index << 16) |
404 (value << 8) | 392 (value << 8) |
405 set; 393 set;
406 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 394 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
395 cmd.cmd_timeout_ms = timeout_ms;
407 396
408 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); 397 err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
409 if (err) 398 if (err)
@@ -433,17 +422,16 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
433 422
434 return 0; 423 return 0;
435} 424}
425EXPORT_SYMBOL_GPL(mmc_switch);
436 426
437int mmc_send_status(struct mmc_card *card, u32 *status) 427int mmc_send_status(struct mmc_card *card, u32 *status)
438{ 428{
439 int err; 429 int err;
440 struct mmc_command cmd; 430 struct mmc_command cmd = {0};
441 431
442 BUG_ON(!card); 432 BUG_ON(!card);
443 BUG_ON(!card->host); 433 BUG_ON(!card->host);
444 434
445 memset(&cmd, 0, sizeof(struct mmc_command));
446
447 cmd.opcode = MMC_SEND_STATUS; 435 cmd.opcode = MMC_SEND_STATUS;
448 if (!mmc_host_is_spi(card->host)) 436 if (!mmc_host_is_spi(card->host))
449 cmd.arg = card->rca << 16; 437 cmd.arg = card->rca << 16;
@@ -466,9 +454,9 @@ static int
466mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, 454mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
467 u8 len) 455 u8 len)
468{ 456{
469 struct mmc_request mrq; 457 struct mmc_request mrq = {0};
470 struct mmc_command cmd; 458 struct mmc_command cmd = {0};
471 struct mmc_data data; 459 struct mmc_data data = {0};
472 struct scatterlist sg; 460 struct scatterlist sg;
473 u8 *data_buf; 461 u8 *data_buf;
474 u8 *test_buf; 462 u8 *test_buf;
@@ -497,10 +485,6 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
497 if (opcode == MMC_BUS_TEST_W) 485 if (opcode == MMC_BUS_TEST_W)
498 memcpy(data_buf, test_buf, len); 486 memcpy(data_buf, test_buf, len);
499 487
500 memset(&mrq, 0, sizeof(struct mmc_request));
501 memset(&cmd, 0, sizeof(struct mmc_command));
502 memset(&data, 0, sizeof(struct mmc_data));
503
504 mrq.cmd = &cmd; 488 mrq.cmd = &cmd;
505 mrq.data = &data; 489 mrq.data = &data;
506 cmd.opcode = opcode; 490 cmd.opcode = opcode;
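
Most of the mmc_ops.c churn above replaces a declaration plus memset() with the C99 initializer form "struct mmc_command cmd = {0};". Both forms leave every member zero (memset additionally clears padding bytes, which nothing here relies on). A small stand-alone comparison, using a simplified stand-in struct rather than the real struct mmc_command:

    #include <assert.h>
    #include <string.h>

    struct cmd {
            unsigned int opcode;
            unsigned int arg;
            unsigned int flags;
            unsigned int resp[4];
    };

    int main(void)
    {
            struct cmd a = {0};                   /* initializer form used by the patch */
            struct cmd b;

            memset(&b, 0, sizeof(struct cmd));    /* older form being removed */

            /* Every member of both structs is zero. */
            assert(a.opcode == 0 && a.arg == 0 && a.flags == 0 && a.resp[3] == 0);
            /* struct cmd has no padding, so a byte-wise compare is also valid here. */
            assert(memcmp(&a, &b, sizeof(struct cmd)) == 0);
            return 0;
    }
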
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index e6d44b8a18db..9276946fa5b7 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -20,7 +20,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid);
20int mmc_set_relative_addr(struct mmc_card *card); 20int mmc_set_relative_addr(struct mmc_card *card);
21int mmc_send_csd(struct mmc_card *card, u32 *csd); 21int mmc_send_csd(struct mmc_card *card, u32 *csd);
22int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); 22int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
23int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value);
24int mmc_send_status(struct mmc_card *card, u32 *status); 23int mmc_send_status(struct mmc_card *card, u32 *status);
25int mmc_send_cid(struct mmc_host *host, u32 *cid); 24int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 25int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
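
With its prototype dropped from the private mmc_ops.h and the EXPORT_SYMBOL_GPL added above, mmc_switch() is now callable outside core code and takes an explicit per-write timeout. A hedged, kernel-style sketch of a caller: it assumes the usual EXT_CSD_* constants from <linux/mmc/mmc.h> and that the new declaration is visible to the caller; the 250 ms value is purely illustrative and the snippet only builds in-tree:

    #include <linux/mmc/card.h>
    #include <linux/mmc/host.h>
    #include <linux/mmc/mmc.h>

    /* Illustrative: ask an eMMC card for a 4-bit bus, allowing the
     * EXT_CSD write up to 250 ms (0 would mean "maximum possible
     * timeout" per the kerneldoc added in this patch). */
    static int example_set_4bit_bus(struct mmc_card *card)
    {
            int err;

            err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                             EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4,
                             250 /* timeout_ms */);
            if (err)
                    return err;

            mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
            return 0;
    }
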
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 11118b74eb20..3a596217029e 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * This file contains work-arounds for many known sdio hardware 2 * This file contains work-arounds for many known SD/MMC
3 * bugs. 3 * and SDIO hardware bugs.
4 * 4 *
5 * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com>
5 * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com> 6 * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com>
6 * Inspired from pci fixup code: 7 * Inspired from pci fixup code:
7 * Copyright (c) 1999 Martin Mares <mj@ucw.cz> 8 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
@@ -11,34 +12,14 @@
11#include <linux/types.h> 12#include <linux/types.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
13#include <linux/mmc/card.h> 14#include <linux/mmc/card.h>
14#include <linux/mod_devicetable.h>
15 15
16/* 16#ifndef SDIO_VENDOR_ID_TI
17 * The world is not perfect and supplies us with broken mmc/sdio devices. 17#define SDIO_VENDOR_ID_TI 0x0097
18 * For at least a part of these bugs we need a work-around 18#endif
19 */
20
21struct mmc_fixup {
22 u16 vendor, device; /* You can use SDIO_ANY_ID here of course */
23 void (*vendor_fixup)(struct mmc_card *card, int data);
24 int data;
25};
26
27/*
28 * This hook just adds a quirk unconditionally
29 */
30static void __maybe_unused add_quirk(struct mmc_card *card, int data)
31{
32 card->quirks |= data;
33}
34 19
35/* 20#ifndef SDIO_DEVICE_ID_TI_WL1271
36 * This hook just removes a quirk unconditionally 21#define SDIO_DEVICE_ID_TI_WL1271 0x4076
37 */ 22#endif
38static void __maybe_unused remove_quirk(struct mmc_card *card, int data)
39{
40 card->quirks &= ~data;
41}
42 23
43/* 24/*
44 * This hook just adds a quirk for all sdio devices 25 * This hook just adds a quirk for all sdio devices
@@ -49,33 +30,47 @@ static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
49 card->quirks |= data; 30 card->quirks |= data;
50} 31}
51 32
52#ifndef SDIO_VENDOR_ID_TI
53#define SDIO_VENDOR_ID_TI 0x0097
54#endif
55
56#ifndef SDIO_DEVICE_ID_TI_WL1271
57#define SDIO_DEVICE_ID_TI_WL1271 0x4076
58#endif
59
60static const struct mmc_fixup mmc_fixup_methods[] = { 33static const struct mmc_fixup mmc_fixup_methods[] = {
61 /* by default sdio devices are considered CLK_GATING broken */ 34 /* by default sdio devices are considered CLK_GATING broken */
62 /* good cards will be whitelisted as they are tested */ 35 /* good cards will be whitelisted as they are tested */
63 { SDIO_ANY_ID, SDIO_ANY_ID, 36 SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
64 add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING }, 37 add_quirk_for_sdio_devices,
65 { SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, 38 MMC_QUIRK_BROKEN_CLK_GATING),
66 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING }, 39
67 { 0 } 40 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
41 remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
42
43 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
44 add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
45
46 SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
47 add_quirk, MMC_QUIRK_DISABLE_CD),
48
49 END_FIXUP
68}; 50};
69 51
70void mmc_fixup_device(struct mmc_card *card) 52void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
71{ 53{
72 const struct mmc_fixup *f; 54 const struct mmc_fixup *f;
55 u64 rev = cid_rev_card(card);
56
57 /* Non-core specific workarounds. */
58 if (!table)
59 table = mmc_fixup_methods;
73 60
74 for (f = mmc_fixup_methods; f->vendor_fixup; f++) { 61 for (f = table; f->vendor_fixup; f++) {
75 if ((f->vendor == card->cis.vendor 62 if ((f->manfid == CID_MANFID_ANY ||
76 || f->vendor == (u16) SDIO_ANY_ID) && 63 f->manfid == card->cid.manfid) &&
77 (f->device == card->cis.device 64 (f->oemid == CID_OEMID_ANY ||
78 || f->device == (u16) SDIO_ANY_ID)) { 65 f->oemid == card->cid.oemid) &&
66 (f->name == CID_NAME_ANY ||
67 !strncmp(f->name, card->cid.prod_name,
68 sizeof(card->cid.prod_name))) &&
69 (f->cis_vendor == card->cis.vendor ||
70 f->cis_vendor == (u16) SDIO_ANY_ID) &&
71 (f->cis_device == card->cis.device ||
72 f->cis_device == (u16) SDIO_ANY_ID) &&
73 rev >= f->rev_start && rev <= f->rev_end) {
79 dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup); 74 dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup);
80 f->vendor_fixup(card, f->data); 75 f->vendor_fixup(card, f->data);
81 } 76 }
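
The rewritten mmc_fixup_device() above matches a card against a table on CID manufacturer/OEM IDs, product name, SDIO vendor/device IDs and a CID revision range, with an "ANY" wildcard accepted at each position. The stand-alone program below illustrates the same table-driven wildcard matching with simplified, made-up fields; it does not reproduce the kernel's SDIO_FIXUP/END_FIXUP macros or the real struct mmc_fixup layout:

    #include <stdio.h>
    #include <string.h>

    #define ANY_ID 0xffff

    struct card {
            unsigned int manfid, oemid;
            char name[8];
            unsigned int quirks;
    };

    struct fixup {
            unsigned int manfid, oemid;   /* ANY_ID acts as a wildcard   */
            const char *name;             /* NULL acts as a wildcard     */
            unsigned int quirk;           /* bit OR-ed into card->quirks */
    };

    static const struct fixup fixups[] = {
            { ANY_ID, ANY_ID, NULL,      0x1 },  /* applies to every card      */
            { 0x0097, 0x4076, NULL,      0x2 },  /* applies to one vendor/dev  */
            { ANY_ID, ANY_ID, "TOSHIBA", 0x4 },  /* applies by product name    */
            { 0, 0, NULL, 0 }                    /* table terminator           */
    };

    static void apply_fixups(struct card *c)
    {
            const struct fixup *f;

            for (f = fixups; f->quirk; f++) {
                    if ((f->manfid == ANY_ID || f->manfid == c->manfid) &&
                        (f->oemid  == ANY_ID || f->oemid  == c->oemid) &&
                        (!f->name  || !strncmp(f->name, c->name, sizeof(c->name))))
                            c->quirks |= f->quirk;
            }
    }

    int main(void)
    {
            struct card c = { 0x0097, 0x4076, "WL1271", 0 };

            apply_fixups(&c);
            printf("quirks = 0x%x\n", c.quirks);   /* prints 0x3 */
            return 0;
    }
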
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 6dac89fe0535..ff2774128aa9 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -130,7 +130,7 @@ static int mmc_decode_csd(struct mmc_card *card)
130 break; 130 break;
131 case 1: 131 case 1:
132 /* 132 /*
133 * This is a block-addressed SDHC card. Most 133 * This is a block-addressed SDHC or SDXC card. Most
134 * interesting fields are unused and have fixed 134 * interesting fields are unused and have fixed
135 * values. To avoid getting tripped by buggy cards, 135 * values. To avoid getting tripped by buggy cards,
136 * we assume those fixed values ourselves. 136 * we assume those fixed values ourselves.
@@ -144,6 +144,11 @@ static int mmc_decode_csd(struct mmc_card *card)
144 e = UNSTUFF_BITS(resp, 96, 3); 144 e = UNSTUFF_BITS(resp, 96, 3);
145 csd->max_dtr = tran_exp[e] * tran_mant[m]; 145 csd->max_dtr = tran_exp[e] * tran_mant[m];
146 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); 146 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
147 csd->c_size = UNSTUFF_BITS(resp, 48, 22);
148
149 /* SDXC cards have a minimum C_SIZE of 0x00FFFF */
150 if (csd->c_size >= 0xFFFF)
151 mmc_card_set_ext_capacity(card);
147 152
148 m = UNSTUFF_BITS(resp, 48, 22); 153 m = UNSTUFF_BITS(resp, 48, 22);
149 csd->capacity = (1 + m) << 10; 154 csd->capacity = (1 + m) << 10;
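
The added lines read C_SIZE from a version 2.0 CSD and mark the card extended-capacity once C_SIZE reaches 0x00FFFF, the minimum value SDXC cards report. For these block-addressed cards the capacity is (C_SIZE + 1) << 10 sectors of 512 bytes, which the small stand-alone calculation below spells out (the example C_SIZE values are made up):

    #include <stdio.h>

    /* Capacity rule for CSD version 2.0 (SDHC/SDXC), as used in the hunk:
     * sectors = (C_SIZE + 1) << 10, one sector = 512 bytes. */
    static unsigned long long csd2_bytes(unsigned int c_size)
    {
            unsigned long long sectors = ((unsigned long long)c_size + 1) << 10;

            return sectors * 512;
    }

    int main(void)
    {
            unsigned int sizes[] = { 0x003B37, 0x00FFFF, 0x01DBD3 };  /* examples */
            unsigned int i;

            for (i = 0; i < 3; i++) {
                    unsigned int c_size = sizes[i];

                    printf("C_SIZE=0x%06X -> %llu GiB%s\n", c_size,
                           csd2_bytes(c_size) >> 30,
                           c_size >= 0xFFFF ? "  (treated as SDXC / ext. capacity)" : "");
            }
            return 0;
    }
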
@@ -189,12 +194,17 @@ static int mmc_decode_scr(struct mmc_card *card)
189 194
190 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); 195 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
191 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); 196 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
197 if (scr->sda_vsn == SCR_SPEC_VER_2)
198 /* Check if Physical Layer Spec v3.0 is supported */
199 scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);
192 200
193 if (UNSTUFF_BITS(resp, 55, 1)) 201 if (UNSTUFF_BITS(resp, 55, 1))
194 card->erased_byte = 0xFF; 202 card->erased_byte = 0xFF;
195 else 203 else
196 card->erased_byte = 0x0; 204 card->erased_byte = 0x0;
197 205
206 if (scr->sda_spec3)
207 scr->cmds = UNSTUFF_BITS(resp, 32, 2);
198 return 0; 208 return 0;
199} 209}
200 210
@@ -274,29 +284,74 @@ static int mmc_read_switch(struct mmc_card *card)
274 status = kmalloc(64, GFP_KERNEL); 284 status = kmalloc(64, GFP_KERNEL);
275 if (!status) { 285 if (!status) {
276 printk(KERN_ERR "%s: could not allocate a buffer for " 286 printk(KERN_ERR "%s: could not allocate a buffer for "
277 "switch capabilities.\n", mmc_hostname(card->host)); 287 "switch capabilities.\n",
288 mmc_hostname(card->host));
278 return -ENOMEM; 289 return -ENOMEM;
279 } 290 }
280 291
292 /* Find out the supported Bus Speed Modes. */
281 err = mmc_sd_switch(card, 0, 0, 1, status); 293 err = mmc_sd_switch(card, 0, 0, 1, status);
282 if (err) { 294 if (err) {
283 /* If the host or the card can't do the switch, 295 /*
284 * fail more gracefully. */ 296 * If the host or the card can't do the switch,
285 if ((err != -EINVAL) 297 * fail more gracefully.
286 && (err != -ENOSYS) 298 */
287 && (err != -EFAULT)) 299 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
288 goto out; 300 goto out;
289 301
290 printk(KERN_WARNING "%s: problem reading switch " 302 printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n",
291 "capabilities, performance might suffer.\n",
292 mmc_hostname(card->host)); 303 mmc_hostname(card->host));
293 err = 0; 304 err = 0;
294 305
295 goto out; 306 goto out;
296 } 307 }
297 308
298 if (status[13] & 0x02) 309 if (card->scr.sda_spec3) {
299 card->sw_caps.hs_max_dtr = 50000000; 310 card->sw_caps.sd3_bus_mode = status[13];
311
312 /* Find out Driver Strengths supported by the card */
313 err = mmc_sd_switch(card, 0, 2, 1, status);
314 if (err) {
315 /*
316 * If the host or the card can't do the switch,
317 * fail more gracefully.
318 */
319 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
320 goto out;
321
322 printk(KERN_WARNING "%s: problem reading "
323 "Driver Strength.\n",
324 mmc_hostname(card->host));
325 err = 0;
326
327 goto out;
328 }
329
330 card->sw_caps.sd3_drv_type = status[9];
331
332 /* Find out Current Limits supported by the card */
333 err = mmc_sd_switch(card, 0, 3, 1, status);
334 if (err) {
335 /*
336 * If the host or the card can't do the switch,
337 * fail more gracefully.
338 */
339 if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
340 goto out;
341
342 printk(KERN_WARNING "%s: problem reading "
343 "Current Limit.\n",
344 mmc_hostname(card->host));
345 err = 0;
346
347 goto out;
348 }
349
350 card->sw_caps.sd3_curr_limit = status[7];
351 } else {
352 if (status[13] & 0x02)
353 card->sw_caps.hs_max_dtr = 50000000;
354 }
300 355
301out: 356out:
302 kfree(status); 357 kfree(status);
@@ -352,6 +407,232 @@ out:
352 return err; 407 return err;
353} 408}
354 409
410static int sd_select_driver_type(struct mmc_card *card, u8 *status)
411{
412 int host_drv_type = 0, card_drv_type = 0;
413 int err;
414
415 /*
416 * If the host doesn't support any of the Driver Types A, C or D,
417 * default Driver Type B is used.
418 */
419 if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
420 | MMC_CAP_DRIVER_TYPE_D)))
421 return 0;
422
423 if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) {
424 host_drv_type = MMC_SET_DRIVER_TYPE_A;
425 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
426 card_drv_type = MMC_SET_DRIVER_TYPE_A;
427 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
428 card_drv_type = MMC_SET_DRIVER_TYPE_B;
429 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
430 card_drv_type = MMC_SET_DRIVER_TYPE_C;
431 } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) {
432 host_drv_type = MMC_SET_DRIVER_TYPE_C;
433 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
434 card_drv_type = MMC_SET_DRIVER_TYPE_C;
435 } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
436 /*
437 * If we are here, that means only the default driver type
438 * B is supported by the host.
439 */
440 host_drv_type = MMC_SET_DRIVER_TYPE_B;
441 if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
442 card_drv_type = MMC_SET_DRIVER_TYPE_B;
443 else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
444 card_drv_type = MMC_SET_DRIVER_TYPE_C;
445 }
446
447 err = mmc_sd_switch(card, 1, 2, card_drv_type, status);
448 if (err)
449 return err;
450
451 if ((status[15] & 0xF) != card_drv_type) {
452 printk(KERN_WARNING "%s: Problem setting driver strength!\n",
453 mmc_hostname(card->host));
454 return 0;
455 }
456
457 mmc_set_driver_type(card->host, host_drv_type);
458
459 return 0;
460}
461
462static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
463{
464 unsigned int bus_speed = 0, timing = 0;
465 int err;
466
467 /*
468 * If the host doesn't support any of the UHS-I modes, fall back to
469 * the default speed.
470 */
471 if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
472 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
473 return 0;
474
475 if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
476 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
477 bus_speed = UHS_SDR104_BUS_SPEED;
478 timing = MMC_TIMING_UHS_SDR104;
479 card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
480 } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
481 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
482 bus_speed = UHS_DDR50_BUS_SPEED;
483 timing = MMC_TIMING_UHS_DDR50;
484 card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
485 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
486 MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
487 SD_MODE_UHS_SDR50)) {
488 bus_speed = UHS_SDR50_BUS_SPEED;
489 timing = MMC_TIMING_UHS_SDR50;
490 card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
491 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
492 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
493 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
494 bus_speed = UHS_SDR25_BUS_SPEED;
495 timing = MMC_TIMING_UHS_SDR25;
496 card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
497 } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
498 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
499 MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
500 SD_MODE_UHS_SDR12)) {
501 bus_speed = UHS_SDR12_BUS_SPEED;
502 timing = MMC_TIMING_UHS_SDR12;
503 card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
504 }
505
506 card->sd_bus_speed = bus_speed;
507 err = mmc_sd_switch(card, 1, 0, bus_speed, status);
508 if (err)
509 return err;
510
511 if ((status[16] & 0xF) != bus_speed)
512 printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
513 mmc_hostname(card->host));
514 else {
515 mmc_set_timing(card->host, timing);
516 mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
517 }
518
519 return 0;
520}
521
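
sd_set_bus_speed_mode() above picks the fastest UHS-I mode advertised by both the host capability mask and the card's bus-mode byte, in the fixed preference order SDR104, DDR50, SDR50, SDR25, SDR12 (in the kernel a higher host SDR capability also implies the lower SDR modes, which this sketch omits). The same highest-common-mode selection as a compact stand-alone loop, with invented bit values rather than the MMC_CAP_UHS_*/SD_MODE_UHS_* constants:

    #include <stdio.h>

    /* Invented bit values; the kernel uses MMC_CAP_UHS_* / SD_MODE_UHS_*. */
    enum { SDR12 = 1 << 0, SDR25 = 1 << 1, SDR50 = 1 << 2,
           SDR104 = 1 << 3, DDR50 = 1 << 4 };

    static const struct {
            unsigned int mode;
            const char *name;
            unsigned int max_dtr_hz;
    } order[] = {                        /* highest preference first */
            { SDR104, "SDR104", 208000000 },
            { DDR50,  "DDR50",   50000000 },
            { SDR50,  "SDR50",  100000000 },
            { SDR25,  "SDR25",   50000000 },
            { SDR12,  "SDR12",   25000000 },
    };

    int main(void)
    {
            unsigned int host_caps  = SDR12 | SDR25 | SDR50;           /* example host */
            unsigned int card_modes = SDR12 | SDR25 | SDR50 | SDR104;  /* example card */
            unsigned int i;

            for (i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
                    if ((host_caps & order[i].mode) && (card_modes & order[i].mode)) {
                            printf("selected %s, clock %u Hz\n",
                                   order[i].name, order[i].max_dtr_hz);
                            return 0;
                    }
            }
            printf("no common UHS mode, stay at default speed\n");
            return 0;
    }
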
522static int sd_set_current_limit(struct mmc_card *card, u8 *status)
523{
524 int current_limit = 0;
525 int err;
526
527 /*
528 * Current limit switch is only defined for SDR50, SDR104, and DDR50
529 * bus speed modes. For other bus speed modes, we set the default
530 * current limit of 200mA.
531 */
532 if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) ||
533 (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) ||
534 (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) {
535 if (card->host->caps & MMC_CAP_MAX_CURRENT_800) {
536 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
537 current_limit = SD_SET_CURRENT_LIMIT_800;
538 else if (card->sw_caps.sd3_curr_limit &
539 SD_MAX_CURRENT_600)
540 current_limit = SD_SET_CURRENT_LIMIT_600;
541 else if (card->sw_caps.sd3_curr_limit &
542 SD_MAX_CURRENT_400)
543 current_limit = SD_SET_CURRENT_LIMIT_400;
544 else if (card->sw_caps.sd3_curr_limit &
545 SD_MAX_CURRENT_200)
546 current_limit = SD_SET_CURRENT_LIMIT_200;
547 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) {
548 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
549 current_limit = SD_SET_CURRENT_LIMIT_600;
550 else if (card->sw_caps.sd3_curr_limit &
551 SD_MAX_CURRENT_400)
552 current_limit = SD_SET_CURRENT_LIMIT_400;
553 else if (card->sw_caps.sd3_curr_limit &
554 SD_MAX_CURRENT_200)
555 current_limit = SD_SET_CURRENT_LIMIT_200;
556 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) {
557 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
558 current_limit = SD_SET_CURRENT_LIMIT_400;
559 else if (card->sw_caps.sd3_curr_limit &
560 SD_MAX_CURRENT_200)
561 current_limit = SD_SET_CURRENT_LIMIT_200;
562 } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) {
563 if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
564 current_limit = SD_SET_CURRENT_LIMIT_200;
565 }
566 } else
567 current_limit = SD_SET_CURRENT_LIMIT_200;
568
569 err = mmc_sd_switch(card, 1, 3, current_limit, status);
570 if (err)
571 return err;
572
573 if (((status[15] >> 4) & 0x0F) != current_limit)
574 printk(KERN_WARNING "%s: Problem setting current limit!\n",
575 mmc_hostname(card->host));
576
577 return 0;
578}
579
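
sd_set_current_limit() above walks the host's maximum-current capability downwards (800, 600, 400, 200 mA) and, within that ceiling, requests the largest limit the card also advertises; outside the SDR50/SDR104/DDR50 modes it simply asks for the default 200 mA. The nested if-chains can be read as the short loop below, a rewrite sketch with invented bit positions rather than the kernel's SD_MAX_CURRENT_*/MMC_CAP_MAX_CURRENT_* constants:

    #include <stdio.h>

    /* Invented encoding: card bit 3 = 800 mA, bit 2 = 600 mA,
     * bit 1 = 400 mA, bit 0 = 200 mA. Index 0 is the highest limit. */
    static const unsigned int limits_ma[] = { 800, 600, 400, 200 };

    static unsigned int pick_current_limit(unsigned int host_max_ma,
                                           unsigned int card_bits)
    {
            unsigned int i;

            for (i = 0; i < 4; i++) {
                    if (limits_ma[i] > host_max_ma)
                            continue;                 /* above what the host can supply */
                    if (card_bits & (1u << (3 - i)))
                            return limits_ma[i];
            }
            return 200;                               /* default current limit */
    }

    int main(void)
    {
            /* Host can supply up to 600 mA; card advertises 200/400/800 mA. */
            unsigned int card_bits = (1u << 0) | (1u << 1) | (1u << 3);

            printf("negotiated limit: %u mA\n", pick_current_limit(600, card_bits));
            return 0;
    }
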
580/*
581 * UHS-I specific initialization procedure
582 */
583static int mmc_sd_init_uhs_card(struct mmc_card *card)
584{
585 int err;
586 u8 *status;
587
588 if (!card->scr.sda_spec3)
589 return 0;
590
591 if (!(card->csd.cmdclass & CCC_SWITCH))
592 return 0;
593
594 status = kmalloc(64, GFP_KERNEL);
595 if (!status) {
596 printk(KERN_ERR "%s: could not allocate a buffer for "
597 "switch capabilities.\n", mmc_hostname(card->host));
598 return -ENOMEM;
599 }
600
601 /* Set 4-bit bus width */
602 if ((card->host->caps & MMC_CAP_4_BIT_DATA) &&
603 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
604 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
605 if (err)
606 goto out;
607
608 mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
609 }
610
611 /* Set the driver strength for the card */
612 err = sd_select_driver_type(card, status);
613 if (err)
614 goto out;
615
616 /* Set bus speed mode of the card */
617 err = sd_set_bus_speed_mode(card, status);
618 if (err)
619 goto out;
620
621 /* Set current limit for the card */
622 err = sd_set_current_limit(card, status);
623 if (err)
624 goto out;
625
626 /* SPI mode doesn't define CMD19 */
627 if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
628 err = card->host->ops->execute_tuning(card->host);
629
630out:
631 kfree(status);
632
633 return err;
634}
635
355MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], 636MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
356 card->raw_cid[2], card->raw_cid[3]); 637 card->raw_cid[2], card->raw_cid[3]);
357MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 638MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
@@ -400,7 +681,7 @@ struct device_type sd_type = {
400/* 681/*
401 * Fetch CID from card. 682 * Fetch CID from card.
402 */ 683 */
403int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid) 684int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
404{ 685{
405 int err; 686 int err;
406 687
@@ -420,12 +701,39 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid)
420 */ 701 */
421 err = mmc_send_if_cond(host, ocr); 702 err = mmc_send_if_cond(host, ocr);
422 if (!err) 703 if (!err)
423 ocr |= 1 << 30; 704 ocr |= SD_OCR_CCS;
705
706 /*
707 * If the host supports one of UHS-I modes, request the card
708 * to switch to 1.8V signaling level.
709 */
710 if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
711 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
712 ocr |= SD_OCR_S18R;
713
714 /* If the host can supply more than 150mA, XPC should be set to 1. */
715 if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
716 MMC_CAP_SET_XPC_180))
717 ocr |= SD_OCR_XPC;
424 718
425 err = mmc_send_app_op_cond(host, ocr, NULL); 719try_again:
720 err = mmc_send_app_op_cond(host, ocr, rocr);
426 if (err) 721 if (err)
427 return err; 722 return err;
428 723
724 /*
725 * In case CCS and S18A in the response is set, start Signal Voltage
726 * Switch procedure. SPI mode doesn't support CMD11.
727 */
728 if (!mmc_host_is_spi(host) && rocr &&
729 ((*rocr & 0x41000000) == 0x41000000)) {
730 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true);
731 if (err) {
732 ocr &= ~SD_OCR_S18R;
733 goto try_again;
734 }
735 }
736
429 if (mmc_host_is_spi(host)) 737 if (mmc_host_is_spi(host))
430 err = mmc_send_cid(host, cid); 738 err = mmc_send_cid(host, cid);
431 else 739 else
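
mmc_sd_get_cid() above now ORs request bits into the ACMD41 argument (CCS for high capacity, S18R to ask for 1.8 V signaling, XPC for higher current) and, when the response carries both CCS and S18A -- the 0x41000000 mask -- starts the CMD11 voltage switch, retrying without S18R if that fails. The bit arithmetic is easy to verify in isolation (bit positions follow the SD OCR layout; the response value is an example):

    #include <stdio.h>

    #define SD_OCR_CCS   (1u << 30)  /* card capacity status / request  */
    #define SD_OCR_S18R  (1u << 24)  /* switch-to-1.8V request/accepted */
    #define SD_OCR_XPC   (1u << 28)  /* >150 mA (SDXC power control)    */

    int main(void)
    {
            unsigned int ocr = 0x00ff8000;        /* example voltage window */
            unsigned int rocr;

            ocr |= SD_OCR_CCS | SD_OCR_S18R | SD_OCR_XPC;
            printf("ACMD41 argument: 0x%08x\n", ocr);

            rocr = 0xc1ff8000;                    /* example R3: busy clear, */
                                                  /* CCS and S18A both set   */
            if ((rocr & 0x41000000) == 0x41000000)
                    printf("card accepted 1.8V signaling, start CMD11 switch\n");
            else
                    printf("stay at 3.3V signaling\n");
            return 0;
    }
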
@@ -553,11 +861,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
553 struct mmc_card *card; 861 struct mmc_card *card;
554 int err; 862 int err;
555 u32 cid[4]; 863 u32 cid[4];
864 u32 rocr = 0;
556 865
557 BUG_ON(!host); 866 BUG_ON(!host);
558 WARN_ON(!host->claimed); 867 WARN_ON(!host->claimed);
559 868
560 err = mmc_sd_get_cid(host, ocr, cid); 869 err = mmc_sd_get_cid(host, ocr, cid, &rocr);
561 if (err) 870 if (err)
562 return err; 871 return err;
563 872
@@ -610,30 +919,47 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
610 if (err) 919 if (err)
611 goto free_card; 920 goto free_card;
612 921
613 /* 922 /* Initialization sequence for UHS-I cards */
614 * Attempt to change to high-speed (if supported) 923 if (rocr & SD_ROCR_S18A) {
615 */ 924 err = mmc_sd_init_uhs_card(card);
616 err = mmc_sd_switch_hs(card); 925 if (err)
617 if (err > 0) 926 goto free_card;
618 mmc_sd_go_highspeed(card);
619 else if (err)
620 goto free_card;
621 927
622 /* 928 /* Card is an ultra-high-speed card */
623 * Set bus speed. 929 mmc_sd_card_set_uhs(card);
624 */
625 mmc_set_clock(host, mmc_sd_get_max_clock(card));
626 930
627 /* 931 /*
628 * Switch to wider bus (if supported). 932 * Since initialization is now complete, enable preset
629 */ 933 * value registers for UHS-I cards.
630 if ((host->caps & MMC_CAP_4_BIT_DATA) && 934 */
631 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { 935 if (host->ops->enable_preset_value)
632 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); 936 host->ops->enable_preset_value(host, true);
633 if (err) 937 } else {
938 /*
939 * Attempt to change to high-speed (if supported)
940 */
941 err = mmc_sd_switch_hs(card);
942 if (err > 0)
943 mmc_sd_go_highspeed(card);
944 else if (err)
634 goto free_card; 945 goto free_card;
635 946
636 mmc_set_bus_width(host, MMC_BUS_WIDTH_4); 947 /*
948 * Set bus speed.
949 */
950 mmc_set_clock(host, mmc_sd_get_max_clock(card));
951
952 /*
953 * Switch to wider bus (if supported).
954 */
955 if ((host->caps & MMC_CAP_4_BIT_DATA) &&
956 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
957 err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
958 if (err)
959 goto free_card;
960
961 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
962 }
637 } 963 }
638 964
639 host->card = card; 965 host->card = card;
@@ -773,6 +1099,15 @@ int mmc_attach_sd(struct mmc_host *host)
773 BUG_ON(!host); 1099 BUG_ON(!host);
774 WARN_ON(!host->claimed); 1100 WARN_ON(!host->claimed);
775 1101
1102 /* Make sure we are at 3.3V signalling voltage */
1103 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
1104 if (err)
1105 return err;
1106
1107 /* Disable preset value enable if already set since last time */
1108 if (host->ops->enable_preset_value)
1109 host->ops->enable_preset_value(host, false);
1110
776 err = mmc_send_app_op_cond(host, 0, &ocr); 1111 err = mmc_send_app_op_cond(host, 0, &ocr);
777 if (err) 1112 if (err)
778 return err; 1113 return err;
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 3d8800fa7600..4b34b24f3f76 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -5,7 +5,7 @@
5 5
6extern struct device_type sd_type; 6extern struct device_type sd_type;
7 7
8int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid); 8int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);
9int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card); 9int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);
10void mmc_decode_cid(struct mmc_card *card); 10void mmc_decode_cid(struct mmc_card *card);
11int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, 11int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 76af349c14b4..021fed153804 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -21,10 +21,10 @@
21#include "core.h" 21#include "core.h"
22#include "sd_ops.h" 22#include "sd_ops.h"
23 23
24static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) 24int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
25{ 25{
26 int err; 26 int err;
27 struct mmc_command cmd; 27 struct mmc_command cmd = {0};
28 28
29 BUG_ON(!host); 29 BUG_ON(!host);
30 BUG_ON(card && (card->host != host)); 30 BUG_ON(card && (card->host != host));
@@ -49,6 +49,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
49 49
50 return 0; 50 return 0;
51} 51}
52EXPORT_SYMBOL_GPL(mmc_app_cmd);
52 53
53/** 54/**
54 * mmc_wait_for_app_cmd - start an application command and wait for 55 * mmc_wait_for_app_cmd - start an application command and wait for
@@ -66,7 +67,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
66int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, 67int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
67 struct mmc_command *cmd, int retries) 68 struct mmc_command *cmd, int retries)
68{ 69{
69 struct mmc_request mrq; 70 struct mmc_request mrq = {0};
70 71
71 int i, err; 72 int i, err;
72 73
@@ -119,13 +120,11 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
119int mmc_app_set_bus_width(struct mmc_card *card, int width) 120int mmc_app_set_bus_width(struct mmc_card *card, int width)
120{ 121{
121 int err; 122 int err;
122 struct mmc_command cmd; 123 struct mmc_command cmd = {0};
123 124
124 BUG_ON(!card); 125 BUG_ON(!card);
125 BUG_ON(!card->host); 126 BUG_ON(!card->host);
126 127
127 memset(&cmd, 0, sizeof(struct mmc_command));
128
129 cmd.opcode = SD_APP_SET_BUS_WIDTH; 128 cmd.opcode = SD_APP_SET_BUS_WIDTH;
130 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; 129 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
131 130
@@ -149,13 +148,11 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
149 148
150int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 149int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
151{ 150{
152 struct mmc_command cmd; 151 struct mmc_command cmd = {0};
153 int i, err = 0; 152 int i, err = 0;
154 153
155 BUG_ON(!host); 154 BUG_ON(!host);
156 155
157 memset(&cmd, 0, sizeof(struct mmc_command));
158
159 cmd.opcode = SD_APP_OP_COND; 156 cmd.opcode = SD_APP_OP_COND;
160 if (mmc_host_is_spi(host)) 157 if (mmc_host_is_spi(host))
161 cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */ 158 cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */
@@ -194,7 +191,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
194 191
195int mmc_send_if_cond(struct mmc_host *host, u32 ocr) 192int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
196{ 193{
197 struct mmc_command cmd; 194 struct mmc_command cmd = {0};
198 int err; 195 int err;
199 static const u8 test_pattern = 0xAA; 196 static const u8 test_pattern = 0xAA;
200 u8 result_pattern; 197 u8 result_pattern;
@@ -226,13 +223,11 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
226int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) 223int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
227{ 224{
228 int err; 225 int err;
229 struct mmc_command cmd; 226 struct mmc_command cmd = {0};
230 227
231 BUG_ON(!host); 228 BUG_ON(!host);
232 BUG_ON(!rca); 229 BUG_ON(!rca);
233 230
234 memset(&cmd, 0, sizeof(struct mmc_command));
235
236 cmd.opcode = SD_SEND_RELATIVE_ADDR; 231 cmd.opcode = SD_SEND_RELATIVE_ADDR;
237 cmd.arg = 0; 232 cmd.arg = 0;
238 cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; 233 cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR;
@@ -249,9 +244,9 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)
249int mmc_app_send_scr(struct mmc_card *card, u32 *scr) 244int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
250{ 245{
251 int err; 246 int err;
252 struct mmc_request mrq; 247 struct mmc_request mrq = {0};
253 struct mmc_command cmd; 248 struct mmc_command cmd = {0};
254 struct mmc_data data; 249 struct mmc_data data = {0};
255 struct scatterlist sg; 250 struct scatterlist sg;
256 void *data_buf; 251 void *data_buf;
257 252
@@ -272,10 +267,6 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
272 if (data_buf == NULL) 267 if (data_buf == NULL)
273 return -ENOMEM; 268 return -ENOMEM;
274 269
275 memset(&mrq, 0, sizeof(struct mmc_request));
276 memset(&cmd, 0, sizeof(struct mmc_command));
277 memset(&data, 0, sizeof(struct mmc_data));
278
279 mrq.cmd = &cmd; 270 mrq.cmd = &cmd;
280 mrq.data = &data; 271 mrq.data = &data;
281 272
@@ -312,9 +303,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
312int mmc_sd_switch(struct mmc_card *card, int mode, int group, 303int mmc_sd_switch(struct mmc_card *card, int mode, int group,
313 u8 value, u8 *resp) 304 u8 value, u8 *resp)
314{ 305{
315 struct mmc_request mrq; 306 struct mmc_request mrq = {0};
316 struct mmc_command cmd; 307 struct mmc_command cmd = {0};
317 struct mmc_data data; 308 struct mmc_data data = {0};
318 struct scatterlist sg; 309 struct scatterlist sg;
319 310
320 BUG_ON(!card); 311 BUG_ON(!card);
@@ -325,10 +316,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
325 mode = !!mode; 316 mode = !!mode;
326 value &= 0xF; 317 value &= 0xF;
327 318
328 memset(&mrq, 0, sizeof(struct mmc_request));
329 memset(&cmd, 0, sizeof(struct mmc_command));
330 memset(&data, 0, sizeof(struct mmc_data));
331
332 mrq.cmd = &cmd; 319 mrq.cmd = &cmd;
333 mrq.data = &data; 320 mrq.data = &data;
334 321
@@ -361,9 +348,9 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
361int mmc_app_sd_status(struct mmc_card *card, void *ssr) 348int mmc_app_sd_status(struct mmc_card *card, void *ssr)
362{ 349{
363 int err; 350 int err;
364 struct mmc_request mrq; 351 struct mmc_request mrq = {0};
365 struct mmc_command cmd; 352 struct mmc_command cmd = {0};
366 struct mmc_data data; 353 struct mmc_data data = {0};
367 struct scatterlist sg; 354 struct scatterlist sg;
368 355
369 BUG_ON(!card); 356 BUG_ON(!card);
@@ -376,10 +363,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
376 if (err) 363 if (err)
377 return err; 364 return err;
378 365
379 memset(&mrq, 0, sizeof(struct mmc_request));
380 memset(&cmd, 0, sizeof(struct mmc_command));
381 memset(&data, 0, sizeof(struct mmc_data));
382
383 mrq.cmd = &cmd; 366 mrq.cmd = &cmd;
384 mrq.data = &data; 367 mrq.data = &data;
385 368
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index db0f0b44d684..4d0c15bfa514 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -16,6 +16,7 @@
16#include <linux/mmc/card.h> 16#include <linux/mmc/card.h>
17#include <linux/mmc/sdio.h> 17#include <linux/mmc/sdio.h>
18#include <linux/mmc/sdio_func.h> 18#include <linux/mmc/sdio_func.h>
19#include <linux/mmc/sdio_ids.h>
19 20
20#include "core.h" 21#include "core.h"
21#include "bus.h" 22#include "bus.h"
@@ -31,6 +32,11 @@ static int sdio_read_fbr(struct sdio_func *func)
31 int ret; 32 int ret;
32 unsigned char data; 33 unsigned char data;
33 34
35 if (mmc_card_nonstd_func_interface(func->card)) {
36 func->class = SDIO_CLASS_NONE;
37 return 0;
38 }
39
34 ret = mmc_io_rw_direct(func->card, 0, 0, 40 ret = mmc_io_rw_direct(func->card, 0, 0,
35 SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data); 41 SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);
36 if (ret) 42 if (ret)
@@ -181,7 +187,7 @@ static int sdio_disable_cd(struct mmc_card *card)
181 int ret; 187 int ret;
182 u8 ctrl; 188 u8 ctrl;
183 189
184 if (!card->cccr.disable_cd) 190 if (!mmc_card_disable_cd(card))
185 return 0; 191 return 0;
186 192
187 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl); 193 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
@@ -363,8 +369,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
363 goto err; 369 goto err;
364 } 370 }
365 371
366 if (ocr & R4_MEMORY_PRESENT 372 if ((ocr & R4_MEMORY_PRESENT) &&
367 && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { 373 mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) {
368 card->type = MMC_TYPE_SD_COMBO; 374 card->type = MMC_TYPE_SD_COMBO;
369 375
370 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || 376 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
@@ -466,7 +472,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
466 472
467 card = oldcard; 473 card = oldcard;
468 } 474 }
469 mmc_fixup_device(card); 475 mmc_fixup_device(card, NULL);
470 476
471 if (card->type == MMC_TYPE_SD_COMBO) { 477 if (card->type == MMC_TYPE_SD_COMBO) {
472 err = mmc_sd_setup_card(host, card, oldcard != NULL); 478 err = mmc_sd_setup_card(host, card, oldcard != NULL);
@@ -625,7 +631,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
625 } 631 }
626 } 632 }
627 633
628 if (!err && host->pm_flags & MMC_PM_KEEP_POWER) { 634 if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
629 mmc_claim_host(host); 635 mmc_claim_host(host);
630 sdio_disable_wide(host->card); 636 sdio_disable_wide(host->card);
631 mmc_release_host(host); 637 mmc_release_host(host);
@@ -645,10 +651,10 @@ static int mmc_sdio_resume(struct mmc_host *host)
645 mmc_claim_host(host); 651 mmc_claim_host(host);
646 652
647 /* No need to reinitialize powered-resumed nonremovable cards */ 653 /* No need to reinitialize powered-resumed nonremovable cards */
648 if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host)) 654 if (mmc_card_is_removable(host) || !mmc_card_keep_power(host))
649 err = mmc_sdio_init_card(host, host->ocr, host->card, 655 err = mmc_sdio_init_card(host, host->ocr, host->card,
650 (host->pm_flags & MMC_PM_KEEP_POWER)); 656 mmc_card_keep_power(host));
651 else if (mmc_card_is_powered_resumed(host)) { 657 else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
652 /* We may have switched to 1-bit mode during suspend */ 658 /* We may have switched to 1-bit mode during suspend */
653 err = sdio_enable_4bit_bus(host->card); 659 err = sdio_enable_4bit_bus(host->card);
654 if (err > 0) { 660 if (err > 0) {
@@ -691,7 +697,7 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
691 697
692 mmc_claim_host(host); 698 mmc_claim_host(host);
693 ret = mmc_sdio_init_card(host, host->ocr, host->card, 699 ret = mmc_sdio_init_card(host, host->ocr, host->card,
694 (host->pm_flags & MMC_PM_KEEP_POWER)); 700 mmc_card_keep_power(host));
695 if (!ret && host->sdio_irqs) 701 if (!ret && host->sdio_irqs)
696 mmc_signal_sdio_irq(host); 702 mmc_signal_sdio_irq(host);
697 mmc_release_host(host); 703 mmc_release_host(host);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index b3001617e67d..03ead028d2ce 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -31,6 +31,17 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
31{ 31{
32 int i, ret, count; 32 int i, ret, count;
33 unsigned char pending; 33 unsigned char pending;
34 struct sdio_func *func;
35
36 /*
37 * Optimization, if there is only 1 function interrupt registered
38 * call irq handler directly
39 */
40 func = card->sdio_single_irq;
41 if (func) {
42 func->irq_handler(func);
43 return 1;
44 }
34 45
35 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending); 46 ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
36 if (ret) { 47 if (ret) {
@@ -42,7 +53,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card)
42 count = 0; 53 count = 0;
43 for (i = 1; i <= 7; i++) { 54 for (i = 1; i <= 7; i++) {
44 if (pending & (1 << i)) { 55 if (pending & (1 << i)) {
45 struct sdio_func *func = card->sdio_func[i - 1]; 56 func = card->sdio_func[i - 1];
46 if (!func) { 57 if (!func) {
47 printk(KERN_WARNING "%s: pending IRQ for " 58 printk(KERN_WARNING "%s: pending IRQ for "
48 "non-existent function\n", 59 "non-existent function\n",
@@ -186,6 +197,24 @@ static int sdio_card_irq_put(struct mmc_card *card)
186 return 0; 197 return 0;
187} 198}
188 199
200/* If there is only 1 function registered, set sdio_single_irq */
201static void sdio_single_irq_set(struct mmc_card *card)
202{
203 struct sdio_func *func;
204 int i;
205
206 card->sdio_single_irq = NULL;
207 if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
208 card->host->sdio_irqs == 1)
209 for (i = 0; i < card->sdio_funcs; i++) {
210 func = card->sdio_func[i];
211 if (func && func->irq_handler) {
212 card->sdio_single_irq = func;
213 break;
214 }
215 }
216}
217
189/** 218/**
190 * sdio_claim_irq - claim the IRQ for a SDIO function 219 * sdio_claim_irq - claim the IRQ for a SDIO function
191 * @func: SDIO function 220 * @func: SDIO function
@@ -227,6 +256,7 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
227 ret = sdio_card_irq_get(func->card); 256 ret = sdio_card_irq_get(func->card);
228 if (ret) 257 if (ret)
229 func->irq_handler = NULL; 258 func->irq_handler = NULL;
259 sdio_single_irq_set(func->card);
230 260
231 return ret; 261 return ret;
232} 262}
@@ -251,6 +281,7 @@ int sdio_release_irq(struct sdio_func *func)
251 if (func->irq_handler) { 281 if (func->irq_handler) {
252 func->irq_handler = NULL; 282 func->irq_handler = NULL;
253 sdio_card_irq_put(func->card); 283 sdio_card_irq_put(func->card);
284 sdio_single_irq_set(func->card);
254 } 285 }
255 286
256 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg); 287 ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
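
The sdio_irq.c change above caches the one claimed handler in card->sdio_single_irq so that, when exactly one function has an IRQ registered, the interrupt thread can call it directly instead of first reading the CCCR pending register. A simplified stand-alone model of that fast-path/slow-path dispatch (the kernel additionally requires MMC_CAP_SDIO_IRQ and tracks the count in host->sdio_irqs):

    #include <stdio.h>

    #define NFUNCS 7

    typedef void (*irq_handler_t)(void);

    static irq_handler_t handlers[NFUNCS];
    static irq_handler_t single_irq;     /* cached when exactly one handler exists */

    static void wifi_irq(void)
    {
            printf("wl1271: IRQ handled\n");
    }

    /* Recompute the cache after every claim or release. */
    static void single_irq_set(void)
    {
            int i, count = 0, last = 0;

            single_irq = NULL;
            for (i = 0; i < NFUNCS; i++) {
                    if (handlers[i]) {
                            count++;
                            last = i;
                    }
            }
            if (count == 1)
                    single_irq = handlers[last];
    }

    static void process_pending(unsigned int pending)
    {
            int i;

            if (single_irq) {             /* fast path: skip the pending read */
                    single_irq();
                    return;
            }
            for (i = 0; i < NFUNCS; i++)  /* slow path: scan the pending bits */
                    if ((pending & (1u << i)) && handlers[i])
                            handlers[i]();
    }

    int main(void)
    {
            handlers[1] = wifi_irq;       /* claim the IRQ of function 2 */
            single_irq_set();
            process_pending(0);           /* mask not consulted on the fast path */
            return 0;
    }
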
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index dea36d9c22e6..f087d876c573 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -21,13 +21,11 @@
21 21
22int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 22int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
23{ 23{
24 struct mmc_command cmd; 24 struct mmc_command cmd = {0};
25 int i, err = 0; 25 int i, err = 0;
26 26
27 BUG_ON(!host); 27 BUG_ON(!host);
28 28
29 memset(&cmd, 0, sizeof(struct mmc_command));
30
31 cmd.opcode = SD_IO_SEND_OP_COND; 29 cmd.opcode = SD_IO_SEND_OP_COND;
32 cmd.arg = ocr; 30 cmd.arg = ocr;
33 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR; 31 cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
@@ -70,7 +68,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
70static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn, 68static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
71 unsigned addr, u8 in, u8 *out) 69 unsigned addr, u8 in, u8 *out)
72{ 70{
73 struct mmc_command cmd; 71 struct mmc_command cmd = {0};
74 int err; 72 int err;
75 73
76 BUG_ON(!host); 74 BUG_ON(!host);
@@ -80,8 +78,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
80 if (addr & ~0x1FFFF) 78 if (addr & ~0x1FFFF)
81 return -EINVAL; 79 return -EINVAL;
82 80
83 memset(&cmd, 0, sizeof(struct mmc_command));
84
85 cmd.opcode = SD_IO_RW_DIRECT; 81 cmd.opcode = SD_IO_RW_DIRECT;
86 cmd.arg = write ? 0x80000000 : 0x00000000; 82 cmd.arg = write ? 0x80000000 : 0x00000000;
87 cmd.arg |= fn << 28; 83 cmd.arg |= fn << 28;
@@ -125,9 +121,9 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
125int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, 121int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
126 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) 122 unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
127{ 123{
128 struct mmc_request mrq; 124 struct mmc_request mrq = {0};
129 struct mmc_command cmd; 125 struct mmc_command cmd = {0};
130 struct mmc_data data; 126 struct mmc_data data = {0};
131 struct scatterlist sg; 127 struct scatterlist sg;
132 128
133 BUG_ON(!card); 129 BUG_ON(!card);
@@ -140,10 +136,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
140 if (addr & ~0x1FFFF) 136 if (addr & ~0x1FFFF)
141 return -EINVAL; 137 return -EINVAL;
142 138
143 memset(&mrq, 0, sizeof(struct mmc_request));
144 memset(&cmd, 0, sizeof(struct mmc_command));
145 memset(&data, 0, sizeof(struct mmc_data));
146
147 mrq.cmd = &cmd; 139 mrq.cmd = &cmd;
148 mrq.data = &data; 140 mrq.data = &data;
149 141
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 94df40531c38..56dbf3f6ad08 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -154,7 +154,7 @@ config MMC_SDHCI_DOVE
154 If unsure, say N. 154 If unsure, say N.
155 155
156config MMC_SDHCI_TEGRA 156config MMC_SDHCI_TEGRA
157 tristate "SDHCI platform support for the Tegra SD/MMC Controller" 157 bool "SDHCI platform support for the Tegra SD/MMC Controller"
158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA 158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
159 select MMC_SDHCI_IO_ACCESSORS 159 select MMC_SDHCI_IO_ACCESSORS
160 help 160 help
@@ -535,6 +535,37 @@ config MMC_JZ4740
535 If you have a board based on such a SoC and with a SD/MMC slot, 535 If you have a board based on such a SoC and with a SD/MMC slot,
536 say Y or M here. 536 say Y or M here.
537 537
538config MMC_VUB300
539 tristate "VUB300 USB to SDIO/SD/MMC Host Controller support"
540 depends on USB
541 help
542 This selects support for Elan Digital Systems' VUB300 chip.
543
544 The VUB300 is a USB-SDIO Host Controller Interface chip
545 that enables the host computer to use SDIO/SD/MMC cards
546 via a USB 2.0 or USB 1.1 host.
547
548 The VUB300 chip will be found in both physically separate
549 USB to SDIO/SD/MMC adapters and embedded on some motherboards.
550
551 The VUB300 chip supports SD and MMC memory cards in addition
552 to single and multifunction SDIO cards.
553
554 Some SDIO cards will need a firmware file to be loaded and
 555 sent to the VUB300 chip in order to achieve better data throughput.
 556 Download these "Offload Pseudocode" files from Elan Digital Systems'
 557 website http://www.elandigitalsystems.com/support/downloads.php
558 and put them in /lib/firmware. Note that without these additional
559 firmware files the VUB300 chip will still function, but not at
560 the best obtainable data rate.
561
562 To compile this mmc host controller driver as a module,
563 choose M here: the module will be called vub300.
564
 565 If you have a computer with an embedded VUB300 chip,
 566 or if you intend to connect a USB adapter based on a
 567 VUB300 chip, say Y or M here.
568
538config MMC_USHC 569config MMC_USHC
539 tristate "USB SD Host Controller (USHC) support" 570 tristate "USB SD Host Controller (USHC) support"
540 depends on USB 571 depends on USB
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 4f1df0aae574..58a5cf73d6e9 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
41obj-$(CONFIG_MMC_DW) += dw_mmc.o 41obj-$(CONFIG_MMC_DW) += dw_mmc.o
42obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 42obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
43obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 43obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
44obj-$(CONFIG_MMC_VUB300) += vub300.o
44obj-$(CONFIG_MMC_USHC) += ushc.o 45obj-$(CONFIG_MMC_USHC) += ushc.o
45 46
46obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o 47obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 87e1f57ec9ba..66dcddb9c205 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1769,9 +1769,6 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
1769 int i, ret; 1769 int i, ret;
1770 struct dw_mci *host = platform_get_drvdata(pdev); 1770 struct dw_mci *host = platform_get_drvdata(pdev);
1771 1771
1772 if (host->vmmc)
1773 regulator_enable(host->vmmc);
1774
1775 for (i = 0; i < host->num_slots; i++) { 1772 for (i = 0; i < host->num_slots; i++) {
1776 struct dw_mci_slot *slot = host->slot[i]; 1773 struct dw_mci_slot *slot = host->slot[i];
1777 if (!slot) 1774 if (!slot)
@@ -1798,6 +1795,9 @@ static int dw_mci_resume(struct platform_device *pdev)
1798 int i, ret; 1795 int i, ret;
1799 struct dw_mci *host = platform_get_drvdata(pdev); 1796 struct dw_mci *host = platform_get_drvdata(pdev);
1800 1797
1798 if (host->vmmc)
1799 regulator_enable(host->vmmc);
1800
1801 if (host->dma_ops->init) 1801 if (host->dma_ops->init)
1802 host->dma_ops->init(host); 1802 host->dma_ops->init(host);
1803 1803
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index f8b5f37007b2..936bbca19c0a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -18,11 +18,9 @@
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/device.h> 20#include <linux/device.h>
21
22#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
23 22#include <linux/scatterlist.h>
24#include <asm/scatterlist.h> 23#include <linux/io.h>
25#include <asm/io.h>
26 24
27#include "sdhci.h" 25#include "sdhci.h"
28 26
@@ -46,14 +44,14 @@ struct sdhci_pci_slot;
46struct sdhci_pci_fixes { 44struct sdhci_pci_fixes {
47 unsigned int quirks; 45 unsigned int quirks;
48 46
49 int (*probe)(struct sdhci_pci_chip*); 47 int (*probe) (struct sdhci_pci_chip *);
50 48
51 int (*probe_slot)(struct sdhci_pci_slot*); 49 int (*probe_slot) (struct sdhci_pci_slot *);
52 void (*remove_slot)(struct sdhci_pci_slot*, int); 50 void (*remove_slot) (struct sdhci_pci_slot *, int);
53 51
54 int (*suspend)(struct sdhci_pci_chip*, 52 int (*suspend) (struct sdhci_pci_chip *,
55 pm_message_t); 53 pm_message_t);
56 int (*resume)(struct sdhci_pci_chip*); 54 int (*resume) (struct sdhci_pci_chip *);
57}; 55};
58 56
59struct sdhci_pci_slot { 57struct sdhci_pci_slot {
@@ -329,6 +327,11 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
329 return ret; 327 return ret;
330 } 328 }
331 329
 330 /* quirk for unstable RO-detection on JM388 chips */
331 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
332 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
333 chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
334
332 return 0; 335 return 0;
333} 336}
334 337
@@ -402,7 +405,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
402 405
403 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 406 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
404 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 407 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
405 for (i = 0;i < chip->num_slots;i++) 408 for (i = 0; i < chip->num_slots; i++)
406 jmicron_enable_mmc(chip->slots[i]->host, 0); 409 jmicron_enable_mmc(chip->slots[i]->host, 0);
407 } 410 }
408 411
@@ -415,7 +418,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
415 418
416 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || 419 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
417 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { 420 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
418 for (i = 0;i < chip->num_slots;i++) 421 for (i = 0; i < chip->num_slots; i++)
419 jmicron_enable_mmc(chip->slots[i]->host, 1); 422 jmicron_enable_mmc(chip->slots[i]->host, 1);
420 } 423 }
421 424
@@ -798,7 +801,7 @@ static struct sdhci_ops sdhci_pci_ops = {
798 801
799#ifdef CONFIG_PM 802#ifdef CONFIG_PM
800 803
801static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) 804static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
802{ 805{
803 struct sdhci_pci_chip *chip; 806 struct sdhci_pci_chip *chip;
804 struct sdhci_pci_slot *slot; 807 struct sdhci_pci_slot *slot;
@@ -810,7 +813,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
810 if (!chip) 813 if (!chip)
811 return 0; 814 return 0;
812 815
813 for (i = 0;i < chip->num_slots;i++) { 816 for (i = 0; i < chip->num_slots; i++) {
814 slot = chip->slots[i]; 817 slot = chip->slots[i];
815 if (!slot) 818 if (!slot)
816 continue; 819 continue;
@@ -818,7 +821,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
818 ret = sdhci_suspend_host(slot->host, state); 821 ret = sdhci_suspend_host(slot->host, state);
819 822
820 if (ret) { 823 if (ret) {
821 for (i--;i >= 0;i--) 824 for (i--; i >= 0; i--)
822 sdhci_resume_host(chip->slots[i]->host); 825 sdhci_resume_host(chip->slots[i]->host);
823 return ret; 826 return ret;
824 } 827 }
@@ -833,7 +836,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
833 if (chip->fixes && chip->fixes->suspend) { 836 if (chip->fixes && chip->fixes->suspend) {
834 ret = chip->fixes->suspend(chip, state); 837 ret = chip->fixes->suspend(chip, state);
835 if (ret) { 838 if (ret) {
836 for (i = chip->num_slots - 1;i >= 0;i--) 839 for (i = chip->num_slots - 1; i >= 0; i--)
837 sdhci_resume_host(chip->slots[i]->host); 840 sdhci_resume_host(chip->slots[i]->host);
838 return ret; 841 return ret;
839 } 842 }
@@ -855,7 +858,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
855 return 0; 858 return 0;
856} 859}
857 860
858static int sdhci_pci_resume (struct pci_dev *pdev) 861static int sdhci_pci_resume(struct pci_dev *pdev)
859{ 862{
860 struct sdhci_pci_chip *chip; 863 struct sdhci_pci_chip *chip;
861 struct sdhci_pci_slot *slot; 864 struct sdhci_pci_slot *slot;
@@ -877,7 +880,7 @@ static int sdhci_pci_resume (struct pci_dev *pdev)
877 return ret; 880 return ret;
878 } 881 }
879 882
880 for (i = 0;i < chip->num_slots;i++) { 883 for (i = 0; i < chip->num_slots; i++) {
881 slot = chip->slots[i]; 884 slot = chip->slots[i];
882 if (!slot) 885 if (!slot)
883 continue; 886 continue;
@@ -1059,7 +1062,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
1059 } 1062 }
1060 1063
1061 chip->pdev = pdev; 1064 chip->pdev = pdev;
1062 chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; 1065 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
1063 if (chip->fixes) 1066 if (chip->fixes)
1064 chip->quirks = chip->fixes->quirks; 1067 chip->quirks = chip->fixes->quirks;
1065 chip->num_slots = slots; 1068 chip->num_slots = slots;
@@ -1074,10 +1077,10 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
1074 1077
1075 slots = chip->num_slots; /* Quirk may have changed this */ 1078 slots = chip->num_slots; /* Quirk may have changed this */
1076 1079
1077 for (i = 0;i < slots;i++) { 1080 for (i = 0; i < slots; i++) {
1078 slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); 1081 slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
1079 if (IS_ERR(slot)) { 1082 if (IS_ERR(slot)) {
1080 for (i--;i >= 0;i--) 1083 for (i--; i >= 0; i--)
1081 sdhci_pci_remove_slot(chip->slots[i]); 1084 sdhci_pci_remove_slot(chip->slots[i]);
1082 ret = PTR_ERR(slot); 1085 ret = PTR_ERR(slot);
1083 goto free; 1086 goto free;
@@ -1105,7 +1108,7 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
1105 chip = pci_get_drvdata(pdev); 1108 chip = pci_get_drvdata(pdev);
1106 1109
1107 if (chip) { 1110 if (chip) {
1108 for (i = 0;i < chip->num_slots; i++) 1111 for (i = 0; i < chip->num_slots; i++)
1109 sdhci_pci_remove_slot(chip->slots[i]); 1112 sdhci_pci_remove_slot(chip->slots[i]);
1110 1113
1111 pci_set_drvdata(pdev, NULL); 1114 pci_set_drvdata(pdev, NULL);
@@ -1116,9 +1119,9 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)
1116} 1119}
1117 1120
1118static struct pci_driver sdhci_driver = { 1121static struct pci_driver sdhci_driver = {
1119 .name = "sdhci-pci", 1122 .name = "sdhci-pci",
1120 .id_table = pci_ids, 1123 .id_table = pci_ids,
1121 .probe = sdhci_pci_probe, 1124 .probe = sdhci_pci_probe,
1122 .remove = __devexit_p(sdhci_pci_remove), 1125 .remove = __devexit_p(sdhci_pci_remove),
1123 .suspend = sdhci_pci_suspend, 1126 .suspend = sdhci_pci_suspend,
1124 .resume = sdhci_pci_resume, 1127 .resume = sdhci_pci_resume,
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
index 5a61208cbc66..089c9a68b7b1 100644
--- a/drivers/mmc/host/sdhci-pxa.c
+++ b/drivers/mmc/host/sdhci-pxa.c
@@ -69,7 +69,45 @@ static void set_clock(struct sdhci_host *host, unsigned int clock)
69 } 69 }
70} 70}
71 71
72static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
73{
74 u16 ctrl_2;
75
76 /*
77 * Set V18_EN -- UHS modes do not work without this.
78 * does not change signaling voltage
79 */
80 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
81
82 /* Select Bus Speed Mode for host */
83 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
84 switch (uhs) {
85 case MMC_TIMING_UHS_SDR12:
86 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
87 break;
88 case MMC_TIMING_UHS_SDR25:
89 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
90 break;
91 case MMC_TIMING_UHS_SDR50:
92 ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
93 break;
94 case MMC_TIMING_UHS_SDR104:
95 ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
96 break;
97 case MMC_TIMING_UHS_DDR50:
98 ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
99 break;
100 }
101
102 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
103 pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n",
104 __func__, mmc_hostname(host->mmc), uhs, ctrl_2);
105
106 return 0;
107}
108
72static struct sdhci_ops sdhci_pxa_ops = { 109static struct sdhci_ops sdhci_pxa_ops = {
110 .set_uhs_signaling = set_uhs_signaling,
73 .set_clock = set_clock, 111 .set_clock = set_clock,
74}; 112};
75 113
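
Note: the UHS Mode Select values the new hook writes into Host Control 2 form a small 3-bit field. A standalone sketch of the mapping (the enum names are illustrative; the field values match the SDHCI_CTRL_UHS_* defines added to sdhci.h further down in this diff):

#include <stdio.h>

enum uhs_timing { UHS_SDR12, UHS_SDR25, UHS_SDR50, UHS_SDR104, UHS_DDR50 };

/* 3-bit UHS Mode Select value for the Host Control 2 register. */
static unsigned short uhs_field(enum uhs_timing t)
{
        switch (t) {
        case UHS_SDR25:  return 0x0001;
        case UHS_SDR50:  return 0x0002;
        case UHS_SDR104: return 0x0003;
        case UHS_DDR50:  return 0x0004;
        case UHS_SDR12:
        default:         return 0x0000;
        }
}

int main(void)
{
        printf("SDR104 mode field = 0x%04x\n", uhs_field(UHS_SDR104));
        return 0;
}
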
@@ -136,11 +174,19 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
136 host->hw_name = "MMC"; 174 host->hw_name = "MMC";
137 host->ops = &sdhci_pxa_ops; 175 host->ops = &sdhci_pxa_ops;
138 host->irq = irq; 176 host->irq = irq;
139 host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 177 host->quirks = SDHCI_QUIRK_BROKEN_ADMA
178 | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
179 | SDHCI_QUIRK_32BIT_DMA_ADDR
180 | SDHCI_QUIRK_32BIT_DMA_SIZE
181 | SDHCI_QUIRK_32BIT_ADMA_SIZE
182 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
140 183
141 if (pdata->quirks) 184 if (pdata->quirks)
142 host->quirks |= pdata->quirks; 185 host->quirks |= pdata->quirks;
143 186
187 /* enable 1.8V DDR capable */
188 host->mmc->caps |= MMC_CAP_1_8V_DDR;
189
144 /* If slot design supports 8 bit data, indicate this to MMC. */ 190 /* If slot design supports 8 bit data, indicate this to MMC. */
145 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) 191 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
146 host->mmc->caps |= MMC_CAP_8_BIT_DATA; 192 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f7e1f964395f..343c97edba32 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -184,6 +184,8 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
184 clk_enable(clk); 184 clk_enable(clk);
185 pltfm_host->clk = clk; 185 pltfm_host->clk = clk;
186 186
187 host->mmc->pm_caps = plat->pm_flags;
188
187 if (plat->is_8bit) 189 if (plat->is_8bit)
188 host->mmc->caps |= MMC_CAP_8_BIT_DATA; 190 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
189 191
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 5d20661bc357..58d5436ff649 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -38,13 +38,16 @@
38#define SDHCI_USE_LEDS_CLASS 38#define SDHCI_USE_LEDS_CLASS
39#endif 39#endif
40 40
41#define MAX_TUNING_LOOP 40
42
41static unsigned int debug_quirks = 0; 43static unsigned int debug_quirks = 0;
42 44
43static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
44static void sdhci_finish_data(struct sdhci_host *); 45static void sdhci_finish_data(struct sdhci_host *);
45 46
46static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); 47static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
47static void sdhci_finish_command(struct sdhci_host *); 48static void sdhci_finish_command(struct sdhci_host *);
49static int sdhci_execute_tuning(struct mmc_host *mmc);
50static void sdhci_tuning_timer(unsigned long data);
48 51
49static void sdhci_dumpregs(struct sdhci_host *host) 52static void sdhci_dumpregs(struct sdhci_host *host)
50{ 53{
@@ -84,6 +87,8 @@ static void sdhci_dumpregs(struct sdhci_host *host)
84 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", 87 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
85 sdhci_readw(host, SDHCI_COMMAND), 88 sdhci_readw(host, SDHCI_COMMAND),
86 sdhci_readl(host, SDHCI_MAX_CURRENT)); 89 sdhci_readl(host, SDHCI_MAX_CURRENT));
90 printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n",
91 sdhci_readw(host, SDHCI_HOST_CONTROL2));
87 92
88 if (host->flags & SDHCI_USE_ADMA) 93 if (host->flags & SDHCI_USE_ADMA)
89 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 94 printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
@@ -157,6 +162,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
157 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 162 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
158 ier = sdhci_readl(host, SDHCI_INT_ENABLE); 163 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
159 164
165 if (host->ops->platform_reset_enter)
166 host->ops->platform_reset_enter(host, mask);
167
160 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 168 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
161 169
162 if (mask & SDHCI_RESET_ALL) 170 if (mask & SDHCI_RESET_ALL)
@@ -177,6 +185,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
177 mdelay(1); 185 mdelay(1);
178 } 186 }
179 187
188 if (host->ops->platform_reset_exit)
189 host->ops->platform_reset_exit(host, mask);
190
180 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) 191 if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
181 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); 192 sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
182} 193}
@@ -591,9 +602,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
591 data->sg_len, direction); 602 data->sg_len, direction);
592} 603}
593 604
594static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) 605static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
595{ 606{
596 u8 count; 607 u8 count;
608 struct mmc_data *data = cmd->data;
597 unsigned target_timeout, current_timeout; 609 unsigned target_timeout, current_timeout;
598 610
599 /* 611 /*
@@ -605,9 +617,16 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
605 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 617 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
606 return 0xE; 618 return 0xE;
607 619
620 /* Unspecified timeout, assume max */
621 if (!data && !cmd->cmd_timeout_ms)
622 return 0xE;
623
608 /* timeout in us */ 624 /* timeout in us */
609 target_timeout = data->timeout_ns / 1000 + 625 if (!data)
610 data->timeout_clks / host->clock; 626 target_timeout = cmd->cmd_timeout_ms * 1000;
627 else
628 target_timeout = data->timeout_ns / 1000 +
629 data->timeout_clks / host->clock;
611 630
612 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 631 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
613 host->timeout_clk = host->clock / 1000; 632 host->timeout_clk = host->clock / 1000;
@@ -622,6 +641,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
622 * => 641 * =>
623 * (1) / (2) > 2^6 642 * (1) / (2) > 2^6
624 */ 643 */
644 BUG_ON(!host->timeout_clk);
625 count = 0; 645 count = 0;
626 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 646 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
627 while (current_timeout < target_timeout) { 647 while (current_timeout < target_timeout) {
@@ -632,8 +652,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
632 } 652 }
633 653
634 if (count >= 0xF) { 654 if (count >= 0xF) {
635 printk(KERN_WARNING "%s: Too large timeout requested!\n", 655 printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
636 mmc_hostname(host->mmc)); 656 mmc_hostname(host->mmc), cmd->opcode);
637 count = 0xE; 657 count = 0xE;
638 } 658 }
639 659
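
Note: a minimal userspace sketch of the timeout encoding computed above, assuming (as the hunk does) a target timeout in microseconds and a timeout clock in kHz; the count written to SDHCI_TIMEOUT_CONTROL selects a limit of TMCLK * 2^(13 + count):

#include <stdio.h>

static unsigned char calc_timeout_count(unsigned int target_timeout_us,
                                        unsigned int timeout_clk_khz)
{
        unsigned char count = 0;
        /* smallest encodable timeout is TMCLK * 2^13 */
        unsigned int current_timeout = (1 << 13) * 1000 / timeout_clk_khz;

        while (current_timeout < target_timeout_us) {
                count++;
                current_timeout <<= 1;
                if (count >= 0xF)
                        break;
        }
        return count >= 0xF ? 0xE : count;
}

int main(void)
{
        /* e.g. 100 ms data timeout with a 48 MHz timeout clock -> 0xA */
        printf("count = 0x%X\n", calc_timeout_count(100000, 48000));
        return 0;
}
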
@@ -651,15 +671,21 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
651 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); 671 sdhci_clear_set_irqs(host, dma_irqs, pio_irqs);
652} 672}
653 673
654static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) 674static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
655{ 675{
656 u8 count; 676 u8 count;
657 u8 ctrl; 677 u8 ctrl;
678 struct mmc_data *data = cmd->data;
658 int ret; 679 int ret;
659 680
660 WARN_ON(host->data); 681 WARN_ON(host->data);
661 682
662 if (data == NULL) 683 if (data || (cmd->flags & MMC_RSP_BUSY)) {
684 count = sdhci_calc_timeout(host, cmd);
685 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
686 }
687
688 if (!data)
663 return; 689 return;
664 690
665 /* Sanity checks */ 691 /* Sanity checks */
@@ -669,9 +695,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
669 695
670 host->data = data; 696 host->data = data;
671 host->data_early = 0; 697 host->data_early = 0;
672 698 host->data->bytes_xfered = 0;
673 count = sdhci_calc_timeout(host, data);
674 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
675 699
676 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) 700 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
677 host->flags |= SDHCI_REQ_USE_DMA; 701 host->flags |= SDHCI_REQ_USE_DMA;
@@ -807,15 +831,17 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
807 831
808 sdhci_set_transfer_irqs(host); 832 sdhci_set_transfer_irqs(host);
809 833
810 /* We do not handle DMA boundaries, so set it to max (512 KiB) */ 834 /* Set the DMA boundary value and block size */
811 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); 835 sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
836 data->blksz), SDHCI_BLOCK_SIZE);
812 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 837 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
813} 838}
814 839
815static void sdhci_set_transfer_mode(struct sdhci_host *host, 840static void sdhci_set_transfer_mode(struct sdhci_host *host,
816 struct mmc_data *data) 841 struct mmc_command *cmd)
817{ 842{
818 u16 mode; 843 u16 mode;
844 struct mmc_data *data = cmd->data;
819 845
820 if (data == NULL) 846 if (data == NULL)
821 return; 847 return;
@@ -823,12 +849,20 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
823 WARN_ON(!host->data); 849 WARN_ON(!host->data);
824 850
825 mode = SDHCI_TRNS_BLK_CNT_EN; 851 mode = SDHCI_TRNS_BLK_CNT_EN;
826 if (data->blocks > 1) { 852 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
827 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 853 mode |= SDHCI_TRNS_MULTI;
828 mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12; 854 /*
829 else 855 * If we are sending CMD23, CMD12 never gets sent
830 mode |= SDHCI_TRNS_MULTI; 856 * on successful completion (so no Auto-CMD12).
857 */
858 if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12))
859 mode |= SDHCI_TRNS_AUTO_CMD12;
860 else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
861 mode |= SDHCI_TRNS_AUTO_CMD23;
862 sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
863 }
831 } 864 }
865
832 if (data->flags & MMC_DATA_READ) 866 if (data->flags & MMC_DATA_READ)
833 mode |= SDHCI_TRNS_READ; 867 mode |= SDHCI_TRNS_READ;
834 if (host->flags & SDHCI_REQ_USE_DMA) 868 if (host->flags & SDHCI_REQ_USE_DMA)
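
Note: the block above decides between the two hardware auto-commands. A compact sketch of the decision (flag and function names here are illustrative, not the driver's):

#include <stdio.h>

enum auto_cmd { AUTO_NONE, AUTO_CMD12, AUTO_CMD23 };

static enum auto_cmd pick_auto_cmd(int multi_block, int have_sbc,
                                   int auto_cmd12_ok, int auto_cmd23_ok)
{
        if (!multi_block)
                return AUTO_NONE;
        if (!have_sbc && auto_cmd12_ok)
                return AUTO_CMD12;      /* open-ended: controller appends CMD12 */
        if (have_sbc && auto_cmd23_ok)
                return AUTO_CMD23;      /* controller sends CMD23, no CMD12 */
        return AUTO_NONE;
}

int main(void)
{
        printf("sbc + Auto-CMD23 capable -> %d\n", pick_auto_cmd(1, 1, 1, 1));
        return 0;
}
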
@@ -868,7 +902,15 @@ static void sdhci_finish_data(struct sdhci_host *host)
868 else 902 else
869 data->bytes_xfered = data->blksz * data->blocks; 903 data->bytes_xfered = data->blksz * data->blocks;
870 904
871 if (data->stop) { 905 /*
906 * Need to send CMD12 if -
907 * a) open-ended multiblock transfer (no CMD23)
908 * b) error in multiblock transfer
909 */
910 if (data->stop &&
911 (data->error ||
912 !host->mrq->sbc)) {
913
872 /* 914 /*
873 * The controller needs a reset of internal state machines 915 * The controller needs a reset of internal state machines
874 * upon error conditions. 916 * upon error conditions.
@@ -920,11 +962,11 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
920 962
921 host->cmd = cmd; 963 host->cmd = cmd;
922 964
923 sdhci_prepare_data(host, cmd->data); 965 sdhci_prepare_data(host, cmd);
924 966
925 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 967 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
926 968
927 sdhci_set_transfer_mode(host, cmd->data); 969 sdhci_set_transfer_mode(host, cmd);
928 970
929 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 971 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
930 printk(KERN_ERR "%s: Unsupported response type!\n", 972 printk(KERN_ERR "%s: Unsupported response type!\n",
@@ -947,7 +989,9 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
947 flags |= SDHCI_CMD_CRC; 989 flags |= SDHCI_CMD_CRC;
948 if (cmd->flags & MMC_RSP_OPCODE) 990 if (cmd->flags & MMC_RSP_OPCODE)
949 flags |= SDHCI_CMD_INDEX; 991 flags |= SDHCI_CMD_INDEX;
950 if (cmd->data) 992
993 /* CMD19 is special in that the Data Present Select should be set */
994 if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
951 flags |= SDHCI_CMD_DATA; 995 flags |= SDHCI_CMD_DATA;
952 996
953 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 997 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -977,19 +1021,27 @@ static void sdhci_finish_command(struct sdhci_host *host)
977 1021
978 host->cmd->error = 0; 1022 host->cmd->error = 0;
979 1023
980 if (host->data && host->data_early) 1024 /* Finished CMD23, now send actual command. */
981 sdhci_finish_data(host); 1025 if (host->cmd == host->mrq->sbc) {
1026 host->cmd = NULL;
1027 sdhci_send_command(host, host->mrq->cmd);
1028 } else {
982 1029
983 if (!host->cmd->data) 1030 /* Processed actual command. */
984 tasklet_schedule(&host->finish_tasklet); 1031 if (host->data && host->data_early)
1032 sdhci_finish_data(host);
985 1033
986 host->cmd = NULL; 1034 if (!host->cmd->data)
1035 tasklet_schedule(&host->finish_tasklet);
1036
1037 host->cmd = NULL;
1038 }
987} 1039}
988 1040
989static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 1041static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
990{ 1042{
991 int div; 1043 int div = 0; /* Initialized for compiler warning */
992 u16 clk; 1044 u16 clk = 0;
993 unsigned long timeout; 1045 unsigned long timeout;
994 1046
995 if (clock == host->clock) 1047 if (clock == host->clock)
@@ -1007,14 +1059,45 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1007 goto out; 1059 goto out;
1008 1060
1009 if (host->version >= SDHCI_SPEC_300) { 1061 if (host->version >= SDHCI_SPEC_300) {
1010 /* Version 3.00 divisors must be a multiple of 2. */ 1062 /*
1011 if (host->max_clk <= clock) 1063 * Check if the Host Controller supports Programmable Clock
1012 div = 1; 1064 * Mode.
1013 else { 1065 */
1014 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) { 1066 if (host->clk_mul) {
1015 if ((host->max_clk / div) <= clock) 1067 u16 ctrl;
1016 break; 1068
1069 /*
1070 * We need to figure out whether the Host Driver needs
1071 * to select Programmable Clock Mode, or the value can
1072 * be set automatically by the Host Controller based on
1073 * the Preset Value registers.
1074 */
1075 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1076 if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1077 for (div = 1; div <= 1024; div++) {
1078 if (((host->max_clk * host->clk_mul) /
1079 div) <= clock)
1080 break;
1081 }
1082 /*
1083 * Set Programmable Clock Mode in the Clock
1084 * Control register.
1085 */
1086 clk = SDHCI_PROG_CLOCK_MODE;
1087 div--;
1017 } 1088 }
1089 } else {
1090 /* Version 3.00 divisors must be a multiple of 2. */
1091 if (host->max_clk <= clock)
1092 div = 1;
1093 else {
1094 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1095 div += 2) {
1096 if ((host->max_clk / div) <= clock)
1097 break;
1098 }
1099 }
1100 div >>= 1;
1018 } 1101 }
1019 } else { 1102 } else {
1020 /* Version 2.00 divisors must be a power of 2. */ 1103 /* Version 2.00 divisors must be a power of 2. */
@@ -1022,10 +1105,10 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1022 if ((host->max_clk / div) <= clock) 1105 if ((host->max_clk / div) <= clock)
1023 break; 1106 break;
1024 } 1107 }
1108 div >>= 1;
1025 } 1109 }
1026 div >>= 1;
1027 1110
1028 clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1111 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1029 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1112 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1030 << SDHCI_DIVIDER_HI_SHIFT; 1113 << SDHCI_DIVIDER_HI_SHIFT;
1031 clk |= SDHCI_CLOCK_INT_EN; 1114 clk |= SDHCI_CLOCK_INT_EN;
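
Note: the divider selection above differs between the two v3.00 clock modes. A userspace sketch of the arithmetic, with the limits taken from the hunk (a programmable divider of 1..1024 stored as div - 1, or an even divisor up to SDHCI_MAX_DIV_SPEC_300 stored as div / 2); everything else is illustrative:

#include <stdio.h>

static unsigned int pick_divider(unsigned int max_clk, unsigned int clk_mul,
                                 unsigned int clock, int *prog_mode)
{
        unsigned int div;

        if (clk_mul) {                  /* programmable clock mode */
                for (div = 1; div <= 1024; div++)
                        if ((unsigned long long)max_clk * clk_mul / div <= clock)
                                break;
                *prog_mode = 1;
                return div - 1;         /* register field stores div - 1 */
        }
        /* divided clock mode: divisor is a multiple of 2, field stores div / 2 */
        *prog_mode = 0;
        if (max_clk <= clock)
                return 0;
        for (div = 2; div < 2046; div += 2)
                if (max_clk / div <= clock)
                        break;
        return div >> 1;
}

int main(void)
{
        int prog;

        /* 200 MHz base clock, no multiplier, 50 MHz target -> field value 2 */
        printf("div field = %u\n", pick_divider(200000000, 0, 50000000, &prog));
        return 0;
}
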
@@ -1131,7 +1214,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1131#ifndef SDHCI_USE_LEDS_CLASS 1214#ifndef SDHCI_USE_LEDS_CLASS
1132 sdhci_activate_led(host); 1215 sdhci_activate_led(host);
1133#endif 1216#endif
1134 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) { 1217
1218 /*
1219 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1220 * requests if Auto-CMD12 is enabled.
1221 */
1222 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
1135 if (mrq->stop) { 1223 if (mrq->stop) {
1136 mrq->data->stop = NULL; 1224 mrq->data->stop = NULL;
1137 mrq->stop = NULL; 1225 mrq->stop = NULL;
@@ -1150,8 +1238,30 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1150 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1238 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1151 host->mrq->cmd->error = -ENOMEDIUM; 1239 host->mrq->cmd->error = -ENOMEDIUM;
1152 tasklet_schedule(&host->finish_tasklet); 1240 tasklet_schedule(&host->finish_tasklet);
1153 } else 1241 } else {
1154 sdhci_send_command(host, mrq->cmd); 1242 u32 present_state;
1243
1244 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1245 /*
1246 * Check if the re-tuning timer has already expired and there
1247 * is no on-going data transfer. If so, we need to execute
1248 * tuning procedure before sending command.
1249 */
1250 if ((host->flags & SDHCI_NEEDS_RETUNING) &&
1251 !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
1252 spin_unlock_irqrestore(&host->lock, flags);
1253 sdhci_execute_tuning(mmc);
1254 spin_lock_irqsave(&host->lock, flags);
1255
1256 /* Restore original mmc_request structure */
1257 host->mrq = mrq;
1258 }
1259
1260 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1261 sdhci_send_command(host, mrq->sbc);
1262 else
1263 sdhci_send_command(host, mrq->cmd);
1264 }
1155 1265
1156 mmiowb(); 1266 mmiowb();
1157 spin_unlock_irqrestore(&host->lock, flags); 1267 spin_unlock_irqrestore(&host->lock, flags);
@@ -1222,7 +1332,84 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1222 else 1332 else
1223 ctrl &= ~SDHCI_CTRL_HISPD; 1333 ctrl &= ~SDHCI_CTRL_HISPD;
1224 1334
1225 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 1335 if (host->version >= SDHCI_SPEC_300) {
1336 u16 clk, ctrl_2;
1337 unsigned int clock;
1338
1339 /* In case of UHS-I modes, set High Speed Enable */
1340 if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
1341 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1342 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1343 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1344 (ios->timing == MMC_TIMING_UHS_SDR12))
1345 ctrl |= SDHCI_CTRL_HISPD;
1346
1347 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1348 if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1349 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1350 /*
1351 * We only need to set Driver Strength if the
1352 * preset value enable is not set.
1353 */
1354 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1355 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1356 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1357 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1358 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1359
1360 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1361 } else {
1362 /*
1363 * According to SDHC Spec v3.00, if the Preset Value
1364 * Enable in the Host Control 2 register is set, we
1365 * need to reset SD Clock Enable before changing High
1366 * Speed Enable to avoid generating clock glitches.
1367 */
1368
1369 /* Reset SD Clock Enable */
1370 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1371 clk &= ~SDHCI_CLOCK_CARD_EN;
1372 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1373
1374 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1375
1376 /* Re-enable SD Clock */
1377 clock = host->clock;
1378 host->clock = 0;
1379 sdhci_set_clock(host, clock);
1380 }
1381
1382
1383 /* Reset SD Clock Enable */
1384 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1385 clk &= ~SDHCI_CLOCK_CARD_EN;
1386 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1387
1388 if (host->ops->set_uhs_signaling)
1389 host->ops->set_uhs_signaling(host, ios->timing);
1390 else {
1391 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1392 /* Select Bus Speed Mode for host */
1393 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1394 if (ios->timing == MMC_TIMING_UHS_SDR12)
1395 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1396 else if (ios->timing == MMC_TIMING_UHS_SDR25)
1397 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1398 else if (ios->timing == MMC_TIMING_UHS_SDR50)
1399 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1400 else if (ios->timing == MMC_TIMING_UHS_SDR104)
1401 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1402 else if (ios->timing == MMC_TIMING_UHS_DDR50)
1403 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1404 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1405 }
1406
1407 /* Re-enable SD Clock */
1408 clock = host->clock;
1409 host->clock = 0;
1410 sdhci_set_clock(host, clock);
1411 } else
1412 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1226 1413
1227 /* 1414 /*
1228 * Some (ENE) controllers go apeshit on some ios operation, 1415 * Some (ENE) controllers go apeshit on some ios operation,
@@ -1237,14 +1424,11 @@ out:
1237 spin_unlock_irqrestore(&host->lock, flags); 1424 spin_unlock_irqrestore(&host->lock, flags);
1238} 1425}
1239 1426
1240static int sdhci_get_ro(struct mmc_host *mmc) 1427static int check_ro(struct sdhci_host *host)
1241{ 1428{
1242 struct sdhci_host *host;
1243 unsigned long flags; 1429 unsigned long flags;
1244 int is_readonly; 1430 int is_readonly;
1245 1431
1246 host = mmc_priv(mmc);
1247
1248 spin_lock_irqsave(&host->lock, flags); 1432 spin_lock_irqsave(&host->lock, flags);
1249 1433
1250 if (host->flags & SDHCI_DEVICE_DEAD) 1434 if (host->flags & SDHCI_DEVICE_DEAD)
@@ -1262,6 +1446,29 @@ static int sdhci_get_ro(struct mmc_host *mmc)
1262 !is_readonly : is_readonly; 1446 !is_readonly : is_readonly;
1263} 1447}
1264 1448
1449#define SAMPLE_COUNT 5
1450
1451static int sdhci_get_ro(struct mmc_host *mmc)
1452{
1453 struct sdhci_host *host;
1454 int i, ro_count;
1455
1456 host = mmc_priv(mmc);
1457
1458 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1459 return check_ro(host);
1460
1461 ro_count = 0;
1462 for (i = 0; i < SAMPLE_COUNT; i++) {
1463 if (check_ro(host)) {
1464 if (++ro_count > SAMPLE_COUNT / 2)
1465 return 1;
1466 }
1467 msleep(30);
1468 }
1469 return 0;
1470}
1471
1265static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1472static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1266{ 1473{
1267 struct sdhci_host *host; 1474 struct sdhci_host *host;
@@ -1284,11 +1491,322 @@ out:
1284 spin_unlock_irqrestore(&host->lock, flags); 1491 spin_unlock_irqrestore(&host->lock, flags);
1285} 1492}
1286 1493
1494static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1495 struct mmc_ios *ios)
1496{
1497 struct sdhci_host *host;
1498 u8 pwr;
1499 u16 clk, ctrl;
1500 u32 present_state;
1501
1502 host = mmc_priv(mmc);
1503
1504 /*
1505 * Signal Voltage Switching is only applicable for Host Controllers
1506 * v3.00 and above.
1507 */
1508 if (host->version < SDHCI_SPEC_300)
1509 return 0;
1510
1511 /*
1512 * We first check whether the request is to set signalling voltage
1513 * to 3.3V. If so, we change the voltage to 3.3V and return quickly.
1514 */
1515 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1516 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1517 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1518 ctrl &= ~SDHCI_CTRL_VDD_180;
1519 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1520
1521 /* Wait for 5ms */
1522 usleep_range(5000, 5500);
1523
1524 /* 3.3V regulator output should be stable within 5 ms */
1525 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1526 if (!(ctrl & SDHCI_CTRL_VDD_180))
1527 return 0;
1528 else {
1529 printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V "
1530 "signalling voltage failed\n");
1531 return -EIO;
1532 }
1533 } else if (!(ctrl & SDHCI_CTRL_VDD_180) &&
1534 (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) {
1535 /* Stop SDCLK */
1536 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1537 clk &= ~SDHCI_CLOCK_CARD_EN;
1538 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1539
1540 /* Check whether DAT[3:0] is 0000 */
1541 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
1542 if (!((present_state & SDHCI_DATA_LVL_MASK) >>
1543 SDHCI_DATA_LVL_SHIFT)) {
1544 /*
1545 * Enable 1.8V Signal Enable in the Host Control2
1546 * register
1547 */
1548 ctrl |= SDHCI_CTRL_VDD_180;
1549 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1550
1551 /* Wait for 5ms */
1552 usleep_range(5000, 5500);
1553
1554 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1555 if (ctrl & SDHCI_CTRL_VDD_180) {
1556 /* Provide SDCLK again and wait for 1ms*/
1557 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1558 clk |= SDHCI_CLOCK_CARD_EN;
1559 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1560 usleep_range(1000, 1500);
1561
1562 /*
1563 * If DAT[3:0] level is 1111b, then the card
1564 * was successfully switched to 1.8V signaling.
1565 */
1566 present_state = sdhci_readl(host,
1567 SDHCI_PRESENT_STATE);
1568 if ((present_state & SDHCI_DATA_LVL_MASK) ==
1569 SDHCI_DATA_LVL_MASK)
1570 return 0;
1571 }
1572 }
1573
1574 /*
1575 * If we are here, that means the switch to 1.8V signaling
1576 * failed. We power cycle the card, and retry initialization
1577 * sequence by setting S18R to 0.
1578 */
1579 pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
1580 pwr &= ~SDHCI_POWER_ON;
1581 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1582
1583 /* Wait for 1ms as per the spec */
1584 usleep_range(1000, 1500);
1585 pwr |= SDHCI_POWER_ON;
1586 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1587
1588 printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling "
1589 "voltage failed, retrying with S18R set to 0\n");
1590 return -EAGAIN;
1591 } else
1592 /* No signal voltage switch required */
1593 return 0;
1594}
1595
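
Note: the switch above brackets the 1.8V enable with two reads of the Present State register. A small sketch of the DAT[3:0] checks, reusing the SDHCI_DATA_LVL_MASK/SHIFT values added to sdhci.h later in this diff:

#include <stdio.h>

#define DATA_LVL_MASK   0x00F00000
#define DATA_LVL_SHIFT  20

/* Before enabling 1.8V signalling, DAT[3:0] must read 0000b. */
static int dat_lines_low(unsigned int present_state)
{
        return ((present_state & DATA_LVL_MASK) >> DATA_LVL_SHIFT) == 0x0;
}

/* After SDCLK is re-enabled, DAT[3:0] reading 1111b means the switch took. */
static int dat_lines_high(unsigned int present_state)
{
        return (present_state & DATA_LVL_MASK) == DATA_LVL_MASK;
}

int main(void)
{
        printf("low=%d high=%d\n", dat_lines_low(0x00000000),
               dat_lines_high(0x00F00000));
        return 0;
}
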
1596static int sdhci_execute_tuning(struct mmc_host *mmc)
1597{
1598 struct sdhci_host *host;
1599 u16 ctrl;
1600 u32 ier;
1601 int tuning_loop_counter = MAX_TUNING_LOOP;
1602 unsigned long timeout;
1603 int err = 0;
1604
1605 host = mmc_priv(mmc);
1606
1607 disable_irq(host->irq);
1608 spin_lock(&host->lock);
1609
1610 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1611
1612 /*
1613 * Host Controller needs tuning only in case of SDR104 mode
1614 * and for SDR50 mode when Use Tuning for SDR50 is set in
1615 * Capabilities register.
1616 */
1617 if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
1618 (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
1619 (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
1620 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1621 else {
1622 spin_unlock(&host->lock);
1623 enable_irq(host->irq);
1624 return 0;
1625 }
1626
1627 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1628
1629 /*
1630 * As per the Host Controller spec v3.00, tuning command
1631 * generates Buffer Read Ready interrupt, so enable that.
1632 *
1633 * Note: The spec clearly says that when tuning sequence
1634 * is being performed, the controller does not generate
1635 * interrupts other than Buffer Read Ready interrupt. But
1636 * to make sure we don't hit a controller bug, we _only_
1637 * enable Buffer Read Ready interrupt here.
1638 */
1639 ier = sdhci_readl(host, SDHCI_INT_ENABLE);
1640 sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL);
1641
1642 /*
1643 * Issue CMD19 repeatedly until Execute Tuning is cleared to 0, the number
1644 * of loops reaches 40, or a timeout of 150ms occurs.
1645 */
1646 timeout = 150;
1647 do {
1648 struct mmc_command cmd = {0};
1649 struct mmc_request mrq = {0};
1650
1651 if (!tuning_loop_counter && !timeout)
1652 break;
1653
1654 cmd.opcode = MMC_SEND_TUNING_BLOCK;
1655 cmd.arg = 0;
1656 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
1657 cmd.retries = 0;
1658 cmd.data = NULL;
1659 cmd.error = 0;
1660
1661 mrq.cmd = &cmd;
1662 host->mrq = &mrq;
1663
1664 /*
1665 * In response to CMD19, the card sends 64 bytes of tuning
1666 * block to the Host Controller. So we set the block size
1667 * to 64 here.
1668 */
1669 sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
1670
1671 /*
1672 * The tuning block is sent by the card to the host controller.
1673 * So we set the TRNS_READ bit in the Transfer Mode register.
1674 * This also takes care of setting DMA Enable and Multi Block
1675 * Select in the same register to 0.
1676 */
1677 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1678
1679 sdhci_send_command(host, &cmd);
1680
1681 host->cmd = NULL;
1682 host->mrq = NULL;
1683
1684 spin_unlock(&host->lock);
1685 enable_irq(host->irq);
1686
1687 /* Wait for Buffer Read Ready interrupt */
1688 wait_event_interruptible_timeout(host->buf_ready_int,
1689 (host->tuning_done == 1),
1690 msecs_to_jiffies(50));
1691 disable_irq(host->irq);
1692 spin_lock(&host->lock);
1693
1694 if (!host->tuning_done) {
1695 printk(KERN_INFO DRIVER_NAME ": Timeout waiting for "
1696 "Buffer Read Ready interrupt during tuning "
1697 "procedure, falling back to fixed sampling "
1698 "clock\n");
1699 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1700 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1701 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
1702 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1703
1704 err = -EIO;
1705 goto out;
1706 }
1707
1708 host->tuning_done = 0;
1709
1710 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1711 tuning_loop_counter--;
1712 timeout--;
1713 mdelay(1);
1714 } while (ctrl & SDHCI_CTRL_EXEC_TUNING);
1715
1716 /*
1717 * The Host Driver has exhausted the maximum number of loops allowed,
1718 * so use fixed sampling frequency.
1719 */
1720 if (!tuning_loop_counter || !timeout) {
1721 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1722 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1723 } else {
1724 if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) {
1725 printk(KERN_INFO DRIVER_NAME ": Tuning procedure"
1726 " failed, falling back to fixed sampling"
1727 " clock\n");
1728 err = -EIO;
1729 }
1730 }
1731
1732out:
1733 /*
1734 * If this is the very first time we are here, we start the retuning
1735 * timer. SDHCI_NEEDS_RETUNING will only be clear on this first pass,
1736 * so we check that condition before actually starting
1737 * the timer.
1738 */
1739 if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
1740 (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
1741 mod_timer(&host->tuning_timer, jiffies +
1742 host->tuning_count * HZ);
1743 /* Tuning mode 1 limits the maximum data length to 4MB */
1744 mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
1745 } else {
1746 host->flags &= ~SDHCI_NEEDS_RETUNING;
1747 /* Reload the new initial value for timer */
1748 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
1749 mod_timer(&host->tuning_timer, jiffies +
1750 host->tuning_count * HZ);
1751 }
1752
1753 /*
1754 * In case tuning fails, host controllers which support re-tuning can
1755 * try tuning again at a later time, when the re-tuning timer expires.
1756 * So for these controllers, we return 0. Since there might be other
1757 * controllers who do not have this capability, we return error for
1758 * them.
1759 */
1760 if (err && host->tuning_count &&
1761 host->tuning_mode == SDHCI_TUNING_MODE_1)
1762 err = 0;
1763
1764 sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
1765 spin_unlock(&host->lock);
1766 enable_irq(host->irq);
1767
1768 return err;
1769}
1770
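
Note: the tuning loop above has two independent exit conditions. A standalone sketch of the bookkeeping only; the two stub functions stand in for the hardware interaction and are not part of the driver:

#include <stdio.h>

/* Stubs for the hardware side: pretend tuning converges after 12 tries. */
static int tries_left = 12;
static void send_cmd19(void) { }
static int exec_tuning_still_set(void) { return --tries_left > 0; }

static int run_tuning(void)
{
        int loops = 40;         /* MAX_TUNING_LOOP */
        int timeout_ms = 150;

        do {
                if (!loops || !timeout_ms)
                        break;
                send_cmd19();
                loops--;
                timeout_ms--;   /* one 1 ms delay per iteration */
        } while (exec_tuning_still_set());

        /* Exhausted either budget: fall back to the fixed sampling clock. */
        return (!loops || !timeout_ms) ? -1 : 0;
}

int main(void)
{
        printf("tuning %s\n", run_tuning() ? "fell back" : "converged");
        return 0;
}
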
1771static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable)
1772{
1773 struct sdhci_host *host;
1774 u16 ctrl;
1775 unsigned long flags;
1776
1777 host = mmc_priv(mmc);
1778
1779 /* Host Controller v3.00 defines preset value registers */
1780 if (host->version < SDHCI_SPEC_300)
1781 return;
1782
1783 spin_lock_irqsave(&host->lock, flags);
1784
1785 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1786
1787 /*
1788 * We only enable or disable Preset Value if they are not already
1789 * enabled or disabled respectively. Otherwise, we bail out.
1790 */
1791 if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1792 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
1793 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1794 } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) {
1795 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
1796 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1797 }
1798
1799 spin_unlock_irqrestore(&host->lock, flags);
1800}
1801
1287static const struct mmc_host_ops sdhci_ops = { 1802static const struct mmc_host_ops sdhci_ops = {
1288 .request = sdhci_request, 1803 .request = sdhci_request,
1289 .set_ios = sdhci_set_ios, 1804 .set_ios = sdhci_set_ios,
1290 .get_ro = sdhci_get_ro, 1805 .get_ro = sdhci_get_ro,
1291 .enable_sdio_irq = sdhci_enable_sdio_irq, 1806 .enable_sdio_irq = sdhci_enable_sdio_irq,
1807 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
1808 .execute_tuning = sdhci_execute_tuning,
1809 .enable_preset_value = sdhci_enable_preset_value,
1292}; 1810};
1293 1811
1294/*****************************************************************************\ 1812/*****************************************************************************\
@@ -1345,6 +1863,9 @@ static void sdhci_tasklet_finish(unsigned long param)
1345 1863
1346 del_timer(&host->timer); 1864 del_timer(&host->timer);
1347 1865
1866 if (host->version >= SDHCI_SPEC_300)
1867 del_timer(&host->tuning_timer);
1868
1348 mrq = host->mrq; 1869 mrq = host->mrq;
1349 1870
1350 /* 1871 /*
@@ -1418,6 +1939,20 @@ static void sdhci_timeout_timer(unsigned long data)
1418 spin_unlock_irqrestore(&host->lock, flags); 1939 spin_unlock_irqrestore(&host->lock, flags);
1419} 1940}
1420 1941
1942static void sdhci_tuning_timer(unsigned long data)
1943{
1944 struct sdhci_host *host;
1945 unsigned long flags;
1946
1947 host = (struct sdhci_host *)data;
1948
1949 spin_lock_irqsave(&host->lock, flags);
1950
1951 host->flags |= SDHCI_NEEDS_RETUNING;
1952
1953 spin_unlock_irqrestore(&host->lock, flags);
1954}
1955
1421/*****************************************************************************\ 1956/*****************************************************************************\
1422 * * 1957 * *
1423 * Interrupt handling * 1958 * Interrupt handling *
@@ -1506,6 +2041,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1506{ 2041{
1507 BUG_ON(intmask == 0); 2042 BUG_ON(intmask == 0);
1508 2043
2044 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2045 if (intmask & SDHCI_INT_DATA_AVAIL) {
2046 if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
2047 MMC_SEND_TUNING_BLOCK) {
2048 host->tuning_done = 1;
2049 wake_up(&host->buf_ready_int);
2050 return;
2051 }
2052 }
2053
1509 if (!host->data) { 2054 if (!host->data) {
1510 /* 2055 /*
1511 * The "data complete" interrupt is also used to 2056 * The "data complete" interrupt is also used to
@@ -1551,10 +2096,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1551 * We currently don't do anything fancy with DMA 2096 * We currently don't do anything fancy with DMA
1552 * boundaries, but as we can't disable the feature 2097 * boundaries, but as we can't disable the feature
1553 * we need to at least restart the transfer. 2098 * we need to at least restart the transfer.
2099 *
2100 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2101 * should return a valid address to continue from, but as
2102 * some controllers are faulty, don't trust them.
1554 */ 2103 */
1555 if (intmask & SDHCI_INT_DMA_END) 2104 if (intmask & SDHCI_INT_DMA_END) {
1556 sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), 2105 u32 dmastart, dmanow;
1557 SDHCI_DMA_ADDRESS); 2106 dmastart = sg_dma_address(host->data->sg);
2107 dmanow = dmastart + host->data->bytes_xfered;
2108 /*
2109 * Force update to the next DMA block boundary.
2110 */
2111 dmanow = (dmanow &
2112 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2113 SDHCI_DEFAULT_BOUNDARY_SIZE;
2114 host->data->bytes_xfered = dmanow - dmastart;
2115 DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes,"
2116 " next 0x%08x\n",
2117 mmc_hostname(host->mmc), dmastart,
2118 host->data->bytes_xfered, dmanow);
2119 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2120 }
1558 2121
1559 if (intmask & SDHCI_INT_DATA_END) { 2122 if (intmask & SDHCI_INT_DATA_END) {
1560 if (host->cmd) { 2123 if (host->cmd) {
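
Note: the restart address computed above is just the current transfer position rounded up to the next SDMA boundary. A standalone check of the arithmetic, using the 512 KiB SDHCI_DEFAULT_BOUNDARY_SIZE defined in sdhci.h below:

#include <stdio.h>

#define BOUNDARY_SIZE   (512 * 1024)

static unsigned int next_boundary(unsigned int dmastart, unsigned int bytes_xfered)
{
        unsigned int dmanow = dmastart + bytes_xfered;

        /* round up to the next 512 KiB boundary */
        return (dmanow & ~(unsigned int)(BOUNDARY_SIZE - 1)) + BOUNDARY_SIZE;
}

int main(void)
{
        /* buffer at 0x1234000 after 300 KiB transferred -> restart at 0x1280000 */
        printf("restart at 0x%08x\n", next_boundary(0x1234000, 300 * 1024));
        return 0;
}
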
@@ -1664,6 +2227,14 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1664 2227
1665 sdhci_disable_card_detection(host); 2228 sdhci_disable_card_detection(host);
1666 2229
2230 /* Disable tuning since we are suspending */
2231 if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
2232 host->tuning_mode == SDHCI_TUNING_MODE_1) {
2233 host->flags &= ~SDHCI_NEEDS_RETUNING;
2234 mod_timer(&host->tuning_timer, jiffies +
2235 host->tuning_count * HZ);
2236 }
2237
1667 ret = mmc_suspend_host(host->mmc); 2238 ret = mmc_suspend_host(host->mmc);
1668 if (ret) 2239 if (ret)
1669 return ret; 2240 return ret;
@@ -1705,6 +2276,11 @@ int sdhci_resume_host(struct sdhci_host *host)
1705 ret = mmc_resume_host(host->mmc); 2276 ret = mmc_resume_host(host->mmc);
1706 sdhci_enable_card_detection(host); 2277 sdhci_enable_card_detection(host);
1707 2278
2279 /* Set the re-tuning expiration flag */
2280 if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
2281 (host->tuning_mode == SDHCI_TUNING_MODE_1))
2282 host->flags |= SDHCI_NEEDS_RETUNING;
2283
1708 return ret; 2284 return ret;
1709} 2285}
1710 2286
@@ -1751,7 +2327,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1751int sdhci_add_host(struct sdhci_host *host) 2327int sdhci_add_host(struct sdhci_host *host)
1752{ 2328{
1753 struct mmc_host *mmc; 2329 struct mmc_host *mmc;
1754 unsigned int caps, ocr_avail; 2330 u32 caps[2];
2331 u32 max_current_caps;
2332 unsigned int ocr_avail;
1755 int ret; 2333 int ret;
1756 2334
1757 WARN_ON(host == NULL); 2335 WARN_ON(host == NULL);
@@ -1774,12 +2352,15 @@ int sdhci_add_host(struct sdhci_host *host)
1774 host->version); 2352 host->version);
1775 } 2353 }
1776 2354
1777 caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : 2355 caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
1778 sdhci_readl(host, SDHCI_CAPABILITIES); 2356 sdhci_readl(host, SDHCI_CAPABILITIES);
1779 2357
2358 caps[1] = (host->version >= SDHCI_SPEC_300) ?
2359 sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
2360
1780 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 2361 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
1781 host->flags |= SDHCI_USE_SDMA; 2362 host->flags |= SDHCI_USE_SDMA;
1782 else if (!(caps & SDHCI_CAN_DO_SDMA)) 2363 else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
1783 DBG("Controller doesn't have SDMA capability\n"); 2364 DBG("Controller doesn't have SDMA capability\n");
1784 else 2365 else
1785 host->flags |= SDHCI_USE_SDMA; 2366 host->flags |= SDHCI_USE_SDMA;
@@ -1790,7 +2371,8 @@ int sdhci_add_host(struct sdhci_host *host)
1790 host->flags &= ~SDHCI_USE_SDMA; 2371 host->flags &= ~SDHCI_USE_SDMA;
1791 } 2372 }
1792 2373
1793 if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) 2374 if ((host->version >= SDHCI_SPEC_200) &&
2375 (caps[0] & SDHCI_CAN_DO_ADMA2))
1794 host->flags |= SDHCI_USE_ADMA; 2376 host->flags |= SDHCI_USE_ADMA;
1795 2377
1796 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 2378 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
@@ -1840,10 +2422,10 @@ int sdhci_add_host(struct sdhci_host *host)
1840 } 2422 }
1841 2423
1842 if (host->version >= SDHCI_SPEC_300) 2424 if (host->version >= SDHCI_SPEC_300)
1843 host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) 2425 host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
1844 >> SDHCI_CLOCK_BASE_SHIFT; 2426 >> SDHCI_CLOCK_BASE_SHIFT;
1845 else 2427 else
1846 host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) 2428 host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
1847 >> SDHCI_CLOCK_BASE_SHIFT; 2429 >> SDHCI_CLOCK_BASE_SHIFT;
1848 2430
1849 host->max_clk *= 1000000; 2431 host->max_clk *= 1000000;
@@ -1859,7 +2441,7 @@ int sdhci_add_host(struct sdhci_host *host)
1859 } 2441 }
1860 2442
1861 host->timeout_clk = 2443 host->timeout_clk =
1862 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; 2444 (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
1863 if (host->timeout_clk == 0) { 2445 if (host->timeout_clk == 0) {
1864 if (host->ops->get_timeout_clock) { 2446 if (host->ops->get_timeout_clock) {
1865 host->timeout_clk = host->ops->get_timeout_clock(host); 2447 host->timeout_clk = host->ops->get_timeout_clock(host);
@@ -1871,22 +2453,55 @@ int sdhci_add_host(struct sdhci_host *host)
1871 return -ENODEV; 2453 return -ENODEV;
1872 } 2454 }
1873 } 2455 }
1874 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 2456 if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
1875 host->timeout_clk *= 1000; 2457 host->timeout_clk *= 1000;
1876 2458
1877 /* 2459 /*
2460 * In case of Host Controller v3.00, find out whether clock
2461 * multiplier is supported.
2462 */
2463 host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
2464 SDHCI_CLOCK_MUL_SHIFT;
2465
2466 /*
2467 * In case the value in Clock Multiplier is 0, then programmable
2468 * clock mode is not supported, otherwise the actual clock
2469 * multiplier is one more than the value of Clock Multiplier
2470 * in the Capabilities Register.
2471 */
2472 if (host->clk_mul)
2473 host->clk_mul += 1;
2474
2475 /*
1878 * Set host parameters. 2476 * Set host parameters.
1879 */ 2477 */
1880 mmc->ops = &sdhci_ops; 2478 mmc->ops = &sdhci_ops;
2479 mmc->f_max = host->max_clk;
1881 if (host->ops->get_min_clock) 2480 if (host->ops->get_min_clock)
1882 mmc->f_min = host->ops->get_min_clock(host); 2481 mmc->f_min = host->ops->get_min_clock(host);
1883 else if (host->version >= SDHCI_SPEC_300) 2482 else if (host->version >= SDHCI_SPEC_300) {
1884 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 2483 if (host->clk_mul) {
1885 else 2484 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
2485 mmc->f_max = host->max_clk * host->clk_mul;
2486 } else
2487 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
2488 } else
1886 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 2489 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
1887 2490
1888 mmc->f_max = host->max_clk; 2491 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
1889 mmc->caps |= MMC_CAP_SDIO_IRQ; 2492
2493 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
2494 host->flags |= SDHCI_AUTO_CMD12;
2495
2496 /* Auto-CMD23 stuff only works in ADMA or PIO. */
2497 if ((host->version >= SDHCI_SPEC_300) &&
2498 ((host->flags & SDHCI_USE_ADMA) ||
2499 !(host->flags & SDHCI_USE_SDMA))) {
2500 host->flags |= SDHCI_AUTO_CMD23;
2501 DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc));
2502 } else {
2503 DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc));
2504 }
1890 2505
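
Note: with a non-zero clock multiplier the usable frequency range changes as computed above. A small sketch with illustrative numbers (clk_mul here is already the effective multiplier, i.e. the capability field plus one):

#include <stdio.h>

int main(void)
{
        unsigned int max_clk = 100000000;       /* 100 MHz base clock */
        unsigned int clk_mul = 8;               /* 0 means no programmable mode */
        unsigned int f_min, f_max;

        if (clk_mul) {
                f_min = (max_clk * clk_mul) / 1024;     /* largest divider */
                f_max = max_clk * clk_mul;              /* divider of 1 */
        } else {
                f_min = max_clk / 2046;                 /* SDHCI_MAX_DIV_SPEC_300 */
                f_max = max_clk;
        }
        printf("f_min=%u f_max=%u\n", f_min, f_max);
        return 0;
}
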
1891 /* 2506 /*
1892 * A controller may support 8-bit width, but the board itself 2507 * A controller may support 8-bit width, but the board itself
@@ -1898,21 +2513,113 @@ int sdhci_add_host(struct sdhci_host *host)
1898 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 2513 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1899 mmc->caps |= MMC_CAP_4_BIT_DATA; 2514 mmc->caps |= MMC_CAP_4_BIT_DATA;
1900 2515
1901 if (caps & SDHCI_CAN_DO_HISPD) 2516 if (caps[0] & SDHCI_CAN_DO_HISPD)
1902 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2517 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
1903 2518
1904 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 2519 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
1905 mmc_card_is_removable(mmc)) 2520 mmc_card_is_removable(mmc))
1906 mmc->caps |= MMC_CAP_NEEDS_POLL; 2521 mmc->caps |= MMC_CAP_NEEDS_POLL;
1907 2522
2523 /* UHS-I mode(s) supported by the host controller. */
2524 if (host->version >= SDHCI_SPEC_300)
2525 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
2526
2527 /* SDR104 supports also implies SDR50 support */
2528 if (caps[1] & SDHCI_SUPPORT_SDR104)
2529 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
2530 else if (caps[1] & SDHCI_SUPPORT_SDR50)
2531 mmc->caps |= MMC_CAP_UHS_SDR50;
2532
2533 if (caps[1] & SDHCI_SUPPORT_DDR50)
2534 mmc->caps |= MMC_CAP_UHS_DDR50;
2535
2536 /* Does the host needs tuning for SDR50? */
2537 if (caps[1] & SDHCI_USE_SDR50_TUNING)
2538 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
2539
2540 /* Driver Type(s) (A, C, D) supported by the host */
2541 if (caps[1] & SDHCI_DRIVER_TYPE_A)
2542 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
2543 if (caps[1] & SDHCI_DRIVER_TYPE_C)
2544 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
2545 if (caps[1] & SDHCI_DRIVER_TYPE_D)
2546 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
2547
2548 /* Initial value for re-tuning timer count */
2549 host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
2550 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
2551
2552 /*
2553 * In case Re-tuning Timer is not disabled, the actual value of
2554 * re-tuning timer will be 2 ^ (n - 1).
2555 */
2556 if (host->tuning_count)
2557 host->tuning_count = 1 << (host->tuning_count - 1);
2558
2559 /* Re-tuning mode supported by the Host Controller */
2560 host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
2561 SDHCI_RETUNING_MODE_SHIFT;
2562
1908 ocr_avail = 0; 2563 ocr_avail = 0;
1909 if (caps & SDHCI_CAN_VDD_330) 2564 /*
2565 * According to SD Host Controller spec v3.00, if the Host System
2566 * can afford more than 150mA, Host Driver should set XPC to 1. Also
2567 * the value is meaningful only if Voltage Support in the Capabilities
2568 * register is set. The actual current value is 4 times the register
2569 * value.
2570 */
2571 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
2572
2573 if (caps[0] & SDHCI_CAN_VDD_330) {
2574 int max_current_330;
2575
1910 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 2576 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
1911 if (caps & SDHCI_CAN_VDD_300) 2577
2578 max_current_330 = ((max_current_caps &
2579 SDHCI_MAX_CURRENT_330_MASK) >>
2580 SDHCI_MAX_CURRENT_330_SHIFT) *
2581 SDHCI_MAX_CURRENT_MULTIPLIER;
2582
2583 if (max_current_330 > 150)
2584 mmc->caps |= MMC_CAP_SET_XPC_330;
2585 }
2586 if (caps[0] & SDHCI_CAN_VDD_300) {
2587 int max_current_300;
2588
1912 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 2589 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
1913 if (caps & SDHCI_CAN_VDD_180) 2590
2591 max_current_300 = ((max_current_caps &
2592 SDHCI_MAX_CURRENT_300_MASK) >>
2593 SDHCI_MAX_CURRENT_300_SHIFT) *
2594 SDHCI_MAX_CURRENT_MULTIPLIER;
2595
2596 if (max_current_300 > 150)
2597 mmc->caps |= MMC_CAP_SET_XPC_300;
2598 }
2599 if (caps[0] & SDHCI_CAN_VDD_180) {
2600 int max_current_180;
2601
1914 ocr_avail |= MMC_VDD_165_195; 2602 ocr_avail |= MMC_VDD_165_195;
1915 2603
2604 max_current_180 = ((max_current_caps &
2605 SDHCI_MAX_CURRENT_180_MASK) >>
2606 SDHCI_MAX_CURRENT_180_SHIFT) *
2607 SDHCI_MAX_CURRENT_MULTIPLIER;
2608
2609 if (max_current_180 > 150)
2610 mmc->caps |= MMC_CAP_SET_XPC_180;
2611
2612 /* Maximum current capabilities of the host at 1.8V */
2613 if (max_current_180 >= 800)
2614 mmc->caps |= MMC_CAP_MAX_CURRENT_800;
2615 else if (max_current_180 >= 600)
2616 mmc->caps |= MMC_CAP_MAX_CURRENT_600;
2617 else if (max_current_180 >= 400)
2618 mmc->caps |= MMC_CAP_MAX_CURRENT_400;
2619 else
2620 mmc->caps |= MMC_CAP_MAX_CURRENT_200;
2621 }
2622
1916 mmc->ocr_avail = ocr_avail; 2623 mmc->ocr_avail = ocr_avail;
1917 mmc->ocr_avail_sdio = ocr_avail; 2624 mmc->ocr_avail_sdio = ocr_avail;
1918 if (host->ocr_avail_sdio) 2625 if (host->ocr_avail_sdio)
@@ -1972,7 +2679,7 @@ int sdhci_add_host(struct sdhci_host *host)
1972 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 2679 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
1973 mmc->max_blk_size = 2; 2680 mmc->max_blk_size = 2;
1974 } else { 2681 } else {
1975 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> 2682 mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
1976 SDHCI_MAX_BLOCK_SHIFT; 2683 SDHCI_MAX_BLOCK_SHIFT;
1977 if (mmc->max_blk_size >= 3) { 2684 if (mmc->max_blk_size >= 3) {
1978 printk(KERN_WARNING "%s: Invalid maximum block size, " 2685 printk(KERN_WARNING "%s: Invalid maximum block size, "
@@ -1998,6 +2705,15 @@ int sdhci_add_host(struct sdhci_host *host)
1998 2705
1999 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); 2706 setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
2000 2707
2708 if (host->version >= SDHCI_SPEC_300) {
2709 init_waitqueue_head(&host->buf_ready_int);
2710
2711 /* Initialize re-tuning timer */
2712 init_timer(&host->tuning_timer);
2713 host->tuning_timer.data = (unsigned long)host;
2714 host->tuning_timer.function = sdhci_tuning_timer;
2715 }
2716
2001 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, 2717 ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
2002 mmc_hostname(mmc), host); 2718 mmc_hostname(mmc), host);
2003 if (ret) 2719 if (ret)
@@ -2091,6 +2807,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
2091 free_irq(host->irq, host); 2807 free_irq(host->irq, host);
2092 2808
2093 del_timer_sync(&host->timer); 2809 del_timer_sync(&host->timer);
2810 if (host->version >= SDHCI_SPEC_300)
2811 del_timer_sync(&host->tuning_timer);
2094 2812
2095 tasklet_kill(&host->card_tasklet); 2813 tasklet_kill(&host->card_tasklet);
2096 tasklet_kill(&host->finish_tasklet); 2814 tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 25e8bde600d1..745c42fa41ed 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#define SDHCI_DMA_ADDRESS 0x00 27#define SDHCI_DMA_ADDRESS 0x00
28#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
28 29
29#define SDHCI_BLOCK_SIZE 0x04 30#define SDHCI_BLOCK_SIZE 0x04
30#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) 31#define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
@@ -36,7 +37,8 @@
36#define SDHCI_TRANSFER_MODE 0x0C 37#define SDHCI_TRANSFER_MODE 0x0C
37#define SDHCI_TRNS_DMA 0x01 38#define SDHCI_TRNS_DMA 0x01
38#define SDHCI_TRNS_BLK_CNT_EN 0x02 39#define SDHCI_TRNS_BLK_CNT_EN 0x02
39#define SDHCI_TRNS_ACMD12 0x04 40#define SDHCI_TRNS_AUTO_CMD12 0x04
41#define SDHCI_TRNS_AUTO_CMD23 0x08
40#define SDHCI_TRNS_READ 0x10 42#define SDHCI_TRNS_READ 0x10
41#define SDHCI_TRNS_MULTI 0x20 43#define SDHCI_TRNS_MULTI 0x20
42 44
@@ -68,8 +70,10 @@
68#define SDHCI_DATA_AVAILABLE 0x00000800 70#define SDHCI_DATA_AVAILABLE 0x00000800
69#define SDHCI_CARD_PRESENT 0x00010000 71#define SDHCI_CARD_PRESENT 0x00010000
70#define SDHCI_WRITE_PROTECT 0x00080000 72#define SDHCI_WRITE_PROTECT 0x00080000
73#define SDHCI_DATA_LVL_MASK 0x00F00000
74#define SDHCI_DATA_LVL_SHIFT 20
71 75
72#define SDHCI_HOST_CONTROL 0x28 76#define SDHCI_HOST_CONTROL 0x28
73#define SDHCI_CTRL_LED 0x01 77#define SDHCI_CTRL_LED 0x01
74#define SDHCI_CTRL_4BITBUS 0x02 78#define SDHCI_CTRL_4BITBUS 0x02
75#define SDHCI_CTRL_HISPD 0x04 79#define SDHCI_CTRL_HISPD 0x04
@@ -99,6 +103,7 @@
99#define SDHCI_DIV_MASK 0xFF 103#define SDHCI_DIV_MASK 0xFF
100#define SDHCI_DIV_MASK_LEN 8 104#define SDHCI_DIV_MASK_LEN 8
101#define SDHCI_DIV_HI_MASK 0x300 105#define SDHCI_DIV_HI_MASK 0x300
106#define SDHCI_PROG_CLOCK_MODE 0x0020
102#define SDHCI_CLOCK_CARD_EN 0x0004 107#define SDHCI_CLOCK_CARD_EN 0x0004
103#define SDHCI_CLOCK_INT_STABLE 0x0002 108#define SDHCI_CLOCK_INT_STABLE 0x0002
104#define SDHCI_CLOCK_INT_EN 0x0001 109#define SDHCI_CLOCK_INT_EN 0x0001
@@ -146,7 +151,22 @@
146 151
147#define SDHCI_ACMD12_ERR 0x3C 152#define SDHCI_ACMD12_ERR 0x3C
148 153
149/* 3E-3F reserved */ 154#define SDHCI_HOST_CONTROL2 0x3E
155#define SDHCI_CTRL_UHS_MASK 0x0007
156#define SDHCI_CTRL_UHS_SDR12 0x0000
157#define SDHCI_CTRL_UHS_SDR25 0x0001
158#define SDHCI_CTRL_UHS_SDR50 0x0002
159#define SDHCI_CTRL_UHS_SDR104 0x0003
160#define SDHCI_CTRL_UHS_DDR50 0x0004
161#define SDHCI_CTRL_VDD_180 0x0008
162#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
163#define SDHCI_CTRL_DRV_TYPE_B 0x0000
164#define SDHCI_CTRL_DRV_TYPE_A 0x0010
165#define SDHCI_CTRL_DRV_TYPE_C 0x0020
166#define SDHCI_CTRL_DRV_TYPE_D 0x0030
167#define SDHCI_CTRL_EXEC_TUNING 0x0040
168#define SDHCI_CTRL_TUNED_CLK 0x0080
169#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
150 170
151#define SDHCI_CAPABILITIES 0x40 171#define SDHCI_CAPABILITIES 0x40
152#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F 172#define SDHCI_TIMEOUT_CLK_MASK 0x0000003F
@@ -167,9 +187,30 @@
167#define SDHCI_CAN_VDD_180 0x04000000 187#define SDHCI_CAN_VDD_180 0x04000000
168#define SDHCI_CAN_64BIT 0x10000000 188#define SDHCI_CAN_64BIT 0x10000000
169 189
190#define SDHCI_SUPPORT_SDR50 0x00000001
191#define SDHCI_SUPPORT_SDR104 0x00000002
192#define SDHCI_SUPPORT_DDR50 0x00000004
193#define SDHCI_DRIVER_TYPE_A 0x00000010
194#define SDHCI_DRIVER_TYPE_C 0x00000020
195#define SDHCI_DRIVER_TYPE_D 0x00000040
196#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00
197#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8
198#define SDHCI_USE_SDR50_TUNING 0x00002000
199#define SDHCI_RETUNING_MODE_MASK 0x0000C000
200#define SDHCI_RETUNING_MODE_SHIFT 14
201#define SDHCI_CLOCK_MUL_MASK 0x00FF0000
202#define SDHCI_CLOCK_MUL_SHIFT 16
203
170#define SDHCI_CAPABILITIES_1 0x44 204#define SDHCI_CAPABILITIES_1 0x44
171 205
172#define SDHCI_MAX_CURRENT 0x48 206#define SDHCI_MAX_CURRENT 0x48
207#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
208#define SDHCI_MAX_CURRENT_330_SHIFT 0
209#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
210#define SDHCI_MAX_CURRENT_300_SHIFT 8
211#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000
212#define SDHCI_MAX_CURRENT_180_SHIFT 16
213#define SDHCI_MAX_CURRENT_MULTIPLIER 4
173 214
174/* 4C-4F reserved for more max current */ 215/* 4C-4F reserved for more max current */
175 216
@@ -202,6 +243,12 @@
202#define SDHCI_MAX_DIV_SPEC_200 256 243#define SDHCI_MAX_DIV_SPEC_200 256
203#define SDHCI_MAX_DIV_SPEC_300 2046 244#define SDHCI_MAX_DIV_SPEC_300 2046
204 245
246/*
247 * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2.
248 */
249#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024)
250#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12)
251
205struct sdhci_ops { 252struct sdhci_ops {
206#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 253#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
207 u32 (*read_l)(struct sdhci_host *host, int reg); 254 u32 (*read_l)(struct sdhci_host *host, int reg);
@@ -223,6 +270,10 @@ struct sdhci_ops {
223 void (*platform_send_init_74_clocks)(struct sdhci_host *host, 270 void (*platform_send_init_74_clocks)(struct sdhci_host *host,
224 u8 power_mode); 271 u8 power_mode);
225 unsigned int (*get_ro)(struct sdhci_host *host); 272 unsigned int (*get_ro)(struct sdhci_host *host);
273 void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
274 void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
275 int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
276
226}; 277};
227 278
228#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 279#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index af97015a2fc7..14f8edbaa195 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -29,6 +29,8 @@
29#include <linux/mmc/sh_mmcif.h> 29#include <linux/mmc/sh_mmcif.h>
30#include <linux/pagemap.h> 30#include <linux/pagemap.h>
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/pm_runtime.h>
33#include <linux/spinlock.h>
32 34
33#define DRIVER_NAME "sh_mmcif" 35#define DRIVER_NAME "sh_mmcif"
34#define DRIVER_VERSION "2010-04-28" 36#define DRIVER_VERSION "2010-04-28"
@@ -153,6 +155,12 @@
153#define CLKDEV_MMC_DATA 20000000 /* 20MHz */ 155#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
154#define CLKDEV_INIT 400000 /* 400 KHz */ 156#define CLKDEV_INIT 400000 /* 400 KHz */
155 157
158enum mmcif_state {
159 STATE_IDLE,
160 STATE_REQUEST,
161 STATE_IOS,
162};
163
156struct sh_mmcif_host { 164struct sh_mmcif_host {
157 struct mmc_host *mmc; 165 struct mmc_host *mmc;
158 struct mmc_data *data; 166 struct mmc_data *data;
@@ -164,6 +172,9 @@ struct sh_mmcif_host {
164 long timeout; 172 long timeout;
165 void __iomem *addr; 173 void __iomem *addr;
166 struct completion intr_wait; 174 struct completion intr_wait;
175 enum mmcif_state state;
176 spinlock_t lock;
177 bool power;
167 178
168 /* DMA support */ 179 /* DMA support */
169 struct dma_chan *chan_rx; 180 struct dma_chan *chan_rx;
@@ -798,17 +809,31 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
798static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) 809static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
799{ 810{
800 struct sh_mmcif_host *host = mmc_priv(mmc); 811 struct sh_mmcif_host *host = mmc_priv(mmc);
812 unsigned long flags;
813
814 spin_lock_irqsave(&host->lock, flags);
815 if (host->state != STATE_IDLE) {
816 spin_unlock_irqrestore(&host->lock, flags);
817 mrq->cmd->error = -EAGAIN;
818 mmc_request_done(mmc, mrq);
819 return;
820 }
821
822 host->state = STATE_REQUEST;
823 spin_unlock_irqrestore(&host->lock, flags);
801 824
802 switch (mrq->cmd->opcode) { 825 switch (mrq->cmd->opcode) {
803 /* MMCIF does not support SD/SDIO command */ 826 /* MMCIF does not support SD/SDIO command */
804 case SD_IO_SEND_OP_COND: 827 case SD_IO_SEND_OP_COND:
805 case MMC_APP_CMD: 828 case MMC_APP_CMD:
829 host->state = STATE_IDLE;
806 mrq->cmd->error = -ETIMEDOUT; 830 mrq->cmd->error = -ETIMEDOUT;
807 mmc_request_done(mmc, mrq); 831 mmc_request_done(mmc, mrq);
808 return; 832 return;
809 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ 833 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
810 if (!mrq->data) { 834 if (!mrq->data) {
811 /* send_if_cond cmd (not support) */ 835 /* send_if_cond cmd (not support) */
836 host->state = STATE_IDLE;
812 mrq->cmd->error = -ETIMEDOUT; 837 mrq->cmd->error = -ETIMEDOUT;
813 mmc_request_done(mmc, mrq); 838 mmc_request_done(mmc, mrq);
814 return; 839 return;
@@ -830,12 +855,9 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
830 sh_mmcif_start_cmd(host, mrq, mrq->cmd); 855 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
831 host->data = NULL; 856 host->data = NULL;
832 857
833 if (mrq->cmd->error != 0) { 858 if (!mrq->cmd->error && mrq->stop)
834 mmc_request_done(mmc, mrq);
835 return;
836 }
837 if (mrq->stop)
838 sh_mmcif_stop_cmd(host, mrq, mrq->stop); 859 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
860 host->state = STATE_IDLE;
839 mmc_request_done(mmc, mrq); 861 mmc_request_done(mmc, mrq);
840} 862}
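Read as one piece, the changes to sh_mmcif_request() above add a small spinlock-protected state machine: a request that arrives while the host is not idle is failed immediately with -EAGAIN, otherwise the host is marked STATE_REQUEST for the duration and returned to STATE_IDLE on every exit path. A condensed sketch of the resulting flow, not a verbatim copy:

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}
	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	/* ... issue the command; each early return and the normal
	 * completion path set host->state back to STATE_IDLE ... */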
841 863
@@ -843,15 +865,39 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
843{ 865{
844 struct sh_mmcif_host *host = mmc_priv(mmc); 866 struct sh_mmcif_host *host = mmc_priv(mmc);
845 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; 867 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
868 unsigned long flags;
869
870 spin_lock_irqsave(&host->lock, flags);
871 if (host->state != STATE_IDLE) {
872 spin_unlock_irqrestore(&host->lock, flags);
873 return;
874 }
875
876 host->state = STATE_IOS;
877 spin_unlock_irqrestore(&host->lock, flags);
846 878
847 if (ios->power_mode == MMC_POWER_UP) { 879 if (ios->power_mode == MMC_POWER_UP) {
848 if (p->set_pwr) 880 if (p->set_pwr)
849 p->set_pwr(host->pd, ios->power_mode); 881 p->set_pwr(host->pd, ios->power_mode);
882 if (!host->power) {
883 /* See if we also get DMA */
884 sh_mmcif_request_dma(host, host->pd->dev.platform_data);
885 pm_runtime_get_sync(&host->pd->dev);
886 host->power = true;
887 }
850 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 888 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
851 /* clock stop */ 889 /* clock stop */
852 sh_mmcif_clock_control(host, 0); 890 sh_mmcif_clock_control(host, 0);
853 if (ios->power_mode == MMC_POWER_OFF && p->down_pwr) 891 if (ios->power_mode == MMC_POWER_OFF) {
854 p->down_pwr(host->pd); 892 if (host->power) {
893 pm_runtime_put(&host->pd->dev);
894 sh_mmcif_release_dma(host);
895 host->power = false;
896 }
897 if (p->down_pwr)
898 p->down_pwr(host->pd);
899 }
900 host->state = STATE_IDLE;
855 return; 901 return;
856 } 902 }
857 903
@@ -859,6 +905,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
859 sh_mmcif_clock_control(host, ios->clock); 905 sh_mmcif_clock_control(host, ios->clock);
860 906
861 host->bus_width = ios->bus_width; 907 host->bus_width = ios->bus_width;
908 host->state = STATE_IDLE;
862} 909}
863 910
864static int sh_mmcif_get_cd(struct mmc_host *mmc) 911static int sh_mmcif_get_cd(struct mmc_host *mmc)
@@ -925,7 +972,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
925 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 972 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
926 err = 1; 973 err = 1;
927 } else { 974 } else {
928 dev_dbg(&host->pd->dev, "Not support int\n"); 975 dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
929 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); 976 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
930 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); 977 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
931 err = 1; 978 err = 1;
@@ -996,6 +1043,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
996 host->pd = pdev; 1043 host->pd = pdev;
997 1044
998 init_completion(&host->intr_wait); 1045 init_completion(&host->intr_wait);
1046 spin_lock_init(&host->lock);
999 1047
1000 mmc->ops = &sh_mmcif_ops; 1048 mmc->ops = &sh_mmcif_ops;
1001 mmc->f_max = host->clk; 1049 mmc->f_max = host->clk;
@@ -1020,24 +1068,29 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
1020 sh_mmcif_sync_reset(host); 1068 sh_mmcif_sync_reset(host);
1021 platform_set_drvdata(pdev, host); 1069 platform_set_drvdata(pdev, host);
1022 1070
1023 /* See if we also get DMA */ 1071 pm_runtime_enable(&pdev->dev);
1024 sh_mmcif_request_dma(host, pd); 1072 host->power = false;
1073
1074 ret = pm_runtime_resume(&pdev->dev);
1075 if (ret < 0)
1076 goto clean_up2;
1025 1077
1026 mmc_add_host(mmc); 1078 mmc_add_host(mmc);
1027 1079
1080 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1081
1028 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); 1082 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
1029 if (ret) { 1083 if (ret) {
1030 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); 1084 dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
1031 goto clean_up2; 1085 goto clean_up3;
1032 } 1086 }
1033 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); 1087 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
1034 if (ret) { 1088 if (ret) {
1035 free_irq(irq[0], host); 1089 free_irq(irq[0], host);
1036 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); 1090 dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
1037 goto clean_up2; 1091 goto clean_up3;
1038 } 1092 }
1039 1093
1040 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1041 sh_mmcif_detect(host->mmc); 1094 sh_mmcif_detect(host->mmc);
1042 1095
1043 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); 1096 dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1045,7 +1098,11 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
1045 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); 1098 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
1046 return ret; 1099 return ret;
1047 1100
1101clean_up3:
1102 mmc_remove_host(mmc);
1103 pm_runtime_suspend(&pdev->dev);
1048clean_up2: 1104clean_up2:
1105 pm_runtime_disable(&pdev->dev);
1049 clk_disable(host->hclk); 1106 clk_disable(host->hclk);
1050clean_up1: 1107clean_up1:
1051 mmc_free_host(mmc); 1108 mmc_free_host(mmc);
@@ -1060,14 +1117,14 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
1060 struct sh_mmcif_host *host = platform_get_drvdata(pdev); 1117 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1061 int irq[2]; 1118 int irq[2];
1062 1119
1120 pm_runtime_get_sync(&pdev->dev);
1121
1063 mmc_remove_host(host->mmc); 1122 mmc_remove_host(host->mmc);
1064 sh_mmcif_release_dma(host); 1123 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1065 1124
1066 if (host->addr) 1125 if (host->addr)
1067 iounmap(host->addr); 1126 iounmap(host->addr);
1068 1127
1069 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1070
1071 irq[0] = platform_get_irq(pdev, 0); 1128 irq[0] = platform_get_irq(pdev, 0);
1072 irq[1] = platform_get_irq(pdev, 1); 1129 irq[1] = platform_get_irq(pdev, 1);
1073 1130
@@ -1078,15 +1135,52 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
1078 1135
1079 clk_disable(host->hclk); 1136 clk_disable(host->hclk);
1080 mmc_free_host(host->mmc); 1137 mmc_free_host(host->mmc);
1138 pm_runtime_put_sync(&pdev->dev);
1139 pm_runtime_disable(&pdev->dev);
1081 1140
1082 return 0; 1141 return 0;
1083} 1142}
1084 1143
1144#ifdef CONFIG_PM
1145static int sh_mmcif_suspend(struct device *dev)
1146{
1147 struct platform_device *pdev = to_platform_device(dev);
1148 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1149 int ret = mmc_suspend_host(host->mmc);
1150
1151 if (!ret) {
1152 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1153 clk_disable(host->hclk);
1154 }
1155
1156 return ret;
1157}
1158
1159static int sh_mmcif_resume(struct device *dev)
1160{
1161 struct platform_device *pdev = to_platform_device(dev);
1162 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1163
1164 clk_enable(host->hclk);
1165
1166 return mmc_resume_host(host->mmc);
1167}
1168#else
1169#define sh_mmcif_suspend NULL
1170#define sh_mmcif_resume NULL
1171#endif /* CONFIG_PM */
1172
1173static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
1174 .suspend = sh_mmcif_suspend,
1175 .resume = sh_mmcif_resume,
1176};
1177
1085static struct platform_driver sh_mmcif_driver = { 1178static struct platform_driver sh_mmcif_driver = {
1086 .probe = sh_mmcif_probe, 1179 .probe = sh_mmcif_probe,
1087 .remove = sh_mmcif_remove, 1180 .remove = sh_mmcif_remove,
1088 .driver = { 1181 .driver = {
1089 .name = DRIVER_NAME, 1182 .name = DRIVER_NAME,
1183 .pm = &sh_mmcif_dev_pm_ops,
1090 }, 1184 },
1091}; 1185};
1092 1186
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index cc701236d16f..b3654293017b 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -62,7 +62,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
62 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; 62 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
63 struct tmio_mmc_host *host; 63 struct tmio_mmc_host *host;
64 char clk_name[8]; 64 char clk_name[8];
65 int ret; 65 int i, irq, ret;
66 66
67 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); 67 priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
68 if (priv == NULL) { 68 if (priv == NULL) {
@@ -71,6 +71,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
71 } 71 }
72 72
73 mmc_data = &priv->mmc_data; 73 mmc_data = &priv->mmc_data;
74 p->pdata = mmc_data;
74 75
75 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); 76 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
76 priv->clk = clk_get(&pdev->dev, clk_name); 77 priv->clk = clk_get(&pdev->dev, clk_name);
@@ -116,11 +117,36 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
116 if (ret < 0) 117 if (ret < 0)
117 goto eprobe; 118 goto eprobe;
118 119
119 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 120 for (i = 0; i < 3; i++) {
120 (unsigned long)host->ctl, host->irq); 121 irq = platform_get_irq(pdev, i);
122 if (irq < 0) {
123 if (i) {
124 continue;
125 } else {
126 ret = irq;
127 goto eirq;
128 }
129 }
130 ret = request_irq(irq, tmio_mmc_irq, 0,
131 dev_name(&pdev->dev), host);
132 if (ret) {
133 while (i--) {
134 irq = platform_get_irq(pdev, i);
135 if (irq >= 0)
136 free_irq(irq, host);
137 }
138 goto eirq;
139 }
140 }
141 dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
142 mmc_hostname(host->mmc), (unsigned long)
143 (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start),
144 mmc_data->hclk / 1000000);
121 145
122 return ret; 146 return ret;
123 147
148eirq:
149 tmio_mmc_host_remove(host);
124eprobe: 150eprobe:
125 clk_disable(priv->clk); 151 clk_disable(priv->clk);
126 clk_put(priv->clk); 152 clk_put(priv->clk);
@@ -134,6 +160,16 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
134 struct mmc_host *mmc = platform_get_drvdata(pdev); 160 struct mmc_host *mmc = platform_get_drvdata(pdev);
135 struct tmio_mmc_host *host = mmc_priv(mmc); 161 struct tmio_mmc_host *host = mmc_priv(mmc);
136 struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); 162 struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
163 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
164 int i, irq;
165
166 p->pdata = NULL;
167
168 for (i = 0; i < 3; i++) {
169 irq = platform_get_irq(pdev, i);
170 if (irq >= 0)
171 free_irq(irq, host);
172 }
137 173
138 tmio_mmc_host_remove(host); 174 tmio_mmc_host_remove(host);
139 clk_disable(priv->clk); 175 clk_disable(priv->clk);
@@ -143,10 +179,18 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
143 return 0; 179 return 0;
144} 180}
145 181
182static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
183 .suspend = tmio_mmc_host_suspend,
184 .resume = tmio_mmc_host_resume,
185 .runtime_suspend = tmio_mmc_host_runtime_suspend,
186 .runtime_resume = tmio_mmc_host_runtime_resume,
187};
188
146static struct platform_driver sh_mobile_sdhi_driver = { 189static struct platform_driver sh_mobile_sdhi_driver = {
147 .driver = { 190 .driver = {
148 .name = "sh_mobile_sdhi", 191 .name = "sh_mobile_sdhi",
149 .owner = THIS_MODULE, 192 .owner = THIS_MODULE,
193 .pm = &tmio_mmc_dev_pm_ops,
150 }, 194 },
151 .probe = sh_mobile_sdhi_probe, 195 .probe = sh_mobile_sdhi_probe,
152 .remove = __devexit_p(sh_mobile_sdhi_remove), 196 .remove = __devexit_p(sh_mobile_sdhi_remove),
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 79c568461d59..8d185de90d20 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -30,7 +30,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
30 struct mmc_host *mmc = platform_get_drvdata(dev); 30 struct mmc_host *mmc = platform_get_drvdata(dev);
31 int ret; 31 int ret;
32 32
33 ret = mmc_suspend_host(mmc); 33 ret = tmio_mmc_host_suspend(&dev->dev);
34 34
35 /* Tell MFD core it can disable us now.*/ 35 /* Tell MFD core it can disable us now.*/
36 if (!ret && cell->disable) 36 if (!ret && cell->disable)
@@ -46,15 +46,12 @@ static int tmio_mmc_resume(struct platform_device *dev)
46 int ret = 0; 46 int ret = 0;
47 47
48 /* Tell the MFD core we are ready to be enabled */ 48 /* Tell the MFD core we are ready to be enabled */
49 if (cell->resume) { 49 if (cell->resume)
50 ret = cell->resume(dev); 50 ret = cell->resume(dev);
51 if (ret)
52 goto out;
53 }
54 51
55 mmc_resume_host(mmc); 52 if (!ret)
53 ret = tmio_mmc_host_resume(&dev->dev);
56 54
57out:
58 return ret; 55 return ret;
59} 56}
60#else 57#else
@@ -67,15 +64,21 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
67 const struct mfd_cell *cell = mfd_get_cell(pdev); 64 const struct mfd_cell *cell = mfd_get_cell(pdev);
68 struct tmio_mmc_data *pdata; 65 struct tmio_mmc_data *pdata;
69 struct tmio_mmc_host *host; 66 struct tmio_mmc_host *host;
70 int ret = -EINVAL; 67 int ret = -EINVAL, irq;
71 68
72 if (pdev->num_resources != 2) 69 if (pdev->num_resources != 2)
73 goto out; 70 goto out;
74 71
75 pdata = mfd_get_data(pdev); 72 pdata = pdev->dev.platform_data;
76 if (!pdata || !pdata->hclk) 73 if (!pdata || !pdata->hclk)
77 goto out; 74 goto out;
78 75
76 irq = platform_get_irq(pdev, 0);
77 if (irq < 0) {
78 ret = irq;
79 goto out;
80 }
81
79 /* Tell the MFD core we are ready to be enabled */ 82 /* Tell the MFD core we are ready to be enabled */
80 if (cell->enable) { 83 if (cell->enable) {
81 ret = cell->enable(pdev); 84 ret = cell->enable(pdev);
@@ -87,11 +90,18 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
87 if (ret) 90 if (ret)
88 goto cell_disable; 91 goto cell_disable;
89 92
93 ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED |
94 IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host);
95 if (ret)
96 goto host_remove;
97
90 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), 98 pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
91 (unsigned long)host->ctl, host->irq); 99 (unsigned long)host->ctl, irq);
92 100
93 return 0; 101 return 0;
94 102
103host_remove:
104 tmio_mmc_host_remove(host);
95cell_disable: 105cell_disable:
96 if (cell->disable) 106 if (cell->disable)
97 cell->disable(pdev); 107 cell->disable(pdev);
@@ -107,7 +117,9 @@ static int __devexit tmio_mmc_remove(struct platform_device *pdev)
107 platform_set_drvdata(pdev, NULL); 117 platform_set_drvdata(pdev, NULL);
108 118
109 if (mmc) { 119 if (mmc) {
110 tmio_mmc_host_remove(mmc_priv(mmc)); 120 struct tmio_mmc_host *host = mmc_priv(mmc);
121 free_irq(platform_get_irq(pdev, 0), host);
122 tmio_mmc_host_remove(host);
111 if (cell->disable) 123 if (cell->disable)
112 cell->disable(pdev); 124 cell->disable(pdev);
113 } 125 }
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 099ed49a259b..8260bc2c34e3 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -19,6 +19,7 @@
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/mmc/tmio.h> 20#include <linux/mmc/tmio.h>
21#include <linux/pagemap.h> 21#include <linux/pagemap.h>
22#include <linux/spinlock.h>
22 23
23/* Definitions for values the CTRL_SDIO_STATUS register can take. */ 24/* Definitions for values the CTRL_SDIO_STATUS register can take. */
24#define TMIO_SDIO_STAT_IOIRQ 0x0001 25#define TMIO_SDIO_STAT_IOIRQ 0x0001
@@ -44,13 +45,14 @@ struct tmio_mmc_host {
44 struct mmc_request *mrq; 45 struct mmc_request *mrq;
45 struct mmc_data *data; 46 struct mmc_data *data;
46 struct mmc_host *mmc; 47 struct mmc_host *mmc;
47 int irq;
48 unsigned int sdio_irq_enabled; 48 unsigned int sdio_irq_enabled;
49 49
50 /* Callbacks for clock / power control */ 50 /* Callbacks for clock / power control */
51 void (*set_pwr)(struct platform_device *host, int state); 51 void (*set_pwr)(struct platform_device *host, int state);
52 void (*set_clk_div)(struct platform_device *host, int state); 52 void (*set_clk_div)(struct platform_device *host, int state);
53 53
54 int pm_error;
55
54 /* pio related stuff */ 56 /* pio related stuff */
55 struct scatterlist *sg_ptr; 57 struct scatterlist *sg_ptr;
56 struct scatterlist *sg_orig; 58 struct scatterlist *sg_orig;
@@ -83,6 +85,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
83 85
84void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 86void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
85void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); 87void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
88irqreturn_t tmio_mmc_irq(int irq, void *devid);
86 89
87static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, 90static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
88 unsigned long *flags) 91 unsigned long *flags)
@@ -120,4 +123,15 @@ static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
120} 123}
121#endif 124#endif
122 125
126#ifdef CONFIG_PM
127int tmio_mmc_host_suspend(struct device *dev);
128int tmio_mmc_host_resume(struct device *dev);
129#else
130#define tmio_mmc_host_suspend NULL
131#define tmio_mmc_host_resume NULL
132#endif
133
134int tmio_mmc_host_runtime_suspend(struct device *dev);
135int tmio_mmc_host_runtime_resume(struct device *dev);
136
123#endif 137#endif
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index d3de74ab633e..25f1ad6cbe09 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -256,7 +256,10 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
256void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) 256void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
257{ 257{
258 /* We can only either use DMA for both Tx and Rx or not use it at all */ 258 /* We can only either use DMA for both Tx and Rx or not use it at all */
259 if (pdata->dma) { 259 if (!pdata->dma)
260 return;
261
262 if (!host->chan_tx && !host->chan_rx) {
260 dma_cap_mask_t mask; 263 dma_cap_mask_t mask;
261 264
262 dma_cap_zero(mask); 265 dma_cap_zero(mask);
@@ -284,18 +287,18 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
284 287
285 tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); 288 tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
286 tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); 289 tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
290 }
287 291
288 tmio_mmc_enable_dma(host, true); 292 tmio_mmc_enable_dma(host, true);
293
294 return;
289 295
290 return;
291ebouncebuf: 296ebouncebuf:
292 dma_release_channel(host->chan_rx); 297 dma_release_channel(host->chan_rx);
293 host->chan_rx = NULL; 298 host->chan_rx = NULL;
294ereqrx: 299ereqrx:
295 dma_release_channel(host->chan_tx); 300 dma_release_channel(host->chan_tx);
296 host->chan_tx = NULL; 301 host->chan_tx = NULL;
297 return;
298 }
299} 302}
300 303
301void tmio_mmc_release_dma(struct tmio_mmc_host *host) 304void tmio_mmc_release_dma(struct tmio_mmc_host *host)
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 710339a85c84..ad6347bb02dd 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -39,6 +39,7 @@
39#include <linux/module.h> 39#include <linux/module.h>
40#include <linux/pagemap.h> 40#include <linux/pagemap.h>
41#include <linux/platform_device.h> 41#include <linux/platform_device.h>
42#include <linux/pm_runtime.h>
42#include <linux/scatterlist.h> 43#include <linux/scatterlist.h>
43#include <linux/workqueue.h> 44#include <linux/workqueue.h>
44#include <linux/spinlock.h> 45#include <linux/spinlock.h>
@@ -243,8 +244,12 @@ static void tmio_mmc_reset_work(struct work_struct *work)
243 spin_lock_irqsave(&host->lock, flags); 244 spin_lock_irqsave(&host->lock, flags);
244 mrq = host->mrq; 245 mrq = host->mrq;
245 246
246 /* request already finished */ 247 /*
247 if (!mrq 248 * is request already finished? Since we use a non-blocking
249 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
250 * us, so we have to check for IS_ERR(host->mrq)
249 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
251 */
252 if (IS_ERR_OR_NULL(mrq)
248 || time_is_after_jiffies(host->last_req_ts + 253 || time_is_after_jiffies(host->last_req_ts +
249 msecs_to_jiffies(2000))) { 254 msecs_to_jiffies(2000))) {
250 spin_unlock_irqrestore(&host->lock, flags); 255 spin_unlock_irqrestore(&host->lock, flags);
@@ -264,16 +269,19 @@ static void tmio_mmc_reset_work(struct work_struct *work)
264 269
265 host->cmd = NULL; 270 host->cmd = NULL;
266 host->data = NULL; 271 host->data = NULL;
267 host->mrq = NULL;
268 host->force_pio = false; 272 host->force_pio = false;
269 273
270 spin_unlock_irqrestore(&host->lock, flags); 274 spin_unlock_irqrestore(&host->lock, flags);
271 275
272 tmio_mmc_reset(host); 276 tmio_mmc_reset(host);
273 277
278 /* Ready for new calls */
279 host->mrq = NULL;
280
274 mmc_request_done(host->mmc, mrq); 281 mmc_request_done(host->mmc, mrq);
275} 282}
276 283
284/* called with host->lock held, interrupts disabled */
277static void tmio_mmc_finish_request(struct tmio_mmc_host *host) 285static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
278{ 286{
279 struct mmc_request *mrq = host->mrq; 287 struct mmc_request *mrq = host->mrq;
@@ -281,13 +289,15 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
281 if (!mrq) 289 if (!mrq)
282 return; 290 return;
283 291
284 host->mrq = NULL;
285 host->cmd = NULL; 292 host->cmd = NULL;
286 host->data = NULL; 293 host->data = NULL;
287 host->force_pio = false; 294 host->force_pio = false;
288 295
289 cancel_delayed_work(&host->delayed_reset_work); 296 cancel_delayed_work(&host->delayed_reset_work);
290 297
298 host->mrq = NULL;
299
300 /* FIXME: mmc_request_done() can schedule! */
291 mmc_request_done(host->mmc, mrq); 301 mmc_request_done(host->mmc, mrq);
292} 302}
293 303
@@ -554,7 +564,7 @@ out:
554 spin_unlock(&host->lock); 564 spin_unlock(&host->lock);
555} 565}
556 566
557static irqreturn_t tmio_mmc_irq(int irq, void *devid) 567irqreturn_t tmio_mmc_irq(int irq, void *devid)
558{ 568{
559 struct tmio_mmc_host *host = devid; 569 struct tmio_mmc_host *host = devid;
560 struct tmio_mmc_data *pdata = host->pdata; 570 struct tmio_mmc_data *pdata = host->pdata;
@@ -649,6 +659,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
649out: 659out:
650 return IRQ_HANDLED; 660 return IRQ_HANDLED;
651} 661}
662EXPORT_SYMBOL(tmio_mmc_irq);
652 663
653static int tmio_mmc_start_data(struct tmio_mmc_host *host, 664static int tmio_mmc_start_data(struct tmio_mmc_host *host,
654 struct mmc_data *data) 665 struct mmc_data *data)
@@ -685,15 +696,27 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
685static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 696static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
686{ 697{
687 struct tmio_mmc_host *host = mmc_priv(mmc); 698 struct tmio_mmc_host *host = mmc_priv(mmc);
699 unsigned long flags;
688 int ret; 700 int ret;
689 701
690 if (host->mrq) 702 spin_lock_irqsave(&host->lock, flags);
703
704 if (host->mrq) {
691 pr_debug("request not null\n"); 705 pr_debug("request not null\n");
706 if (IS_ERR(host->mrq)) {
707 spin_unlock_irqrestore(&host->lock, flags);
708 mrq->cmd->error = -EAGAIN;
709 mmc_request_done(mmc, mrq);
710 return;
711 }
712 }
692 713
693 host->last_req_ts = jiffies; 714 host->last_req_ts = jiffies;
694 wmb(); 715 wmb();
695 host->mrq = mrq; 716 host->mrq = mrq;
696 717
718 spin_unlock_irqrestore(&host->lock, flags);
719
697 if (mrq->data) { 720 if (mrq->data) {
698 ret = tmio_mmc_start_data(host, mrq->data); 721 ret = tmio_mmc_start_data(host, mrq->data);
699 if (ret) 722 if (ret)
@@ -708,8 +731,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
708 } 731 }
709 732
710fail: 733fail:
711 host->mrq = NULL;
712 host->force_pio = false; 734 host->force_pio = false;
735 host->mrq = NULL;
713 mrq->cmd->error = ret; 736 mrq->cmd->error = ret;
714 mmc_request_done(mmc, mrq); 737 mmc_request_done(mmc, mrq);
715} 738}
@@ -723,19 +746,54 @@ fail:
723static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 746static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
724{ 747{
725 struct tmio_mmc_host *host = mmc_priv(mmc); 748 struct tmio_mmc_host *host = mmc_priv(mmc);
749 struct tmio_mmc_data *pdata = host->pdata;
750 unsigned long flags;
751
752 spin_lock_irqsave(&host->lock, flags);
753 if (host->mrq) {
754 if (IS_ERR(host->mrq)) {
755 dev_dbg(&host->pdev->dev,
756 "%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
757 current->comm, task_pid_nr(current),
758 ios->clock, ios->power_mode);
759 host->mrq = ERR_PTR(-EINTR);
760 } else {
761 dev_dbg(&host->pdev->dev,
762 "%s.%d: CMD%u active since %lu, now %lu!\n",
763 current->comm, task_pid_nr(current),
764 host->mrq->cmd->opcode, host->last_req_ts, jiffies);
765 }
766 spin_unlock_irqrestore(&host->lock, flags);
767 return;
768 }
769
770 host->mrq = ERR_PTR(-EBUSY);
771
772 spin_unlock_irqrestore(&host->lock, flags);
726 773
727 if (ios->clock) 774 if (ios->clock)
728 tmio_mmc_set_clock(host, ios->clock); 775 tmio_mmc_set_clock(host, ios->clock);
729 776
730 /* Power sequence - OFF -> UP -> ON */ 777 /* Power sequence - OFF -> UP -> ON */
731 if (ios->power_mode == MMC_POWER_UP) { 778 if (ios->power_mode == MMC_POWER_UP) {
779 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
780 pm_runtime_get_sync(&host->pdev->dev);
781 pdata->power = true;
782 }
732 /* power up SD bus */ 783 /* power up SD bus */
733 if (host->set_pwr) 784 if (host->set_pwr)
734 host->set_pwr(host->pdev, 1); 785 host->set_pwr(host->pdev, 1);
735 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { 786 } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
736 /* power down SD bus */ 787 /* power down SD bus */
737 if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) 788 if (ios->power_mode == MMC_POWER_OFF) {
738 host->set_pwr(host->pdev, 0); 789 if (host->set_pwr)
790 host->set_pwr(host->pdev, 0);
791 if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
792 pdata->power) {
793 pdata->power = false;
794 pm_runtime_put(&host->pdev->dev);
795 }
796 }
739 tmio_mmc_clk_stop(host); 797 tmio_mmc_clk_stop(host);
740 } else { 798 } else {
741 /* start bus clock */ 799 /* start bus clock */
@@ -753,6 +811,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
753 811
754 /* Let things settle. delay taken from winCE driver */ 812 /* Let things settle. delay taken from winCE driver */
755 udelay(140); 813 udelay(140);
814 if (PTR_ERR(host->mrq) == -EINTR)
815 dev_dbg(&host->pdev->dev,
816 "%s.%d: IOS interrupted: clk %u, mode %u",
817 current->comm, task_pid_nr(current),
818 ios->clock, ios->power_mode);
819 host->mrq = NULL;
756} 820}
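Taken together, the tmio_mmc_request(), tmio_mmc_set_ios() and tmio_mmc_reset_work() hunks above turn host->mrq into a small state word: NULL means idle, a real pointer means a command is in flight, ERR_PTR(-EBUSY) means .set_ios() owns the controller, and ERR_PTR(-EINTR) records that another .set_ios() interrupted it. A condensed sketch of the .set_ios() side of that protocol (illustrative only):

	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {		/* a request or another .set_ios() is active */
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	host->mrq = ERR_PTR(-EBUSY);	/* sentinel, not a real mmc_request */
	spin_unlock_irqrestore(&host->lock, flags);

	/* ... reprogram clock, power and bus width ... */

	host->mrq = NULL;		/* idle again */

On the .request() side, IS_ERR(host->mrq) is what turns a request arriving during .set_ios() into an immediate -EAGAIN completion, and the delayed reset work checks IS_ERR_OR_NULL() so that a sentinel is never mistaken for a timed-out command.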
757 821
758static int tmio_mmc_get_ro(struct mmc_host *mmc) 822static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -801,6 +865,7 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
801 if (!mmc) 865 if (!mmc)
802 return -ENOMEM; 866 return -ENOMEM;
803 867
868 pdata->dev = &pdev->dev;
804 _host = mmc_priv(mmc); 869 _host = mmc_priv(mmc);
805 _host->pdata = pdata; 870 _host->pdata = pdata;
806 _host->mmc = mmc; 871 _host->mmc = mmc;
@@ -834,24 +899,19 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
834 else 899 else
835 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 900 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
836 901
837 tmio_mmc_clk_stop(_host); 902 pdata->power = false;
838 tmio_mmc_reset(_host); 903 pm_runtime_enable(&pdev->dev);
839 904 ret = pm_runtime_resume(&pdev->dev);
840 ret = platform_get_irq(pdev, 0);
841 if (ret < 0) 905 if (ret < 0)
842 goto unmap_ctl; 906 goto pm_disable;
843 907
844 _host->irq = ret; 908 tmio_mmc_clk_stop(_host);
909 tmio_mmc_reset(_host);
845 910
846 tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); 911 tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
847 if (pdata->flags & TMIO_MMC_SDIO_IRQ) 912 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
848 tmio_mmc_enable_sdio_irq(mmc, 0); 913 tmio_mmc_enable_sdio_irq(mmc, 0);
849 914
850 ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
851 IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
852 if (ret)
853 goto unmap_ctl;
854
855 spin_lock_init(&_host->lock); 915 spin_lock_init(&_host->lock);
856 916
857 /* Init delayed work for request timeouts */ 917 /* Init delayed work for request timeouts */
@@ -860,6 +920,10 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
860 /* See if we also get DMA */ 920 /* See if we also get DMA */
861 tmio_mmc_request_dma(_host, pdata); 921 tmio_mmc_request_dma(_host, pdata);
862 922
923 /* We have to keep the device powered for its card detection to work */
924 if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
925 pm_runtime_get_noresume(&pdev->dev);
926
863 mmc_add_host(mmc); 927 mmc_add_host(mmc);
864 928
865 /* Unmask the IRQs we want to know about */ 929 /* Unmask the IRQs we want to know about */
@@ -874,7 +938,8 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
874 938
875 return 0; 939 return 0;
876 940
877unmap_ctl: 941pm_disable:
942 pm_runtime_disable(&pdev->dev);
878 iounmap(_host->ctl); 943 iounmap(_host->ctl);
879host_free: 944host_free:
880 mmc_free_host(mmc); 945 mmc_free_host(mmc);
@@ -885,13 +950,88 @@ EXPORT_SYMBOL(tmio_mmc_host_probe);
885 950
886void tmio_mmc_host_remove(struct tmio_mmc_host *host) 951void tmio_mmc_host_remove(struct tmio_mmc_host *host)
887{ 952{
953 struct platform_device *pdev = host->pdev;
954
955 /*
956 * We don't have to manipulate pdata->power here: if there is a card in
957 * the slot, the runtime PM is active and our .runtime_resume() will not
958 * be run. If there is no card in the slot and the platform can suspend
959 * the controller, the runtime PM is suspended and pdata->power == false,
960 * so our .runtime_resume() will not try to detect a card in the slot.
961 */
962 if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
963 pm_runtime_get_sync(&pdev->dev);
964
888 mmc_remove_host(host->mmc); 965 mmc_remove_host(host->mmc);
889 cancel_delayed_work_sync(&host->delayed_reset_work); 966 cancel_delayed_work_sync(&host->delayed_reset_work);
890 tmio_mmc_release_dma(host); 967 tmio_mmc_release_dma(host);
891 free_irq(host->irq, host); 968
969 pm_runtime_put_sync(&pdev->dev);
970 pm_runtime_disable(&pdev->dev);
971
892 iounmap(host->ctl); 972 iounmap(host->ctl);
893 mmc_free_host(host->mmc); 973 mmc_free_host(host->mmc);
894} 974}
895EXPORT_SYMBOL(tmio_mmc_host_remove); 975EXPORT_SYMBOL(tmio_mmc_host_remove);
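The probe and remove paths are now symmetric with respect to runtime PM: pm_runtime_enable() and pm_runtime_resume() (plus pm_runtime_get_noresume() when there is no cold card-detect line) in tmio_mmc_host_probe() are balanced by the pm_runtime_put_sync() and pm_runtime_disable() here, with an extra pm_runtime_get_sync() taken first when TMIO_MMC_HAS_COLD_CD is set so that the teardown register writes hit a powered block. The bare pattern, reduced to a hedged sketch on a generic platform device:

	/* probe */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}
	pm_runtime_get_noresume(&pdev->dev);	/* keep powered for card detect */

	/* remove */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);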
896 976
977#ifdef CONFIG_PM
978int tmio_mmc_host_suspend(struct device *dev)
979{
980 struct mmc_host *mmc = dev_get_drvdata(dev);
981 struct tmio_mmc_host *host = mmc_priv(mmc);
982 int ret = mmc_suspend_host(mmc);
983
984 if (!ret)
985 tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
986
987 host->pm_error = pm_runtime_put_sync(dev);
988
989 return ret;
990}
991EXPORT_SYMBOL(tmio_mmc_host_suspend);
992
993int tmio_mmc_host_resume(struct device *dev)
994{
995 struct mmc_host *mmc = dev_get_drvdata(dev);
996 struct tmio_mmc_host *host = mmc_priv(mmc);
997
998 /* The MMC core will perform the complete set up */
999 host->pdata->power = false;
1000
1001 if (!host->pm_error)
1002 pm_runtime_get_sync(dev);
1003
1004 tmio_mmc_reset(mmc_priv(mmc));
1005 tmio_mmc_request_dma(host, host->pdata);
1006
1007 return mmc_resume_host(mmc);
1008}
1009EXPORT_SYMBOL(tmio_mmc_host_resume);
1010
1011#endif /* CONFIG_PM */
1012
1013int tmio_mmc_host_runtime_suspend(struct device *dev)
1014{
1015 return 0;
1016}
1017EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);
1018
1019int tmio_mmc_host_runtime_resume(struct device *dev)
1020{
1021 struct mmc_host *mmc = dev_get_drvdata(dev);
1022 struct tmio_mmc_host *host = mmc_priv(mmc);
1023 struct tmio_mmc_data *pdata = host->pdata;
1024
1025 tmio_mmc_reset(host);
1026
1027 if (pdata->power) {
1028 /* Only entered after a card-insert interrupt */
1029 tmio_mmc_set_ios(mmc, &mmc->ios);
1030 mmc_detect_change(mmc, msecs_to_jiffies(100));
1031 }
1032
1033 return 0;
1034}
1035EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
1036
897MODULE_LICENSE("GPL v2"); 1037MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
new file mode 100644
index 000000000000..cbb03305b77b
--- /dev/null
+++ b/drivers/mmc/host/vub300.c
@@ -0,0 +1,2506 @@
1/*
2 * Remote VUB300 SDIO/SDmem Host Controller Driver
3 *
4 * Copyright (C) 2010 Elan Digital Systems Limited
5 *
6 * based on USB Skeleton driver - 2.2
7 *
8 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2
13 *
14 * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot
15 * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear,
16 * by virtue of this driver, to have been plugged into a local
17 * SDIO host controller, similar to, say, a PCI Ricoh controller.
18 * This is because this kernel device driver is both a USB 2.0
19 * client device driver AND an MMC host controller driver. Thus
20 * if there is an existing driver for the inserted SDIO/SDmem/MMC
21 * device then that driver will be used by the kernel to manage
22 * the device in exactly the same fashion as if it had been
23 * directly plugged into, say, a local pci bus Ricoh controller
24 *
25 * RANT: this driver was written using a display 128x48 - converting it
26 * to a line width of 80 makes it very difficult to support. In
27 * particular functions have been broken down into sub functions
28 * and the original meaningful names have been shortened into
29 * cryptic ones.
30 * The problem is that executing a fragment of code subject to
31 * two conditions means an indentation of 24, thus leaving only
32 * 56 characters for a C statement. And that is quite ridiculous!
33 *
34 * Data types: data passed to/from the VUB300 is fixed to a number of
35 * bits and driver data fields reflect that limit by using
36 * u8, u16, u32
37 */
38#include <linux/kernel.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/kref.h>
44#include <linux/uaccess.h>
45#include <linux/usb.h>
46#include <linux/mutex.h>
47#include <linux/mmc/host.h>
48#include <linux/mmc/card.h>
49#include <linux/mmc/sdio_func.h>
50#include <linux/mmc/sdio_ids.h>
51#include <linux/workqueue.h>
52#include <linux/ctype.h>
53#include <linux/firmware.h>
54#include <linux/scatterlist.h>
55
56struct host_controller_info {
57 u8 info_size;
58 u16 firmware_version;
59 u8 number_of_ports;
60} __packed;
61
62#define FIRMWARE_BLOCK_BOUNDARY 1024
63struct sd_command_header {
64 u8 header_size;
65 u8 header_type;
66 u8 port_number;
67 u8 command_type; /* Bit7 - Rd/Wr */
68 u8 command_index;
69 u8 transfer_size[4]; /* ReadSize + ReadSize */
70 u8 response_type;
71 u8 arguments[4];
72 u8 block_count[2];
73 u8 block_size[2];
74 u8 block_boundary[2];
75 u8 reserved[44]; /* to pad out to 64 bytes */
76} __packed;
77
78struct sd_irqpoll_header {
79 u8 header_size;
80 u8 header_type;
81 u8 port_number;
82 u8 command_type; /* Bit7 - Rd/Wr */
83 u8 padding[16]; /* don't ask why !! */
84 u8 poll_timeout_msb;
85 u8 poll_timeout_lsb;
86 u8 reserved[42]; /* to pad out to 64 bytes */
87} __packed;
88
89struct sd_common_header {
90 u8 header_size;
91 u8 header_type;
92 u8 port_number;
93} __packed;
94
95struct sd_response_header {
96 u8 header_size;
97 u8 header_type;
98 u8 port_number;
99 u8 command_type;
100 u8 command_index;
101 u8 command_response[0];
102} __packed;
103
104struct sd_status_header {
105 u8 header_size;
106 u8 header_type;
107 u8 port_number;
108 u16 port_flags;
109 u32 sdio_clock;
110 u16 host_header_size;
111 u16 func_header_size;
112 u16 ctrl_header_size;
113} __packed;
114
115struct sd_error_header {
116 u8 header_size;
117 u8 header_type;
118 u8 port_number;
119 u8 error_code;
120} __packed;
121
122struct sd_interrupt_header {
123 u8 header_size;
124 u8 header_type;
125 u8 port_number;
126} __packed;
127
128struct offload_registers_access {
129 u8 command_byte[4];
130 u8 Respond_Byte[4];
131} __packed;
132
133#define INTERRUPT_REGISTER_ACCESSES 15
134struct sd_offloaded_interrupt {
135 u8 header_size;
136 u8 header_type;
137 u8 port_number;
138 struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES];
139} __packed;
140
141struct sd_register_header {
142 u8 header_size;
143 u8 header_type;
144 u8 port_number;
145 u8 command_type;
146 u8 command_index;
147 u8 command_response[6];
148} __packed;
149
150#define PIGGYBACK_REGISTER_ACCESSES 14
151struct sd_offloaded_piggyback {
152 struct sd_register_header sdio;
153 struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES];
154} __packed;
155
156union sd_response {
157 struct sd_common_header common;
158 struct sd_status_header status;
159 struct sd_error_header error;
160 struct sd_interrupt_header interrupt;
161 struct sd_response_header response;
162 struct sd_offloaded_interrupt irq;
163 struct sd_offloaded_piggyback pig;
164} __packed;
165
166union sd_command {
167 struct sd_command_header head;
168 struct sd_irqpoll_header poll;
169} __packed;
170
171enum SD_RESPONSE_TYPE {
172 SDRT_UNSPECIFIED = 0,
173 SDRT_NONE,
174 SDRT_1,
175 SDRT_1B,
176 SDRT_2,
177 SDRT_3,
178 SDRT_4,
179 SDRT_5,
180 SDRT_5B,
181 SDRT_6,
182 SDRT_7,
183};
184
185#define RESPONSE_INTERRUPT 0x01
186#define RESPONSE_ERROR 0x02
187#define RESPONSE_STATUS 0x03
188#define RESPONSE_IRQ_DISABLED 0x05
189#define RESPONSE_IRQ_ENABLED 0x06
190#define RESPONSE_PIGGYBACKED 0x07
191#define RESPONSE_NO_INTERRUPT 0x08
192#define RESPONSE_PIG_DISABLED 0x09
193#define RESPONSE_PIG_ENABLED 0x0A
194#define SD_ERROR_1BIT_TIMEOUT 0x01
195#define SD_ERROR_4BIT_TIMEOUT 0x02
196#define SD_ERROR_1BIT_CRC_WRONG 0x03
197#define SD_ERROR_4BIT_CRC_WRONG 0x04
198#define SD_ERROR_1BIT_CRC_ERROR 0x05
199#define SD_ERROR_4BIT_CRC_ERROR 0x06
200#define SD_ERROR_NO_CMD_ENDBIT 0x07
201#define SD_ERROR_NO_1BIT_DATEND 0x08
202#define SD_ERROR_NO_4BIT_DATEND 0x09
203#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A
204#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B
205#define SD_ERROR_ILLEGAL_COMMAND 0x0C
206#define SD_ERROR_NO_DEVICE 0x0D
207#define SD_ERROR_TRANSFER_LENGTH 0x0E
208#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F
209#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10
210#define SD_ERROR_ILLEGAL_STATE 0x11
211#define SD_ERROR_UNKNOWN_ERROR 0x12
212#define SD_ERROR_RESERVED_ERROR 0x13
213#define SD_ERROR_INVALID_FUNCTION 0x14
214#define SD_ERROR_OUT_OF_RANGE 0x15
215#define SD_ERROR_STAT_CMD 0x16
216#define SD_ERROR_STAT_DATA 0x17
217#define SD_ERROR_STAT_CMD_TIMEOUT 0x18
218#define SD_ERROR_SDCRDY_STUCK 0x19
219#define SD_ERROR_UNHANDLED 0x1A
220#define SD_ERROR_OVERRUN 0x1B
221#define SD_ERROR_PIO_TIMEOUT 0x1C
222
223#define FUN(c) (0x000007 & (c->arg>>28))
224#define REG(c) (0x01FFFF & (c->arg>>9))
225
226static int limit_speed_to_24_MHz;
227module_param(limit_speed_to_24_MHz, bool, 0644);
228MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
229
230static int pad_input_to_usb_pkt;
231module_param(pad_input_to_usb_pkt, bool, 0644);
232MODULE_PARM_DESC(pad_input_to_usb_pkt,
233 "Pad USB data input transfers to whole USB Packet");
234
235static int disable_offload_processing;
236module_param(disable_offload_processing, bool, 0644);
237MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
238
239static int force_1_bit_data_xfers;
240module_param(force_1_bit_data_xfers, bool, 0644);
241MODULE_PARM_DESC(force_1_bit_data_xfers,
242 "Force SDIO Data Transfers to 1-bit Mode");
243
244static int force_polling_for_irqs;
245module_param(force_polling_for_irqs, bool, 0644);
246MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
247
248static int firmware_irqpoll_timeout = 1024;
249module_param(firmware_irqpoll_timeout, int, 0644);
250MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout");
251
252static int force_max_req_size = 128;
253module_param(force_max_req_size, int, 0644);
254MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes");
255
256#ifdef SMSC_DEVELOPMENT_BOARD
257static int firmware_rom_wait_states = 0x04;
258#else
259static int firmware_rom_wait_states = 0x1C;
260#endif
261
262module_param(firmware_rom_wait_states, bool, 0644);
263MODULE_PARM_DESC(firmware_rom_wait_states,
264 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
265
266#define ELAN_VENDOR_ID 0x2201
267#define VUB300_VENDOR_ID 0x0424
268#define VUB300_PRODUCT_ID 0x012C
269static struct usb_device_id vub300_table[] = {
270 {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)},
271 {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)},
272 {} /* Terminating entry */
273};
274MODULE_DEVICE_TABLE(usb, vub300_table);
275
276static struct workqueue_struct *cmndworkqueue;
277static struct workqueue_struct *pollworkqueue;
278static struct workqueue_struct *deadworkqueue;
279
280static inline int interface_to_InterfaceNumber(struct usb_interface *interface)
281{
282 if (!interface)
283 return -1;
284 if (!interface->cur_altsetting)
285 return -1;
286 return interface->cur_altsetting->desc.bInterfaceNumber;
287}
288
289struct sdio_register {
290 unsigned func_num:3;
291 unsigned sdio_reg:17;
292 unsigned activate:1;
293 unsigned prepared:1;
294 unsigned regvalue:8;
295 unsigned response:8;
296 unsigned sparebit:26;
297};
298
299struct vub300_mmc_host {
300 struct usb_device *udev;
301 struct usb_interface *interface;
302 struct kref kref;
303 struct mutex cmd_mutex;
304 struct mutex irq_mutex;
305 char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */
306 u8 cmnd_out_ep; /* EndPoint for commands */
307 u8 cmnd_res_ep; /* EndPoint for responses */
308 u8 data_out_ep; /* EndPoint for out data */
309 u8 data_inp_ep; /* EndPoint for inp data */
310 bool card_powered;
311 bool card_present;
312 bool read_only;
313 bool large_usb_packets;
314 bool app_spec; /* ApplicationSpecific */
315 bool irq_enabled; /* by the MMC CORE */
316 bool irq_disabled; /* in the firmware */
317 unsigned bus_width:4;
318 u8 total_offload_count;
319 u8 dynamic_register_count;
320 u8 resp_len;
321 u32 datasize;
322 int errors;
323 int usb_transport_fail;
324 int usb_timed_out;
325 int irqs_queued;
326 struct sdio_register sdio_register[16];
327 struct offload_interrupt_function_register {
328#define MAXREGBITS 4
329#define MAXREGS (1<<MAXREGBITS)
330#define MAXREGMASK (MAXREGS-1)
331 u8 offload_count;
332 u32 offload_point;
333 struct offload_registers_access reg[MAXREGS];
334 } fn[8];
335 u16 fbs[8]; /* Function Block Size */
336 struct mmc_command *cmd;
337 struct mmc_request *req;
338 struct mmc_data *data;
339 struct mmc_host *mmc;
340 struct urb *urb;
341 struct urb *command_out_urb;
342 struct urb *command_res_urb;
343 struct completion command_complete;
344 struct completion irqpoll_complete;
345 union sd_command cmnd;
346 union sd_response resp;
347 struct timer_list sg_transfer_timer;
348 struct usb_sg_request sg_request;
349 struct timer_list inactivity_timer;
350 struct work_struct deadwork;
351 struct work_struct cmndwork;
352 struct delayed_work pollwork;
353 struct host_controller_info hc_info;
354 struct sd_status_header system_port_status;
355 u8 padded_buffer[64];
356};
357
358#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref)
359#define SET_TRANSFER_PSEUDOCODE 21
360#define SET_INTERRUPT_PSEUDOCODE 20
361#define SET_FAILURE_MODE 18
362#define SET_ROM_WAIT_STATES 16
363#define SET_IRQ_ENABLE 13
364#define SET_CLOCK_SPEED 11
365#define SET_FUNCTION_BLOCK_SIZE 9
366#define SET_SD_DATA_MODE 6
367#define SET_SD_POWER 4
368#define ENTER_DFU_MODE 3
369#define GET_HC_INF0 1
370#define GET_SYSTEM_PORT_STATUS 0
371
372static void vub300_delete(struct kref *kref)
373{ /* kref callback - softirq */
374 struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
375 struct mmc_host *mmc = vub300->mmc;
376 usb_free_urb(vub300->command_out_urb);
377 vub300->command_out_urb = NULL;
378 usb_free_urb(vub300->command_res_urb);
379 vub300->command_res_urb = NULL;
380 usb_put_dev(vub300->udev);
381 mmc_free_host(mmc);
382 /*
383 * and hence also frees vub300
384 * which is contained at the end of struct mmc
385 */
386}
387
388static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300)
389{
390 kref_get(&vub300->kref);
391 if (queue_work(cmndworkqueue, &vub300->cmndwork)) {
392 /*
393 * then the cmndworkqueue was not previously
394 * running and the above get ref is obviously
395 * required and will be put when the thread
396 * terminates by a specific call
397 */
398 } else {
399 /*
400 * the cmndworkqueue was already running from
401 * a previous invocation and thus to keep the
402 * kref counts correct we must undo the get
403 */
404 kref_put(&vub300->kref, vub300_delete);
405 }
406}
407
408static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay)
409{
410 kref_get(&vub300->kref);
411 if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) {
412 /*
413 * then the pollworkqueue was not previously
414 * running and the above get ref is obviously
415 * required and will be put when the thread
416 * terminates by a specific call
417 */
418 } else {
419 /*
420 * the pollworkqueue was already running from
421 * a previous invocation and thus to keep the
422 * kref counts correct we must undo the get
423 */
424 kref_put(&vub300->kref, vub300_delete);
425 }
426}
427
428static void vub300_queue_dead_work(struct vub300_mmc_host *vub300)
429{
430 kref_get(&vub300->kref);
431 if (queue_work(deadworkqueue, &vub300->deadwork)) {
432 /*
433 * then the deadworkqueue was not previously
434 * running and the above get ref is obviously
435 * required and will be put when the thread
436 * terminates by a specific call
437 */
438 } else {
439 /*
440 * the deadworkqueue was already running from
441 * a previous invocation and thus to keep the
442 * kref counts correct we must undo the get
443 */
444 kref_put(&vub300->kref, vub300_delete);
445 }
446}
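All three vub300_queue_*_work() helpers follow one convention: grab a reference before queueing, and give it straight back if queue_work()/queue_delayed_work() reports the item was already pending, so exactly one reference is held per outstanding work item and the work function drops it when it returns. The same pattern in generic form (my_obj, my_wq and my_release are hypothetical names used only for illustration):

static void my_queue_work(struct my_obj *obj)
{
	kref_get(&obj->kref);
	if (!queue_work(my_wq, &obj->work))
		/* already queued: the reference taken back then still covers it */
		kref_put(&obj->kref, my_release);
	/* otherwise the work function finishes with kref_put(&obj->kref, my_release) */
}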
447
448static void irqpoll_res_completed(struct urb *urb)
449{ /* urb completion handler - hardirq */
450 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
451 if (urb->status)
452 vub300->usb_transport_fail = urb->status;
453 complete(&vub300->irqpoll_complete);
454}
455
456static void irqpoll_out_completed(struct urb *urb)
457{ /* urb completion handler - hardirq */
458 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
459 if (urb->status) {
460 vub300->usb_transport_fail = urb->status;
461 complete(&vub300->irqpoll_complete);
462 return;
463 } else {
464 int ret;
465 unsigned int pipe =
466 usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
467 usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
468 &vub300->resp, sizeof(vub300->resp),
469 irqpoll_res_completed, vub300);
470 vub300->command_res_urb->actual_length = 0;
471 ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
472 if (ret) {
473 vub300->usb_transport_fail = ret;
474 complete(&vub300->irqpoll_complete);
475 }
476 return;
477 }
478}
479
480static void send_irqpoll(struct vub300_mmc_host *vub300)
481{
482 /* cmd_mutex is held by vub300_pollwork_thread */
483 int retval;
484 int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout);
485 vub300->cmnd.poll.header_size = 22;
486 vub300->cmnd.poll.header_type = 1;
487 vub300->cmnd.poll.port_number = 0;
488 vub300->cmnd.poll.command_type = 2;
489 vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout;
490 vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8);
491 usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
492 usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep)
493 , &vub300->cmnd, sizeof(vub300->cmnd)
494 , irqpoll_out_completed, vub300);
495 retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
496 if (0 > retval) {
497 vub300->usb_transport_fail = retval;
498 vub300_queue_poll_work(vub300, 1);
499 complete(&vub300->irqpoll_complete);
500 return;
501 } else {
502 return;
503 }
504}
505
506static void new_system_port_status(struct vub300_mmc_host *vub300)
507{
508 int old_card_present = vub300->card_present;
509 int new_card_present =
510 (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
511 vub300->read_only =
512 (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
513 if (new_card_present && !old_card_present) {
514 dev_info(&vub300->udev->dev, "card just inserted\n");
515 vub300->card_present = 1;
516 vub300->bus_width = 0;
517 if (disable_offload_processing)
518 strncpy(vub300->vub_name, "EMPTY Processing Disabled",
519 sizeof(vub300->vub_name));
520 else
521 vub300->vub_name[0] = 0;
522 mmc_detect_change(vub300->mmc, 1);
523 } else if (!new_card_present && old_card_present) {
524 dev_info(&vub300->udev->dev, "card just ejected\n");
525 vub300->card_present = 0;
526 mmc_detect_change(vub300->mmc, 0);
527 } else {
528 /* no change */
529 }
530}
531
532static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300,
533 struct offload_registers_access
534 *register_access, u8 func)
535{
536 u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count;
537 memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access,
538 sizeof(struct offload_registers_access));
539 vub300->fn[func].offload_count += 1;
540 vub300->total_offload_count += 1;
541}
542
543static void add_offloaded_reg(struct vub300_mmc_host *vub300,
544 struct offload_registers_access *register_access)
545{
546 u32 Register = ((0x03 & register_access->command_byte[0]) << 15)
547 | ((0xFF & register_access->command_byte[1]) << 7)
548 | ((0xFE & register_access->command_byte[2]) >> 1);
549 u8 func = ((0x70 & register_access->command_byte[0]) >> 4);
550 u8 regs = vub300->dynamic_register_count;
551 u8 i = 0;
552 while (0 < regs-- && 1 == vub300->sdio_register[i].activate) {
553 if (vub300->sdio_register[i].func_num == func &&
554 vub300->sdio_register[i].sdio_reg == Register) {
555 if (vub300->sdio_register[i].prepared == 0)
556 vub300->sdio_register[i].prepared = 1;
557 vub300->sdio_register[i].response =
558 register_access->Respond_Byte[2];
559 vub300->sdio_register[i].regvalue =
560 register_access->Respond_Byte[3];
561 return;
562 } else {
563 i += 1;
564 continue;
565 }
566 };
567 __add_offloaded_reg_to_fifo(vub300, register_access, func);
568}
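The byte arithmetic in add_offloaded_reg() rebuilds the SDIO function number (argument bits 30:28) and register address (bits 25:9) from the four raw command bytes, i.e. the same bit fields the FUN() and REG() macros near the top of the file pull out of the 32-bit cmd->arg. A self-contained check of that equivalence (userspace sketch; 0x12345678 is an arbitrary example argument):

#include <stdio.h>

int main(void)
{
	unsigned int arg = 0x12345678;	/* arbitrary example CMD52-style argument */
	unsigned char cb[4] = {		/* the four big-endian argument bytes */
		arg >> 24, arg >> 16, arg >> 8, arg
	};
	unsigned int func = (0x70 & cb[0]) >> 4;
	unsigned int reg  = ((0x03 & cb[0]) << 15) |
			    ((0xFF & cb[1]) << 7) |
			    ((0xFE & cb[2]) >> 1);

	/* both lines print func=1 reg=0x11a2b */
	printf("bytes:  func=%u reg=0x%05x\n", func, reg);
	printf("macros: func=%u reg=0x%05x\n",
	       0x7 & (arg >> 28), 0x1ffff & (arg >> 9));
	return 0;
}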
569
570static void check_vub300_port_status(struct vub300_mmc_host *vub300)
571{
572 /*
573 * cmd_mutex is held by vub300_pollwork_thread,
574 * vub300_deadwork_thread or vub300_cmndwork_thread
575 */
576 int retval;
577 retval =
578 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
579 GET_SYSTEM_PORT_STATUS,
580 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
581 0x0000, 0x0000, &vub300->system_port_status,
582 sizeof(vub300->system_port_status), HZ);
583 if (sizeof(vub300->system_port_status) == retval)
584 new_system_port_status(vub300);
585}
586
587static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
588{
589 /* cmd_mutex is held by vub300_pollwork_thread */
590 if (vub300->command_res_urb->actual_length == 0)
591 return;
592
593 switch (vub300->resp.common.header_type) {
594 case RESPONSE_INTERRUPT:
595 mutex_lock(&vub300->irq_mutex);
596 if (vub300->irq_enabled)
597 mmc_signal_sdio_irq(vub300->mmc);
598 else
599 vub300->irqs_queued += 1;
600 vub300->irq_disabled = 1;
601 mutex_unlock(&vub300->irq_mutex);
602 break;
603 case RESPONSE_ERROR:
604 if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE)
605 check_vub300_port_status(vub300);
606 break;
607 case RESPONSE_STATUS:
608 vub300->system_port_status = vub300->resp.status;
609 new_system_port_status(vub300);
610 if (!vub300->card_present)
611 vub300_queue_poll_work(vub300, HZ / 5);
612 break;
613 case RESPONSE_IRQ_DISABLED:
614 {
615 int offloaded_data_length = vub300->resp.common.header_size - 3;
616 int register_count = offloaded_data_length >> 3;
617 int ri = 0;
618 while (register_count--) {
619 add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
620 ri += 1;
621 }
622 mutex_lock(&vub300->irq_mutex);
623 if (vub300->irq_enabled)
624 mmc_signal_sdio_irq(vub300->mmc);
625 else
626 vub300->irqs_queued += 1;
627 vub300->irq_disabled = 1;
628 mutex_unlock(&vub300->irq_mutex);
629 break;
630 }
631 case RESPONSE_IRQ_ENABLED:
632 {
633 int offloaded_data_length = vub300->resp.common.header_size - 3;
634 int register_count = offloaded_data_length >> 3;
635 int ri = 0;
636 while (register_count--) {
637 add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
638 ri += 1;
639 }
640 mutex_lock(&vub300->irq_mutex);
641 if (vub300->irq_enabled)
642 mmc_signal_sdio_irq(vub300->mmc);
643		else
644			vub300->irqs_queued += 1;
647 vub300->irq_disabled = 0;
648 mutex_unlock(&vub300->irq_mutex);
649 break;
650 }
651 case RESPONSE_NO_INTERRUPT:
652 vub300_queue_poll_work(vub300, 1);
653 break;
654 default:
655 break;
656 }
657}
658
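/*
 * send a single IRQ poll and wait up to 500ms for the answer; on timeout
 * the outstanding URBs are killed, otherwise the response is handed to
 * __vub300_irqpoll_response()
 */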
659static void __do_poll(struct vub300_mmc_host *vub300)
660{
661 /* cmd_mutex is held by vub300_pollwork_thread */
662 long commretval;
663 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
664 init_completion(&vub300->irqpoll_complete);
665 send_irqpoll(vub300);
666 commretval = wait_for_completion_timeout(&vub300->irqpoll_complete,
667 msecs_to_jiffies(500));
668 if (vub300->usb_transport_fail) {
669 /* no need to do anything */
670 } else if (commretval == 0) {
671 vub300->usb_timed_out = 1;
672 usb_kill_urb(vub300->command_out_urb);
673 usb_kill_urb(vub300->command_res_urb);
674 } else if (commretval < 0) {
675 vub300_queue_poll_work(vub300, 1);
676 } else { /* commretval > 0 */
677 __vub300_irqpoll_response(vub300);
678 }
679}
680
681/* this thread runs only when the driver
682 * is trying to poll the device for an IRQ
683 */
684static void vub300_pollwork_thread(struct work_struct *work)
685{ /* NOT irq */
686 struct vub300_mmc_host *vub300 = container_of(work,
687 struct vub300_mmc_host, pollwork.work);
688 if (!vub300->interface) {
689 kref_put(&vub300->kref, vub300_delete);
690 return;
691 }
692 mutex_lock(&vub300->cmd_mutex);
693 if (vub300->cmd) {
694 vub300_queue_poll_work(vub300, 1);
695 } else if (!vub300->card_present) {
696 /* no need to do anything */
697 } else { /* vub300->card_present */
698 mutex_lock(&vub300->irq_mutex);
699 if (!vub300->irq_enabled) {
700 mutex_unlock(&vub300->irq_mutex);
701 } else if (vub300->irqs_queued) {
702 vub300->irqs_queued -= 1;
703 mmc_signal_sdio_irq(vub300->mmc);
704 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
705 mutex_unlock(&vub300->irq_mutex);
706 } else { /* NOT vub300->irqs_queued */
707 mutex_unlock(&vub300->irq_mutex);
708 __do_poll(vub300);
709 }
710 }
711 mutex_unlock(&vub300->cmd_mutex);
712 kref_put(&vub300->kref, vub300_delete);
713}
714
715static void vub300_deadwork_thread(struct work_struct *work)
716{ /* NOT irq */
717 struct vub300_mmc_host *vub300 =
718 container_of(work, struct vub300_mmc_host, deadwork);
719 if (!vub300->interface) {
720 kref_put(&vub300->kref, vub300_delete);
721 return;
722 }
723 mutex_lock(&vub300->cmd_mutex);
724 if (vub300->cmd) {
725 /*
726 * a command got in as the inactivity
727 * timer expired - so we just let the
728 * processing of the command show if
729 * the device is dead
730 */
731 } else if (vub300->card_present) {
732 check_vub300_port_status(vub300);
733 } else if (vub300->mmc && vub300->mmc->card &&
734 mmc_card_present(vub300->mmc->card)) {
735 /*
736 * the MMC core must not have responded
737		 * to the previous indication - let's
738		 * hope that it eventually does; we
739		 * will just ignore this for now
740 */
741 } else {
742 check_vub300_port_status(vub300);
743 }
744 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
745 mutex_unlock(&vub300->cmd_mutex);
746 kref_put(&vub300->kref, vub300_delete);
747}
748
749static void vub300_inactivity_timer_expired(unsigned long data)
750{ /* softirq */
751 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
752 if (!vub300->interface) {
753 kref_put(&vub300->kref, vub300_delete);
754 } else if (vub300->cmd) {
755 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
756 } else {
757 vub300_queue_dead_work(vub300);
758 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
759 }
760}
761
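/* map the VUB300 SD error codes onto errno values for the MMC core */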
762static int vub300_response_error(u8 error_code)
763{
764 switch (error_code) {
765 case SD_ERROR_PIO_TIMEOUT:
766 case SD_ERROR_1BIT_TIMEOUT:
767 case SD_ERROR_4BIT_TIMEOUT:
768 return -ETIMEDOUT;
769 case SD_ERROR_STAT_DATA:
770 case SD_ERROR_OVERRUN:
771 case SD_ERROR_STAT_CMD:
772 case SD_ERROR_STAT_CMD_TIMEOUT:
773 case SD_ERROR_SDCRDY_STUCK:
774 case SD_ERROR_UNHANDLED:
775 case SD_ERROR_1BIT_CRC_WRONG:
776 case SD_ERROR_4BIT_CRC_WRONG:
777 case SD_ERROR_1BIT_CRC_ERROR:
778 case SD_ERROR_4BIT_CRC_ERROR:
779 case SD_ERROR_NO_CMD_ENDBIT:
780 case SD_ERROR_NO_1BIT_DATEND:
781 case SD_ERROR_NO_4BIT_DATEND:
782 case SD_ERROR_1BIT_DATA_TIMEOUT:
783 case SD_ERROR_4BIT_DATA_TIMEOUT:
784 case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT:
785 case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT:
786 return -EILSEQ;
787 case 33:
788 return -EILSEQ;
789 case SD_ERROR_ILLEGAL_COMMAND:
790 return -EINVAL;
791 case SD_ERROR_NO_DEVICE:
792 return -ENOMEDIUM;
793 default:
794 return -ENODEV;
795 }
796}
797
798static void command_res_completed(struct urb *urb)
799{ /* urb completion handler - hardirq */
800 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
801 if (urb->status) {
802 /* we have to let the initiator handle the error */
803 } else if (vub300->command_res_urb->actual_length == 0) {
804 /*
805 * we have seen this happen once or twice and
806 * we suspect a buggy USB host controller
807 */
808 } else if (!vub300->data) {
809 /* this means that the command (typically CMD52) suceeded */
810 } else if (vub300->resp.common.header_type != 0x02) {
811 /*
812 * this is an error response from the VUB300 chip
813 * and we let the initiator handle it
814 */
815 } else if (vub300->urb) {
816 vub300->cmd->error =
817 vub300_response_error(vub300->resp.error.error_code);
818 usb_unlink_urb(vub300->urb);
819 } else {
820 vub300->cmd->error =
821 vub300_response_error(vub300->resp.error.error_code);
822 usb_sg_cancel(&vub300->sg_request);
823 }
824 complete(&vub300->command_complete); /* got_response_in */
825}
826
827static void command_out_completed(struct urb *urb)
828{ /* urb completion handler - hardirq */
829 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
830 if (urb->status) {
831 complete(&vub300->command_complete);
832 } else {
833 int ret;
834 unsigned int pipe =
835 usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
836 usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
837 &vub300->resp, sizeof(vub300->resp),
838 command_res_completed, vub300);
839 vub300->command_res_urb->actual_length = 0;
840 ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
841 if (ret == 0) {
842 /*
843 * the urb completion handler will call
844 * our completion handler
845 */
846 } else {
847 /*
848 * and thus we only call it directly
849 * when it will not be called
850 */
851 complete(&vub300->command_complete);
852 }
853 }
854}
855
856/* snoop CMD52 writes to the per-function FBR block size registers and the
857 * CCCR bus width bits; the STUFF bits are masked out for the comparisons
858 */
859static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300,
860 u32 cmd_arg)
861{
862 if ((0xFBFFFE00 & cmd_arg) == 0x80022200)
863 vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]);
864 else if ((0xFBFFFE00 & cmd_arg) == 0x80022000)
865 vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]);
866 else if ((0xFBFFFE00 & cmd_arg) == 0x80042200)
867 vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]);
868 else if ((0xFBFFFE00 & cmd_arg) == 0x80042000)
869 vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]);
870 else if ((0xFBFFFE00 & cmd_arg) == 0x80062200)
871 vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]);
872 else if ((0xFBFFFE00 & cmd_arg) == 0x80062000)
873 vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]);
874 else if ((0xFBFFFE00 & cmd_arg) == 0x80082200)
875 vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]);
876 else if ((0xFBFFFE00 & cmd_arg) == 0x80082000)
877 vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]);
878 else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200)
879 vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]);
880 else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000)
881 vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]);
882 else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200)
883 vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]);
884 else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000)
885 vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]);
886 else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200)
887 vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]);
888 else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000)
889 vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]);
890 else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00)
891 vub300->bus_width = 1;
892 else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02)
893 vub300->bus_width = 4;
894}
895
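/*
 * build and submit the 64-byte VUB300 command packet: select the SD
 * response type expected for this opcode, then fill in the block count,
 * block size and transfer size fields according to whether the command
 * is CMD52, a non-data command, CMD53 or a regular data transfer
 */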
896static void send_command(struct vub300_mmc_host *vub300)
897{
898 /* cmd_mutex is held by vub300_cmndwork_thread */
899 struct mmc_command *cmd = vub300->cmd;
900 struct mmc_data *data = vub300->data;
901 int retval;
902 int i;
903 u8 response_type;
904 if (vub300->app_spec) {
905 switch (cmd->opcode) {
906 case 6:
907 response_type = SDRT_1;
908 vub300->resp_len = 6;
909 if (0x00000000 == (0x00000003 & cmd->arg))
910 vub300->bus_width = 1;
911 else if (0x00000002 == (0x00000003 & cmd->arg))
912 vub300->bus_width = 4;
913 else
914 dev_err(&vub300->udev->dev,
915 "unexpected ACMD6 bus_width=%d\n",
916 0x00000003 & cmd->arg);
917 break;
918 case 13:
919 response_type = SDRT_1;
920 vub300->resp_len = 6;
921 break;
922 case 22:
923 response_type = SDRT_1;
924 vub300->resp_len = 6;
925 break;
926 case 23:
927 response_type = SDRT_1;
928 vub300->resp_len = 6;
929 break;
930 case 41:
931 response_type = SDRT_3;
932 vub300->resp_len = 6;
933 break;
934 case 42:
935 response_type = SDRT_1;
936 vub300->resp_len = 6;
937 break;
938 case 51:
939 response_type = SDRT_1;
940 vub300->resp_len = 6;
941 break;
942 case 55:
943 response_type = SDRT_1;
944 vub300->resp_len = 6;
945 break;
946 default:
947 vub300->resp_len = 0;
948 cmd->error = -EINVAL;
949 complete(&vub300->command_complete);
950 return;
951 }
952 vub300->app_spec = 0;
953 } else {
954 switch (cmd->opcode) {
955 case 0:
956 response_type = SDRT_NONE;
957 vub300->resp_len = 0;
958 break;
959 case 1:
960 response_type = SDRT_3;
961 vub300->resp_len = 6;
962 break;
963 case 2:
964 response_type = SDRT_2;
965 vub300->resp_len = 17;
966 break;
967 case 3:
968 response_type = SDRT_6;
969 vub300->resp_len = 6;
970 break;
971 case 4:
972 response_type = SDRT_NONE;
973 vub300->resp_len = 0;
974 break;
975 case 5:
976 response_type = SDRT_4;
977 vub300->resp_len = 6;
978 break;
979 case 6:
980 response_type = SDRT_1;
981 vub300->resp_len = 6;
982 break;
983 case 7:
984 response_type = SDRT_1B;
985 vub300->resp_len = 6;
986 break;
987 case 8:
988 response_type = SDRT_7;
989 vub300->resp_len = 6;
990 break;
991 case 9:
992 response_type = SDRT_2;
993 vub300->resp_len = 17;
994 break;
995 case 10:
996 response_type = SDRT_2;
997 vub300->resp_len = 17;
998 break;
999 case 12:
1000 response_type = SDRT_1B;
1001 vub300->resp_len = 6;
1002 break;
1003 case 13:
1004 response_type = SDRT_1;
1005 vub300->resp_len = 6;
1006 break;
1007 case 15:
1008 response_type = SDRT_NONE;
1009 vub300->resp_len = 0;
1010 break;
1011 case 16:
1012 for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
1013 vub300->fbs[i] = 0xFFFF & cmd->arg;
1014 response_type = SDRT_1;
1015 vub300->resp_len = 6;
1016 break;
1017 case 17:
1018 case 18:
1019 case 24:
1020 case 25:
1021 case 27:
1022 response_type = SDRT_1;
1023 vub300->resp_len = 6;
1024 break;
1025 case 28:
1026 case 29:
1027 response_type = SDRT_1B;
1028 vub300->resp_len = 6;
1029 break;
1030 case 30:
1031 case 32:
1032 case 33:
1033 response_type = SDRT_1;
1034 vub300->resp_len = 6;
1035 break;
1036 case 38:
1037 response_type = SDRT_1B;
1038 vub300->resp_len = 6;
1039 break;
1040 case 42:
1041 response_type = SDRT_1;
1042 vub300->resp_len = 6;
1043 break;
1044 case 52:
1045 response_type = SDRT_5;
1046 vub300->resp_len = 6;
1047 snoop_block_size_and_bus_width(vub300, cmd->arg);
1048 break;
1049 case 53:
1050 response_type = SDRT_5;
1051 vub300->resp_len = 6;
1052 break;
1053 case 55:
1054 response_type = SDRT_1;
1055 vub300->resp_len = 6;
1056 vub300->app_spec = 1;
1057 break;
1058 case 56:
1059 response_type = SDRT_1;
1060 vub300->resp_len = 6;
1061 break;
1062 default:
1063 vub300->resp_len = 0;
1064 cmd->error = -EINVAL;
1065 complete(&vub300->command_complete);
1066 return;
1067 }
1068 }
1069 /*
1070 * it is a shame that we can not use "sizeof(struct sd_command_header)"
1071 * this is because the packet _must_ be padded to 64 bytes
1072 */
1073 vub300->cmnd.head.header_size = 20;
1074 vub300->cmnd.head.header_type = 0x00;
1075 vub300->cmnd.head.port_number = 0; /* "0" means port 1 */
1076 vub300->cmnd.head.command_type = 0x00; /* standard read command */
1077 vub300->cmnd.head.response_type = response_type;
1078 vub300->cmnd.head.command_index = cmd->opcode;
1079 vub300->cmnd.head.arguments[0] = cmd->arg >> 24;
1080 vub300->cmnd.head.arguments[1] = cmd->arg >> 16;
1081 vub300->cmnd.head.arguments[2] = cmd->arg >> 8;
1082 vub300->cmnd.head.arguments[3] = cmd->arg >> 0;
1083 if (cmd->opcode == 52) {
1084 int fn = 0x7 & (cmd->arg >> 28);
1085 vub300->cmnd.head.block_count[0] = 0;
1086 vub300->cmnd.head.block_count[1] = 0;
1087 vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF;
1088 vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF;
1089 vub300->cmnd.head.command_type = 0x00;
1090 vub300->cmnd.head.transfer_size[0] = 0;
1091 vub300->cmnd.head.transfer_size[1] = 0;
1092 vub300->cmnd.head.transfer_size[2] = 0;
1093 vub300->cmnd.head.transfer_size[3] = 0;
1094 } else if (!data) {
1095 vub300->cmnd.head.block_count[0] = 0;
1096 vub300->cmnd.head.block_count[1] = 0;
1097 vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF;
1098 vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF;
1099 vub300->cmnd.head.command_type = 0x00;
1100 vub300->cmnd.head.transfer_size[0] = 0;
1101 vub300->cmnd.head.transfer_size[1] = 0;
1102 vub300->cmnd.head.transfer_size[2] = 0;
1103 vub300->cmnd.head.transfer_size[3] = 0;
1104 } else if (cmd->opcode == 53) {
1105 int fn = 0x7 & (cmd->arg >> 28);
1106 if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */
1107 vub300->cmnd.head.block_count[0] =
1108 (data->blocks >> 8) & 0xFF;
1109 vub300->cmnd.head.block_count[1] =
1110 (data->blocks >> 0) & 0xFF;
1111 vub300->cmnd.head.block_size[0] =
1112 (data->blksz >> 8) & 0xFF;
1113 vub300->cmnd.head.block_size[1] =
1114 (data->blksz >> 0) & 0xFF;
1115 } else { /* BYTE MODE */
1116 vub300->cmnd.head.block_count[0] = 0;
1117 vub300->cmnd.head.block_count[1] = 0;
1118 vub300->cmnd.head.block_size[0] =
1119 (vub300->datasize >> 8) & 0xFF;
1120 vub300->cmnd.head.block_size[1] =
1121 (vub300->datasize >> 0) & 0xFF;
1122 }
1123 vub300->cmnd.head.command_type =
1124 (MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
1125 vub300->cmnd.head.transfer_size[0] =
1126 (vub300->datasize >> 24) & 0xFF;
1127 vub300->cmnd.head.transfer_size[1] =
1128 (vub300->datasize >> 16) & 0xFF;
1129 vub300->cmnd.head.transfer_size[2] =
1130 (vub300->datasize >> 8) & 0xFF;
1131 vub300->cmnd.head.transfer_size[3] =
1132 (vub300->datasize >> 0) & 0xFF;
1133 if (vub300->datasize < vub300->fbs[fn]) {
1134 vub300->cmnd.head.block_count[0] = 0;
1135 vub300->cmnd.head.block_count[1] = 0;
1136 }
1137 } else {
1138 vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF;
1139 vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF;
1140 vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF;
1141 vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF;
1142 vub300->cmnd.head.command_type =
1143 (MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
1144 vub300->cmnd.head.transfer_size[0] =
1145 (vub300->datasize >> 24) & 0xFF;
1146 vub300->cmnd.head.transfer_size[1] =
1147 (vub300->datasize >> 16) & 0xFF;
1148 vub300->cmnd.head.transfer_size[2] =
1149 (vub300->datasize >> 8) & 0xFF;
1150 vub300->cmnd.head.transfer_size[3] =
1151 (vub300->datasize >> 0) & 0xFF;
1152 if (vub300->datasize < vub300->fbs[0]) {
1153 vub300->cmnd.head.block_count[0] = 0;
1154 vub300->cmnd.head.block_count[1] = 0;
1155 }
1156 }
1157 if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
1158 u16 block_size = vub300->cmnd.head.block_size[1] |
1159 (vub300->cmnd.head.block_size[0] << 8);
1160 u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
1161 (FIRMWARE_BLOCK_BOUNDARY % block_size);
1162 vub300->cmnd.head.block_boundary[0] =
1163 (block_boundary >> 8) & 0xFF;
1164 vub300->cmnd.head.block_boundary[1] =
1165 (block_boundary >> 0) & 0xFF;
1166 } else {
1167 vub300->cmnd.head.block_boundary[0] = 0;
1168 vub300->cmnd.head.block_boundary[1] = 0;
1169 }
1170 usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
1171 usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
1172 &vub300->cmnd, sizeof(vub300->cmnd),
1173 command_out_completed, vub300);
1174 retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
1175 if (retval < 0) {
1176 cmd->error = retval;
1177 complete(&vub300->command_complete);
1178 return;
1179 } else {
1180 return;
1181 }
1182}
1183
1184/*
1185 * timer callback runs in atomic mode
1186 * so it cannot call usb_kill_urb()
1187 */
1188static void vub300_sg_timed_out(unsigned long data)
1189{
1190 struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
1191 vub300->usb_timed_out = 1;
1192 usb_sg_cancel(&vub300->sg_request);
1193 usb_unlink_urb(vub300->command_out_urb);
1194 usb_unlink_urb(vub300->command_res_urb);
1195}
1196
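/* e.g. 0 -> 0, 1 -> 64, 64 -> 64, 65 -> 128 */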
1197static u16 roundup_to_multiple_of_64(u16 number)
1198{
1199 return 0xFFC0 & (0x3F + number);
1200}
1201
1202/*
1203 * this is a separate function to solve the 80 column width restriction
1204 */
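/*
 * the firmware image consists of a NUL-terminated comment string, a
 * 16-bit big-endian interrupt pseudocode length and payload, a 16-bit
 * big-endian transfer pseudocode length and payload, and a register
 * count byte followed by four bytes per dynamic register (a function
 * number plus a 24-bit register address)
 */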
1205static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
1206 const struct firmware *fw)
1207{
1208 u8 register_count = 0;
1209 u16 ts = 0;
1210 u16 interrupt_size = 0;
1211 const u8 *data = fw->data;
1212 int size = fw->size;
1213 u8 c;
1214 dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
1215 vub300->vub_name);
1216 do {
1217 c = *data++;
1218 } while (size-- && c); /* skip comment */
1219 dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
1220 vub300->vub_name);
1221 if (size < 4) {
1222 dev_err(&vub300->udev->dev,
1223 "corrupt offload pseudocode in firmware %s\n",
1224 vub300->vub_name);
1225 strncpy(vub300->vub_name, "corrupt offload pseudocode",
1226 sizeof(vub300->vub_name));
1227 return;
1228 }
1229 interrupt_size += *data++;
1230 size -= 1;
1231 interrupt_size <<= 8;
1232 interrupt_size += *data++;
1233 size -= 1;
1234 if (interrupt_size < size) {
1235 u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
1236 u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
1237 if (xfer_buffer) {
1238 int retval;
1239 memcpy(xfer_buffer, data, interrupt_size);
1240 memset(xfer_buffer + interrupt_size, 0,
1241 xfer_length - interrupt_size);
1242 size -= interrupt_size;
1243 data += interrupt_size;
1244 retval =
1245 usb_control_msg(vub300->udev,
1246 usb_sndctrlpipe(vub300->udev, 0),
1247 SET_INTERRUPT_PSEUDOCODE,
1248 USB_DIR_OUT | USB_TYPE_VENDOR |
1249 USB_RECIP_DEVICE, 0x0000, 0x0000,
1250 xfer_buffer, xfer_length, HZ);
1251 kfree(xfer_buffer);
1252 if (retval < 0) {
1253 strncpy(vub300->vub_name,
1254 "SDIO pseudocode download failed",
1255 sizeof(vub300->vub_name));
1256 return;
1257 }
1258 } else {
1259 dev_err(&vub300->udev->dev,
1260 "not enough memory for xfer buffer to send"
1261 " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
1262 vub300->vub_name);
1263 strncpy(vub300->vub_name,
1264 "SDIO interrupt pseudocode download failed",
1265 sizeof(vub300->vub_name));
1266 return;
1267 }
1268 } else {
1269 dev_err(&vub300->udev->dev,
1270 "corrupt interrupt pseudocode in firmware %s %s\n",
1271 fw->data, vub300->vub_name);
1272 strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
1273 sizeof(vub300->vub_name));
1274 return;
1275 }
1276 ts += *data++;
1277 size -= 1;
1278 ts <<= 8;
1279 ts += *data++;
1280 size -= 1;
1281 if (ts < size) {
1282 u16 xfer_length = roundup_to_multiple_of_64(ts);
1283 u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
1284 if (xfer_buffer) {
1285 int retval;
1286 memcpy(xfer_buffer, data, ts);
1287 memset(xfer_buffer + ts, 0,
1288 xfer_length - ts);
1289 size -= ts;
1290 data += ts;
1291 retval =
1292 usb_control_msg(vub300->udev,
1293 usb_sndctrlpipe(vub300->udev, 0),
1294 SET_TRANSFER_PSEUDOCODE,
1295 USB_DIR_OUT | USB_TYPE_VENDOR |
1296 USB_RECIP_DEVICE, 0x0000, 0x0000,
1297 xfer_buffer, xfer_length, HZ);
1298 kfree(xfer_buffer);
1299 if (retval < 0) {
1300 strncpy(vub300->vub_name,
1301 "SDIO pseudocode download failed",
1302 sizeof(vub300->vub_name));
1303 return;
1304 }
1305 } else {
1306 dev_err(&vub300->udev->dev,
1307 "not enough memory for xfer buffer to send"
1308 " TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
1309 vub300->vub_name);
1310 strncpy(vub300->vub_name,
1311 "SDIO transfer pseudocode download failed",
1312 sizeof(vub300->vub_name));
1313 return;
1314 }
1315 } else {
1316 dev_err(&vub300->udev->dev,
1317 "corrupt transfer pseudocode in firmware %s %s\n",
1318 fw->data, vub300->vub_name);
1319 strncpy(vub300->vub_name, "corrupt transfer pseudocode",
1320 sizeof(vub300->vub_name));
1321 return;
1322 }
1323 register_count += *data++;
1324 size -= 1;
1325 if (register_count * 4 == size) {
1326 int I = vub300->dynamic_register_count = register_count;
1327 int i = 0;
1328 while (I--) {
1329 unsigned int func_num = 0;
1330 vub300->sdio_register[i].func_num = *data++;
1331 size -= 1;
1332 func_num += *data++;
1333 size -= 1;
1334 func_num <<= 8;
1335 func_num += *data++;
1336 size -= 1;
1337 func_num <<= 8;
1338 func_num += *data++;
1339 size -= 1;
1340 vub300->sdio_register[i].sdio_reg = func_num;
1341 vub300->sdio_register[i].activate = 1;
1342 vub300->sdio_register[i].prepared = 0;
1343 i += 1;
1344 }
1345 dev_info(&vub300->udev->dev,
1346 "initialized %d dynamic pseudocode registers\n",
1347 vub300->dynamic_register_count);
1348 return;
1349 } else {
1350 dev_err(&vub300->udev->dev,
1351 "corrupt dynamic registers in firmware %s\n",
1352 vub300->vub_name);
1353 strncpy(vub300->vub_name, "corrupt dynamic registers",
1354 sizeof(vub300->vub_name));
1355 return;
1356 }
1357}
1358
1359/*
1360 * if the binary containing the EMPTY PseudoCode cannot be found,
1361 * vub300->vub_name is set anyway in order to prevent an automatic retry
1362 */
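/*
 * the firmware is looked up by a name built from the card's CIS vendor
 * and device IDs plus those of each SDIO function, falling back to
 * "vub_default.bin" when no card-specific image exists
 */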
1363static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
1364{
1365 struct mmc_card *card = vub300->mmc->card;
1366 int sdio_funcs = card->sdio_funcs;
1367 const struct firmware *fw = NULL;
1368 int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
1369 "vub_%04X%04X", card->cis.vendor, card->cis.device);
1370 int n = 0;
1371 int retval;
1372 for (n = 0; n < sdio_funcs; n++) {
1373 struct sdio_func *sf = card->sdio_func[n];
1374 l += snprintf(vub300->vub_name + l,
1375 sizeof(vub300->vub_name) - l, "_%04X%04X",
1376 sf->vendor, sf->device);
1377	}
1378 snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
1379 dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
1380 vub300->vub_name);
1381 retval = request_firmware(&fw, vub300->vub_name, &card->dev);
1382 if (retval < 0) {
1383 strncpy(vub300->vub_name, "vub_default.bin",
1384 sizeof(vub300->vub_name));
1385 retval = request_firmware(&fw, vub300->vub_name, &card->dev);
1386 if (retval < 0) {
1387 strncpy(vub300->vub_name,
1388 "no SDIO offload firmware found",
1389 sizeof(vub300->vub_name));
1390 } else {
1391 __download_offload_pseudocode(vub300, fw);
1392 release_firmware(fw);
1393 }
1394 } else {
1395 __download_offload_pseudocode(vub300, fw);
1396 release_firmware(fw);
1397 }
1398}
1399
1400static void vub300_usb_bulk_msg_completion(struct urb *urb)
1401{ /* urb completion handler - hardirq */
1402 complete((struct completion *)urb->context);
1403}
1404
1405static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
1406 unsigned int pipe, void *data, int len,
1407 int *actual_length, int timeout_msecs)
1408{
1409 /* cmd_mutex is held by vub300_cmndwork_thread */
1410 struct usb_device *usb_dev = vub300->udev;
1411 struct completion done;
1412 int retval;
1413 vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
1414 if (!vub300->urb)
1415 return -ENOMEM;
1416 usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
1417 vub300_usb_bulk_msg_completion, NULL);
1418 init_completion(&done);
1419 vub300->urb->context = &done;
1420 vub300->urb->actual_length = 0;
1421 retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
1422 if (unlikely(retval))
1423 goto out;
1424 if (!wait_for_completion_timeout
1425 (&done, msecs_to_jiffies(timeout_msecs))) {
1426 retval = -ETIMEDOUT;
1427 usb_kill_urb(vub300->urb);
1428 } else {
1429 retval = vub300->urb->status;
1430 }
1431out:
1432 *actual_length = vub300->urb->actual_length;
1433 usb_free_urb(vub300->urb);
1434 vub300->urb = NULL;
1435 return retval;
1436}
1437
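/*
 * read the data phase of the current command: stream directly into the
 * scatter-gather list when no USB packet padding is needed, otherwise
 * bounce through a padded kmalloc buffer; returns the number of bytes
 * transferred, or 0 with cmd->error set on failure
 */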
1438static int __command_read_data(struct vub300_mmc_host *vub300,
1439 struct mmc_command *cmd, struct mmc_data *data)
1440{
1441 /* cmd_mutex is held by vub300_cmndwork_thread */
1442 int linear_length = vub300->datasize;
1443 int padded_length = vub300->large_usb_packets ?
1444 ((511 + linear_length) >> 9) << 9 :
1445 ((63 + linear_length) >> 6) << 6;
1446 if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
1447 int result;
1448 unsigned pipe;
1449 pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
1450 result = usb_sg_init(&vub300->sg_request, vub300->udev,
1451 pipe, 0, data->sg,
1452 data->sg_len, 0, GFP_KERNEL);
1453 if (result < 0) {
1454 usb_unlink_urb(vub300->command_out_urb);
1455 usb_unlink_urb(vub300->command_res_urb);
1456 cmd->error = result;
1457 data->bytes_xfered = 0;
1458 return 0;
1459 } else {
1460 vub300->sg_transfer_timer.expires =
1461 jiffies + msecs_to_jiffies(2000 +
1462 (linear_length / 16384));
1463 add_timer(&vub300->sg_transfer_timer);
1464 usb_sg_wait(&vub300->sg_request);
1465 del_timer(&vub300->sg_transfer_timer);
1466 if (vub300->sg_request.status < 0) {
1467 cmd->error = vub300->sg_request.status;
1468 data->bytes_xfered = 0;
1469 return 0;
1470 } else {
1471 data->bytes_xfered = vub300->datasize;
1472 return linear_length;
1473 }
1474 }
1475 } else {
1476 u8 *buf = kmalloc(padded_length, GFP_KERNEL);
1477 if (buf) {
1478 int result;
1479 unsigned pipe = usb_rcvbulkpipe(vub300->udev,
1480 vub300->data_inp_ep);
1481 int actual_length = 0;
1482 result = vub300_usb_bulk_msg(vub300, pipe, buf,
1483 padded_length, &actual_length,
1484 2000 + (padded_length / 16384));
1485 if (result < 0) {
1486 cmd->error = result;
1487 data->bytes_xfered = 0;
1488 kfree(buf);
1489 return 0;
1490 } else if (actual_length < linear_length) {
1491 cmd->error = -EREMOTEIO;
1492 data->bytes_xfered = 0;
1493 kfree(buf);
1494 return 0;
1495 } else {
1496 sg_copy_from_buffer(data->sg, data->sg_len, buf,
1497 linear_length);
1498 kfree(buf);
1499 data->bytes_xfered = vub300->datasize;
1500 return linear_length;
1501 }
1502 } else {
1503 cmd->error = -ENOMEM;
1504 data->bytes_xfered = 0;
1505 return 0;
1506 }
1507 }
1508}
1509
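/*
 * write the data phase of the current command: transfers shorter than
 * 64 bytes, or ones needing USB packet padding, are copied into a
 * zero-padded bounce buffer, everything else is streamed from the
 * scatter-gather list; returns the linear length of the request
 */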
1510static int __command_write_data(struct vub300_mmc_host *vub300,
1511 struct mmc_command *cmd, struct mmc_data *data)
1512{
1513 /* cmd_mutex is held by vub300_cmndwork_thread */
1514 unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
1515 int linear_length = vub300->datasize;
1516 int modulo_64_length = linear_length & 0x003F;
1517 int modulo_512_length = linear_length & 0x01FF;
1518 if (linear_length < 64) {
1519 int result;
1520 int actual_length;
1521 sg_copy_to_buffer(data->sg, data->sg_len,
1522 vub300->padded_buffer,
1523 sizeof(vub300->padded_buffer));
1524 memset(vub300->padded_buffer + linear_length, 0,
1525 sizeof(vub300->padded_buffer) - linear_length);
1526 result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer,
1527 sizeof(vub300->padded_buffer),
1528 &actual_length, 2000 +
1529 (sizeof(vub300->padded_buffer) /
1530 16384));
1531 if (result < 0) {
1532 cmd->error = result;
1533 data->bytes_xfered = 0;
1534 } else {
1535 data->bytes_xfered = vub300->datasize;
1536 }
1537 } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
1538 (vub300->large_usb_packets && (64 > modulo_512_length))
1539	    ) {	/* don't you just love these workarounds */
1540 int padded_length = ((63 + linear_length) >> 6) << 6;
1541 u8 *buf = kmalloc(padded_length, GFP_KERNEL);
1542 if (buf) {
1543 int result;
1544 int actual_length;
1545 sg_copy_to_buffer(data->sg, data->sg_len, buf,
1546 padded_length);
1547 memset(buf + linear_length, 0,
1548 padded_length - linear_length);
1549 result =
1550 vub300_usb_bulk_msg(vub300, pipe, buf,
1551 padded_length, &actual_length,
1552 2000 + padded_length / 16384);
1553 kfree(buf);
1554 if (result < 0) {
1555 cmd->error = result;
1556 data->bytes_xfered = 0;
1557 } else {
1558 data->bytes_xfered = vub300->datasize;
1559 }
1560 } else {
1561 cmd->error = -ENOMEM;
1562 data->bytes_xfered = 0;
1563 }
1564 } else { /* no data padding required */
1565 int result;
1566 unsigned char buf[64 * 4];
1567 sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
1568 result = usb_sg_init(&vub300->sg_request, vub300->udev,
1569 pipe, 0, data->sg,
1570 data->sg_len, 0, GFP_KERNEL);
1571 if (result < 0) {
1572 usb_unlink_urb(vub300->command_out_urb);
1573 usb_unlink_urb(vub300->command_res_urb);
1574 cmd->error = result;
1575 data->bytes_xfered = 0;
1576 } else {
1577 vub300->sg_transfer_timer.expires =
1578 jiffies + msecs_to_jiffies(2000 +
1579 linear_length / 16384);
1580 add_timer(&vub300->sg_transfer_timer);
1581 usb_sg_wait(&vub300->sg_request);
1582 if (cmd->error) {
1583 data->bytes_xfered = 0;
1584 } else {
1585 del_timer(&vub300->sg_transfer_timer);
1586 if (vub300->sg_request.status < 0) {
1587 cmd->error = vub300->sg_request.status;
1588 data->bytes_xfered = 0;
1589 } else {
1590 data->bytes_xfered = vub300->datasize;
1591 }
1592 }
1593 }
1594 }
1595 return linear_length;
1596}
1597
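/*
 * wait for the command/response URBs to complete and translate any
 * timeout or USB transport failure into cmd->error; piggy-backed
 * offloaded register data in the response is captured here as well
 */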
1598static void __vub300_command_response(struct vub300_mmc_host *vub300,
1599 struct mmc_command *cmd,
1600 struct mmc_data *data, int data_length)
1601{
1602 /* cmd_mutex is held by vub300_cmndwork_thread */
1603 long respretval;
1604 int msec_timeout = 1000 + data_length / 4;
1605 respretval =
1606 wait_for_completion_timeout(&vub300->command_complete,
1607 msecs_to_jiffies(msec_timeout));
1608 if (respretval == 0) { /* TIMED OUT */
1609 /* we don't know which of "out" and "res" if any failed */
1610 int result;
1611 vub300->usb_timed_out = 1;
1612 usb_kill_urb(vub300->command_out_urb);
1613 usb_kill_urb(vub300->command_res_urb);
1614 cmd->error = -ETIMEDOUT;
1615 result = usb_lock_device_for_reset(vub300->udev,
1616 vub300->interface);
1617 if (result == 0) {
1618 result = usb_reset_device(vub300->udev);
1619 usb_unlock_device(vub300->udev);
1620 }
1621 } else if (respretval < 0) {
1622 /* we don't know which of "out" and "res" if any failed */
1623 usb_kill_urb(vub300->command_out_urb);
1624 usb_kill_urb(vub300->command_res_urb);
1625 cmd->error = respretval;
1626 } else if (cmd->error) {
1627 /*
1628		 * the error occurred sending the command
1629		 * or receiving the response
1630 */
1631 } else if (vub300->command_out_urb->status) {
1632 vub300->usb_transport_fail = vub300->command_out_urb->status;
1633 cmd->error = -EPROTO == vub300->command_out_urb->status ?
1634 -ESHUTDOWN : vub300->command_out_urb->status;
1635 } else if (vub300->command_res_urb->status) {
1636 vub300->usb_transport_fail = vub300->command_res_urb->status;
1637 cmd->error = -EPROTO == vub300->command_res_urb->status ?
1638 -ESHUTDOWN : vub300->command_res_urb->status;
1639 } else if (vub300->resp.common.header_type == 0x00) {
1640 /*
1641 * the command completed successfully
1642 * and there was no piggybacked data
1643 */
1644 } else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
1645 cmd->error =
1646 vub300_response_error(vub300->resp.error.error_code);
1647 if (vub300->data)
1648 usb_sg_cancel(&vub300->sg_request);
1649 } else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
1650 int offloaded_data_length =
1651 vub300->resp.common.header_size -
1652 sizeof(struct sd_register_header);
1653 int register_count = offloaded_data_length >> 3;
1654 int ri = 0;
1655 while (register_count--) {
1656 add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
1657 ri += 1;
1658 }
1659 vub300->resp.common.header_size =
1660 sizeof(struct sd_register_header);
1661 vub300->resp.common.header_type = 0x00;
1662 cmd->error = 0;
1663 } else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
1664 int offloaded_data_length =
1665 vub300->resp.common.header_size -
1666 sizeof(struct sd_register_header);
1667 int register_count = offloaded_data_length >> 3;
1668 int ri = 0;
1669 while (register_count--) {
1670 add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
1671 ri += 1;
1672 }
1673 mutex_lock(&vub300->irq_mutex);
1674 if (vub300->irqs_queued) {
1675 vub300->irqs_queued += 1;
1676 } else if (vub300->irq_enabled) {
1677 vub300->irqs_queued += 1;
1678 vub300_queue_poll_work(vub300, 0);
1679 } else {
1680 vub300->irqs_queued += 1;
1681 }
1682 vub300->irq_disabled = 1;
1683 mutex_unlock(&vub300->irq_mutex);
1684 vub300->resp.common.header_size =
1685 sizeof(struct sd_register_header);
1686 vub300->resp.common.header_type = 0x00;
1687 cmd->error = 0;
1688 } else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
1689 int offloaded_data_length =
1690 vub300->resp.common.header_size -
1691 sizeof(struct sd_register_header);
1692 int register_count = offloaded_data_length >> 3;
1693 int ri = 0;
1694 while (register_count--) {
1695 add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
1696 ri += 1;
1697 }
1698 mutex_lock(&vub300->irq_mutex);
1699 if (vub300->irqs_queued) {
1700 vub300->irqs_queued += 1;
1701 } else if (vub300->irq_enabled) {
1702 vub300->irqs_queued += 1;
1703 vub300_queue_poll_work(vub300, 0);
1704 } else {
1705 vub300->irqs_queued += 1;
1706 }
1707 vub300->irq_disabled = 0;
1708 mutex_unlock(&vub300->irq_mutex);
1709 vub300->resp.common.header_size =
1710 sizeof(struct sd_register_header);
1711 vub300->resp.common.header_type = 0x00;
1712 cmd->error = 0;
1713 } else {
1714 cmd->error = -EINVAL;
1715 }
1716}
1717
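/*
 * repack the raw response bytes returned by the VUB300 into the 32-bit
 * words of cmd->resp[], skipping the leading byte of the response
 */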
1718static void construct_request_response(struct vub300_mmc_host *vub300,
1719 struct mmc_command *cmd)
1720{
1721 int resp_len = vub300->resp_len;
1722	int less_cmd = resp_len - 1;
1723 int bytes = 3 & less_cmd;
1724 int words = less_cmd >> 2;
1725 u8 *r = vub300->resp.response.command_response;
1726 if (bytes == 3) {
1727 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1728 | (r[2 + (words << 2)] << 16)
1729 | (r[3 + (words << 2)] << 8);
1730 } else if (bytes == 2) {
1731 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1732 | (r[2 + (words << 2)] << 16);
1733 } else if (bytes == 1) {
1734 cmd->resp[words] = (r[1 + (words << 2)] << 24);
1735 }
1736 while (words-- > 0) {
1737 cmd->resp[words] = (r[1 + (words << 2)] << 24)
1738 | (r[2 + (words << 2)] << 16)
1739 | (r[3 + (words << 2)] << 8)
1740 | (r[4 + (words << 2)] << 0);
1741 }
1742 if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
1743 cmd->resp[0] &= 0xFFFFFF00;
1744}
1745
1746/* this thread runs only when there is an upper level command req outstanding */
1747static void vub300_cmndwork_thread(struct work_struct *work)
1748{
1749 struct vub300_mmc_host *vub300 =
1750 container_of(work, struct vub300_mmc_host, cmndwork);
1751 if (!vub300->interface) {
1752 kref_put(&vub300->kref, vub300_delete);
1753 return;
1754 } else {
1755 struct mmc_request *req = vub300->req;
1756 struct mmc_command *cmd = vub300->cmd;
1757 struct mmc_data *data = vub300->data;
1758 int data_length;
1759 mutex_lock(&vub300->cmd_mutex);
1760 init_completion(&vub300->command_complete);
1761 if (likely(vub300->vub_name[0]) || !vub300->mmc->card ||
1762 !mmc_card_present(vub300->mmc->card)) {
1763 /*
1764 * the name of the EMPTY Pseudo firmware file
1765 * is used as a flag to indicate that the file
1766 * has been already downloaded to the VUB300 chip
1767 */
1768 } else if (0 == vub300->mmc->card->sdio_funcs) {
1769 strncpy(vub300->vub_name, "SD memory device",
1770 sizeof(vub300->vub_name));
1771 } else {
1772 download_offload_pseudocode(vub300);
1773 }
1774 send_command(vub300);
1775 if (!data)
1776 data_length = 0;
1777 else if (MMC_DATA_READ & data->flags)
1778 data_length = __command_read_data(vub300, cmd, data);
1779 else
1780 data_length = __command_write_data(vub300, cmd, data);
1781 __vub300_command_response(vub300, cmd, data, data_length);
1782 vub300->req = NULL;
1783 vub300->cmd = NULL;
1784 vub300->data = NULL;
1785 if (cmd->error) {
1786 if (cmd->error == -ENOMEDIUM)
1787 check_vub300_port_status(vub300);
1788 mutex_unlock(&vub300->cmd_mutex);
1789 mmc_request_done(vub300->mmc, req);
1790 kref_put(&vub300->kref, vub300_delete);
1791 return;
1792 } else {
1793 construct_request_response(vub300, cmd);
1794 vub300->resp_len = 0;
1795 mutex_unlock(&vub300->cmd_mutex);
1796 kref_put(&vub300->kref, vub300_delete);
1797 mmc_request_done(vub300->mmc, req);
1798 return;
1799 }
1800 }
1801}
1802
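/*
 * search this function's FIFO of offloaded CMD52 results for an entry
 * whose command bytes match the current request; on a hit the R5
 * response is synthesized from the stored bytes and the FIFO read
 * pointer is advanced, returning 1; returns 0 when nothing matches
 */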
1803static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
1804 struct mmc_command *cmd, u8 Function)
1805{
1806 /* cmd_mutex is held by vub300_mmc_request */
1807 u8 cmd0 = 0xFF & (cmd->arg >> 24);
1808 u8 cmd1 = 0xFF & (cmd->arg >> 16);
1809 u8 cmd2 = 0xFF & (cmd->arg >> 8);
1810 u8 cmd3 = 0xFF & (cmd->arg >> 0);
1811 int first = MAXREGMASK & vub300->fn[Function].offload_point;
1812 struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
1813 if (cmd0 == rf->command_byte[0] &&
1814 cmd1 == rf->command_byte[1] &&
1815 cmd2 == rf->command_byte[2] &&
1816 cmd3 == rf->command_byte[3]) {
1817 u8 checksum = 0x00;
1818 cmd->resp[1] = checksum << 24;
1819 cmd->resp[0] = (rf->Respond_Byte[0] << 24)
1820 | (rf->Respond_Byte[1] << 16)
1821 | (rf->Respond_Byte[2] << 8)
1822 | (rf->Respond_Byte[3] << 0);
1823 vub300->fn[Function].offload_point += 1;
1824 vub300->fn[Function].offload_count -= 1;
1825 vub300->total_offload_count -= 1;
1826 return 1;
1827 } else {
1828 int delta = 1; /* because it does not match the first one */
1829 u8 register_count = vub300->fn[Function].offload_count - 1;
1830 u32 register_point = vub300->fn[Function].offload_point + 1;
1831 while (0 < register_count) {
1832 int point = MAXREGMASK & register_point;
1833 struct offload_registers_access *r =
1834 &vub300->fn[Function].reg[point];
1835 if (cmd0 == r->command_byte[0] &&
1836 cmd1 == r->command_byte[1] &&
1837 cmd2 == r->command_byte[2] &&
1838 cmd3 == r->command_byte[3]) {
1839 u8 checksum = 0x00;
1840 cmd->resp[1] = checksum << 24;
1841 cmd->resp[0] = (r->Respond_Byte[0] << 24)
1842 | (r->Respond_Byte[1] << 16)
1843 | (r->Respond_Byte[2] << 8)
1844 | (r->Respond_Byte[3] << 0);
1845 vub300->fn[Function].offload_point += delta;
1846 vub300->fn[Function].offload_count -= delta;
1847 vub300->total_offload_count -= delta;
1848 return 1;
1849 } else {
1850 register_point += 1;
1851 register_count -= 1;
1852 delta += 1;
1853 continue;
1854 }
1855 }
1856 return 0;
1857 }
1858}
1859
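/*
 * try to satisfy a CMD52 from locally offloaded data: the dynamic
 * registers declared by the pseudocode are checked first (a write just
 * invalidates the cached value), then the per-function FIFO; returns 1
 * if cmd->resp[] was filled in without touching the hardware
 */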
1860static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
1861 struct mmc_command *cmd)
1862{
1863 /* cmd_mutex is held by vub300_mmc_request */
1864 u8 regs = vub300->dynamic_register_count;
1865 u8 i = 0;
1866 u8 func = FUN(cmd);
1867 u32 reg = REG(cmd);
1868 while (0 < regs--) {
1869 if ((vub300->sdio_register[i].func_num == func) &&
1870 (vub300->sdio_register[i].sdio_reg == reg)) {
1871 if (!vub300->sdio_register[i].prepared) {
1872 return 0;
1873 } else if ((0x80000000 & cmd->arg) == 0x80000000) {
1874 /*
1875 * a write to a dynamic register
1876 * nullifies our offloaded value
1877 */
1878 vub300->sdio_register[i].prepared = 0;
1879 return 0;
1880 } else {
1881 u8 checksum = 0x00;
1882 u8 rsp0 = 0x00;
1883 u8 rsp1 = 0x00;
1884 u8 rsp2 = vub300->sdio_register[i].response;
1885 u8 rsp3 = vub300->sdio_register[i].regvalue;
1886 vub300->sdio_register[i].prepared = 0;
1887 cmd->resp[1] = checksum << 24;
1888 cmd->resp[0] = (rsp0 << 24)
1889 | (rsp1 << 16)
1890 | (rsp2 << 8)
1891 | (rsp3 << 0);
1892 return 1;
1893 }
1894 } else {
1895 i += 1;
1896 continue;
1897 }
1898	}
1899 if (vub300->total_offload_count == 0)
1900 return 0;
1901 else if (vub300->fn[func].offload_count == 0)
1902 return 0;
1903 else
1904 return examine_cyclic_buffer(vub300, cmd, func);
1905}
1906
1907static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
1908{ /* NOT irq */
1909 struct mmc_command *cmd = req->cmd;
1910 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
1911 if (!vub300->interface) {
1912 cmd->error = -ESHUTDOWN;
1913 mmc_request_done(mmc, req);
1914 return;
1915 } else {
1916 struct mmc_data *data = req->data;
1917 if (!vub300->card_powered) {
1918 cmd->error = -ENOMEDIUM;
1919 mmc_request_done(mmc, req);
1920 return;
1921 }
1922 if (!vub300->card_present) {
1923 cmd->error = -ENOMEDIUM;
1924 mmc_request_done(mmc, req);
1925 return;
1926 }
1927 if (vub300->usb_transport_fail) {
1928 cmd->error = vub300->usb_transport_fail;
1929 mmc_request_done(mmc, req);
1930 return;
1931 }
1932 if (!vub300->interface) {
1933 cmd->error = -ENODEV;
1934 mmc_request_done(mmc, req);
1935 return;
1936 }
1937 kref_get(&vub300->kref);
1938 mutex_lock(&vub300->cmd_mutex);
1939 mod_timer(&vub300->inactivity_timer, jiffies + HZ);
1940 /*
1941 * for performance we have to return immediately
1942 * if the requested data has been offloaded
1943 */
1944 if (cmd->opcode == 52 &&
1945 satisfy_request_from_offloaded_data(vub300, cmd)) {
1946 cmd->error = 0;
1947 mutex_unlock(&vub300->cmd_mutex);
1948 kref_put(&vub300->kref, vub300_delete);
1949 mmc_request_done(mmc, req);
1950 return;
1951 } else {
1952 vub300->cmd = cmd;
1953 vub300->req = req;
1954 vub300->data = data;
1955 if (data)
1956 vub300->datasize = data->blksz * data->blocks;
1957 else
1958 vub300->datasize = 0;
1959 vub300_queue_cmnd_work(vub300);
1960 mutex_unlock(&vub300->cmd_mutex);
1961 kref_put(&vub300->kref, vub300_delete);
1962 /*
1963 * the kernel lock diagnostics complain
1964			 * if the cmd_mutex is "passed on"
1965 * to the cmndwork thread,
1966 * so we must release it now
1967 * and re-acquire it in the cmndwork thread
1968 */
1969 }
1970 }
1971}
1972
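/*
 * the driver programs only a handful of discrete SD clock rates: the
 * requested frequency is rounded down to the nearest of these and sent
 * to the chip as a little-endian value in kHz
 */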
1973static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
1974 struct mmc_ios *ios)
1975{
1976	int buf_array_size = 8; /* buf decays to a pointer, so ARRAY_SIZE(buf) cannot be used */
1977 int retval;
1978 u32 kHzClock;
1979 if (ios->clock >= 48000000)
1980 kHzClock = 48000;
1981 else if (ios->clock >= 24000000)
1982 kHzClock = 24000;
1983 else if (ios->clock >= 20000000)
1984 kHzClock = 20000;
1985 else if (ios->clock >= 15000000)
1986 kHzClock = 15000;
1987 else if (ios->clock >= 200000)
1988 kHzClock = 200;
1989 else
1990 kHzClock = 0;
1991 {
1992 int i;
1993 u64 c = kHzClock;
1994 for (i = 0; i < buf_array_size; i++) {
1995 buf[i] = c;
1996 c >>= 8;
1997 }
1998 }
1999 retval =
2000 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
2001 SET_CLOCK_SPEED,
2002 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2003 0x00, 0x00, buf, buf_array_size, HZ);
2004 if (retval != 8) {
2005 dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
2006 " %dkHz failed with retval=%d\n", kHzClock, retval);
2007 } else {
2008 dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED"
2009 " %dkHz\n", kHzClock);
2010 }
2011}
2012
2013static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2014{ /* NOT irq */
2015 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2016 if (!vub300->interface)
2017 return;
2018 kref_get(&vub300->kref);
2019 mutex_lock(&vub300->cmd_mutex);
2020 if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) {
2021 vub300->card_powered = 0;
2022 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
2023 SET_SD_POWER,
2024 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2025 0x0000, 0x0000, NULL, 0, HZ);
2026 /* must wait for the VUB300 u-proc to boot up */
2027 msleep(600);
2028 } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
2029 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
2030 SET_SD_POWER,
2031 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2032 0x0001, 0x0000, NULL, 0, HZ);
2033 msleep(600);
2034 vub300->card_powered = 1;
2035 } else if (ios->power_mode == MMC_POWER_ON) {
2036 u8 *buf = kmalloc(8, GFP_KERNEL);
2037 if (buf) {
2038 __set_clock_speed(vub300, buf, ios);
2039 kfree(buf);
2040 }
2041 } else {
2042 /* this should mean no change of state */
2043 }
2044 mutex_unlock(&vub300->cmd_mutex);
2045 kref_put(&vub300->kref, vub300_delete);
2046}
2047
2048static int vub300_mmc_get_ro(struct mmc_host *mmc)
2049{
2050 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2051 return vub300->read_only;
2052}
2053
2054static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
2055{ /* NOT irq */
2056 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2057 if (!vub300->interface)
2058 return;
2059 kref_get(&vub300->kref);
2060 if (enable) {
2061 mutex_lock(&vub300->irq_mutex);
2062 if (vub300->irqs_queued) {
2063 vub300->irqs_queued -= 1;
2064 mmc_signal_sdio_irq(vub300->mmc);
2065 } else if (vub300->irq_disabled) {
2066 vub300->irq_disabled = 0;
2067 vub300->irq_enabled = 1;
2068 vub300_queue_poll_work(vub300, 0);
2069 } else if (vub300->irq_enabled) {
2070 /* this should not happen, so we will just ignore it */
2071 } else {
2072 vub300->irq_enabled = 1;
2073 vub300_queue_poll_work(vub300, 0);
2074 }
2075 mutex_unlock(&vub300->irq_mutex);
2076 } else {
2077 vub300->irq_enabled = 0;
2078 }
2079 kref_put(&vub300->kref, vub300_delete);
2080}
2081
2082static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
2083{ /* NOT irq */
2084 struct vub300_mmc_host *vub300 = mmc_priv(mmc);
2085 dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
2086}
2087
2088static struct mmc_host_ops vub300_mmc_ops = {
2089 .request = vub300_mmc_request,
2090 .set_ios = vub300_mmc_set_ios,
2091 .get_ro = vub300_mmc_get_ro,
2092 .enable_sdio_irq = vub300_enable_sdio_irq,
2093 .init_card = vub300_init_card,
2094};
2095
2096static int vub300_probe(struct usb_interface *interface,
2097 const struct usb_device_id *id)
2098{ /* NOT irq */
2099 struct vub300_mmc_host *vub300 = NULL;
2100 struct usb_host_interface *iface_desc;
2101 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
2102 int i;
2103 int retval = -ENOMEM;
2104 struct urb *command_out_urb;
2105 struct urb *command_res_urb;
2106 struct mmc_host *mmc;
2107 char manufacturer[48];
2108 char product[32];
2109 char serial_number[32];
2110 usb_string(udev, udev->descriptor.iManufacturer, manufacturer,
2111 sizeof(manufacturer));
2112 usb_string(udev, udev->descriptor.iProduct, product, sizeof(product));
2113 usb_string(udev, udev->descriptor.iSerialNumber, serial_number,
2114 sizeof(serial_number));
2115 dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n",
2116 udev->descriptor.idVendor, udev->descriptor.idProduct,
2117 manufacturer, product, serial_number);
2118 command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
2119 if (!command_out_urb) {
2120 retval = -ENOMEM;
2121		dev_err(&udev->dev,
2122 "not enough memory for the command_out_urb\n");
2123 goto error0;
2124 }
2125 command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
2126 if (!command_res_urb) {
2127 retval = -ENOMEM;
2128		dev_err(&udev->dev,
2129 "not enough memory for the command_res_urb\n");
2130 goto error1;
2131 }
2132 /* this also allocates memory for our VUB300 mmc host device */
2133 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
2134 if (!mmc) {
2135 retval = -ENOMEM;
2136		dev_err(&udev->dev,
2137 "not enough memory for the mmc_host\n");
2138 goto error4;
2139 }
2140 /* MMC core transfer sizes tunable parameters */
2141 mmc->caps = 0;
2142 if (!force_1_bit_data_xfers)
2143 mmc->caps |= MMC_CAP_4_BIT_DATA;
2144 if (!force_polling_for_irqs)
2145 mmc->caps |= MMC_CAP_SDIO_IRQ;
2146 mmc->caps &= ~MMC_CAP_NEEDS_POLL;
2147 /*
2148 * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll
2149 * for devices which results in spurious CMD7's being
2150 * issued which stops some SDIO cards from working
2151 */
2152 if (limit_speed_to_24_MHz) {
2153 mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
2154 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2155 mmc->f_max = 24000000;
2156 dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n");
2157 } else {
2158 mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
2159 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2160 mmc->f_max = 48000000;
2161 }
2162 mmc->f_min = 200000;
2163 mmc->max_blk_count = 511;
2164 mmc->max_blk_size = 512;
2165 mmc->max_segs = 128;
2166 if (force_max_req_size)
2167 mmc->max_req_size = force_max_req_size * 1024;
2168 else
2169 mmc->max_req_size = 64 * 1024;
2170 mmc->max_seg_size = mmc->max_req_size;
2171 mmc->ocr_avail = 0;
2172 mmc->ocr_avail |= MMC_VDD_165_195;
2173 mmc->ocr_avail |= MMC_VDD_20_21;
2174 mmc->ocr_avail |= MMC_VDD_21_22;
2175 mmc->ocr_avail |= MMC_VDD_22_23;
2176 mmc->ocr_avail |= MMC_VDD_23_24;
2177 mmc->ocr_avail |= MMC_VDD_24_25;
2178 mmc->ocr_avail |= MMC_VDD_25_26;
2179 mmc->ocr_avail |= MMC_VDD_26_27;
2180 mmc->ocr_avail |= MMC_VDD_27_28;
2181 mmc->ocr_avail |= MMC_VDD_28_29;
2182 mmc->ocr_avail |= MMC_VDD_29_30;
2183 mmc->ocr_avail |= MMC_VDD_30_31;
2184 mmc->ocr_avail |= MMC_VDD_31_32;
2185 mmc->ocr_avail |= MMC_VDD_32_33;
2186 mmc->ocr_avail |= MMC_VDD_33_34;
2187 mmc->ocr_avail |= MMC_VDD_34_35;
2188 mmc->ocr_avail |= MMC_VDD_35_36;
2189 mmc->ops = &vub300_mmc_ops;
2190 vub300 = mmc_priv(mmc);
2191 vub300->mmc = mmc;
2192 vub300->card_powered = 0;
2193 vub300->bus_width = 0;
2194 vub300->cmnd.head.block_size[0] = 0x00;
2195 vub300->cmnd.head.block_size[1] = 0x00;
2196 vub300->app_spec = 0;
2197 mutex_init(&vub300->cmd_mutex);
2198 mutex_init(&vub300->irq_mutex);
2199 vub300->command_out_urb = command_out_urb;
2200 vub300->command_res_urb = command_res_urb;
2201 vub300->usb_timed_out = 0;
2202 vub300->dynamic_register_count = 0;
2203
2204 for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) {
2205 vub300->fn[i].offload_point = 0;
2206 vub300->fn[i].offload_count = 0;
2207 }
2208
2209 vub300->total_offload_count = 0;
2210 vub300->irq_enabled = 0;
2211 vub300->irq_disabled = 0;
2212 vub300->irqs_queued = 0;
2213
2214 for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++)
2215		vub300->sdio_register[i].activate = 0;
2216
2217 vub300->udev = udev;
2218 vub300->interface = interface;
2219 vub300->cmnd_res_ep = 0;
2220 vub300->cmnd_out_ep = 0;
2221 vub300->data_inp_ep = 0;
2222 vub300->data_out_ep = 0;
2223
2224 for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
2225 vub300->fbs[i] = 512;
2226
2227 /*
2228 * set up the endpoint information
2229 *
2230 * use the first pair of bulk-in and bulk-out
2231 * endpoints for Command/Response+Interrupt
2232 *
2233 * use the second pair of bulk-in and bulk-out
2234 * endpoints for Data In/Out
2235 */
2236 vub300->large_usb_packets = 0;
2237 iface_desc = interface->cur_altsetting;
2238 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
2239 struct usb_endpoint_descriptor *endpoint =
2240 &iface_desc->endpoint[i].desc;
2241 dev_info(&vub300->udev->dev,
2242 "vub300 testing %s EndPoint(%d) %02X\n",
2243 usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" :
2244 usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" :
2245 "UNKNOWN", i, endpoint->bEndpointAddress);
2246 if (endpoint->wMaxPacketSize > 64)
2247 vub300->large_usb_packets = 1;
2248 if (usb_endpoint_is_bulk_in(endpoint)) {
2249 if (!vub300->cmnd_res_ep) {
2250 vub300->cmnd_res_ep =
2251 endpoint->bEndpointAddress;
2252 } else if (!vub300->data_inp_ep) {
2253 vub300->data_inp_ep =
2254 endpoint->bEndpointAddress;
2255 } else {
2256 dev_warn(&vub300->udev->dev,
2257 "ignoring"
2258 " unexpected bulk_in endpoint");
2259 }
2260 } else if (usb_endpoint_is_bulk_out(endpoint)) {
2261 if (!vub300->cmnd_out_ep) {
2262 vub300->cmnd_out_ep =
2263 endpoint->bEndpointAddress;
2264 } else if (!vub300->data_out_ep) {
2265 vub300->data_out_ep =
2266 endpoint->bEndpointAddress;
2267 } else {
2268 dev_warn(&vub300->udev->dev,
2269 "ignoring"
2270 " unexpected bulk_out endpoint");
2271 }
2272 } else {
2273 dev_warn(&vub300->udev->dev,
2274 "vub300 ignoring EndPoint(%d) %02X", i,
2275 endpoint->bEndpointAddress);
2276 }
2277 }
2278 if (vub300->cmnd_res_ep && vub300->cmnd_out_ep &&
2279 vub300->data_inp_ep && vub300->data_out_ep) {
2280 dev_info(&vub300->udev->dev,
2281 "vub300 %s packets"
2282 " using EndPoints %02X %02X %02X %02X\n",
2283 vub300->large_usb_packets ? "LARGE" : "SMALL",
2284 vub300->cmnd_out_ep, vub300->cmnd_res_ep,
2285 vub300->data_out_ep, vub300->data_inp_ep);
2286 /* we have the expected EndPoints */
2287 } else {
2288 dev_err(&vub300->udev->dev,
2289 "Could not find two sets of bulk-in/out endpoint pairs\n");
2290 retval = -EINVAL;
2291 goto error5;
2292 }
2293 retval =
2294 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2295 GET_HC_INF0,
2296 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2297 0x0000, 0x0000, &vub300->hc_info,
2298 sizeof(vub300->hc_info), HZ);
2299 if (retval < 0)
2300 goto error5;
2301 retval =
2302 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2303 SET_ROM_WAIT_STATES,
2304 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2305 firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
2306 if (retval < 0)
2307 goto error5;
2308 dev_info(&vub300->udev->dev,
2309 "operating_mode = %s %s %d MHz %s %d byte USB packets\n",
2310 (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL",
2311 (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit",
2312 mmc->f_max / 1000000,
2313 pad_input_to_usb_pkt ? "padding input data to" : "with",
2314 vub300->large_usb_packets ? 512 : 64);
2315 retval =
2316 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
2317 GET_SYSTEM_PORT_STATUS,
2318 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
2319 0x0000, 0x0000, &vub300->system_port_status,
2320 sizeof(vub300->system_port_status), HZ);
2321 if (retval < 0) {
2322		goto error5;
2323 } else if (sizeof(vub300->system_port_status) == retval) {
2324 vub300->card_present =
2325 (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
2326 vub300->read_only =
2327 (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
2328 } else {
2329		goto error5;
2330 }
2331 usb_set_intfdata(interface, vub300);
2332 INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread);
2333 INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
2334 INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
2335 kref_init(&vub300->kref);
2336 init_timer(&vub300->sg_transfer_timer);
2337 vub300->sg_transfer_timer.data = (unsigned long)vub300;
2338 vub300->sg_transfer_timer.function = vub300_sg_timed_out;
2339 kref_get(&vub300->kref);
2340 init_timer(&vub300->inactivity_timer);
2341 vub300->inactivity_timer.data = (unsigned long)vub300;
2342 vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
2343 vub300->inactivity_timer.expires = jiffies + HZ;
2344 add_timer(&vub300->inactivity_timer);
2345 if (vub300->card_present)
2346 dev_info(&vub300->udev->dev,
2347 "USB vub300 remote SDIO host controller[%d]"
2348			 " connected with SD/SDIO card inserted\n",
2349 interface_to_InterfaceNumber(interface));
2350 else
2351 dev_info(&vub300->udev->dev,
2352 "USB vub300 remote SDIO host controller[%d]"
2353			 " connected with no SD/SDIO card inserted\n",
2354 interface_to_InterfaceNumber(interface));
2355 mmc_add_host(mmc);
2356 return 0;
2357error5:
2358 mmc_free_host(mmc);
2359 /*
2360 * and hence also frees vub300
2361 * which is contained at the end of struct mmc
2362 */
2363error4:
2364	usb_free_urb(command_res_urb);
2365error1:
2366	usb_free_urb(command_out_urb);
2367error0:
2368 return retval;
2369}
2370
2371static void vub300_disconnect(struct usb_interface *interface)
2372{ /* NOT irq */
2373 struct vub300_mmc_host *vub300 = usb_get_intfdata(interface);
2374 if (!vub300 || !vub300->mmc) {
2375 return;
2376 } else {
2377 struct mmc_host *mmc = vub300->mmc;
2378 if (!vub300->mmc) {
2379 return;
2380 } else {
2381 int ifnum = interface_to_InterfaceNumber(interface);
2382 usb_set_intfdata(interface, NULL);
2383 /* prevent more I/O from starting */
2384 vub300->interface = NULL;
2385 kref_put(&vub300->kref, vub300_delete);
2386 mmc_remove_host(mmc);
2387 pr_info("USB vub300 remote SDIO host controller[%d]"
2388 " now disconnected", ifnum);
2389 return;
2390 }
2391 }
2392}
2393
2394#ifdef CONFIG_PM
2395static int vub300_suspend(struct usb_interface *intf, pm_message_t message)
2396{
2397 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2398 if (!vub300 || !vub300->mmc) {
2399 return 0;
2400 } else {
2401 struct mmc_host *mmc = vub300->mmc;
2402 mmc_suspend_host(mmc);
2403 return 0;
2404 }
2405}
2406
2407static int vub300_resume(struct usb_interface *intf)
2408{
2409 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2410 if (!vub300 || !vub300->mmc) {
2411 return 0;
2412 } else {
2413 struct mmc_host *mmc = vub300->mmc;
2414 mmc_resume_host(mmc);
2415 return 0;
2416 }
2417}
2418#else
2419#define vub300_suspend NULL
2420#define vub300_resume NULL
2421#endif
2422static int vub300_pre_reset(struct usb_interface *intf)
2423{ /* NOT irq */
2424 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2425 mutex_lock(&vub300->cmd_mutex);
2426 return 0;
2427}
2428
2429static int vub300_post_reset(struct usb_interface *intf)
2430{ /* NOT irq */
2431 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf);
2432 /* we are sure no URBs are active - no locking needed */
2433 vub300->errors = -EPIPE;
2434 mutex_unlock(&vub300->cmd_mutex);
2435 return 0;
2436}
2437
2438static struct usb_driver vub300_driver = {
2439 .name = "vub300",
2440 .probe = vub300_probe,
2441 .disconnect = vub300_disconnect,
2442 .suspend = vub300_suspend,
2443 .resume = vub300_resume,
2444 .pre_reset = vub300_pre_reset,
2445 .post_reset = vub300_post_reset,
2446 .id_table = vub300_table,
2447 .supports_autosuspend = 1,
2448};
2449
2450static int __init vub300_init(void)
2451{ /* NOT irq */
2452 int result;
2453
2454 pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X",
2455 firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout);
2456 cmndworkqueue = create_singlethread_workqueue("kvub300c");
2457 if (!cmndworkqueue) {
2458 pr_err("not enough memory for the REQUEST workqueue");
2459 result = -ENOMEM;
2460 goto out1;
2461 }
2462 pollworkqueue = create_singlethread_workqueue("kvub300p");
2463 if (!pollworkqueue) {
2464 pr_err("not enough memory for the IRQPOLL workqueue");
2465 result = -ENOMEM;
2466 goto out2;
2467 }
2468 deadworkqueue = create_singlethread_workqueue("kvub300d");
2469 if (!deadworkqueue) {
2470 pr_err("not enough memory for the EXPIRED workqueue");
2471 result = -ENOMEM;
2472 goto out3;
2473 }
2474 result = usb_register(&vub300_driver);
2475 if (result) {
2476 pr_err("usb_register failed. Error number %d", result);
2477 goto out4;
2478 }
2479 return 0;
2480out4:
2481 destroy_workqueue(deadworkqueue);
2482out3:
2483 destroy_workqueue(pollworkqueue);
2484out2:
2485 destroy_workqueue(cmndworkqueue);
2486out1:
2487 return result;
2488}
2489
2490static void __exit vub300_exit(void)
2491{
2492 usb_deregister(&vub300_driver);
2493 flush_workqueue(cmndworkqueue);
2494 flush_workqueue(pollworkqueue);
2495 flush_workqueue(deadworkqueue);
2496 destroy_workqueue(cmndworkqueue);
2497 destroy_workqueue(pollworkqueue);
2498 destroy_workqueue(deadworkqueue);
2499}
2500
2501module_init(vub300_init);
2502module_exit(vub300_exit);
2503
2504MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>");
2505MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver");
2506MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 14c578707824..c004e474631b 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
372 372
373static int tmio_probe(struct platform_device *dev) 373static int tmio_probe(struct platform_device *dev)
374{ 374{
375 struct tmio_nand_data *data = mfd_get_data(dev); 375 struct tmio_nand_data *data = dev->dev.platform_data;
376 struct resource *fcr = platform_get_resource(dev, 376 struct resource *fcr = platform_get_resource(dev,
377 IORESOURCE_MEM, 0); 377 IORESOURCE_MEM, 0);
378 struct resource *ccr = platform_get_resource(dev, 378 struct resource *ccr = platform_get_resource(dev,
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 209fbb70619b..776a478e6296 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_ATL2) += atlx/
31obj-$(CONFIG_ATL1E) += atl1e/ 31obj-$(CONFIG_ATL1E) += atl1e/
32obj-$(CONFIG_ATL1C) += atl1c/ 32obj-$(CONFIG_ATL1C) += atl1c/
33obj-$(CONFIG_GIANFAR) += gianfar_driver.o 33obj-$(CONFIG_GIANFAR) += gianfar_driver.o
34obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
34obj-$(CONFIG_TEHUTI) += tehuti.o 35obj-$(CONFIG_TEHUTI) += tehuti.o
35obj-$(CONFIG_ENIC) += enic/ 36obj-$(CONFIG_ENIC) += enic/
36obj-$(CONFIG_JME) += jme.o 37obj-$(CONFIG_JME) += jme.o
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 9eb9b98a7ae3..de51e8453c13 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,9 +30,12 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/net_tstamp.h>
33#include <linux/phy.h> 34#include <linux/phy.h>
34#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36#include <linux/ptp_classify.h>
35#include <linux/slab.h> 37#include <linux/slab.h>
38#include <mach/ixp46x_ts.h>
36#include <mach/npe.h> 39#include <mach/npe.h>
37#include <mach/qmgr.h> 40#include <mach/qmgr.h>
38 41
@@ -67,6 +70,10 @@
67#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) 70#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
68#define TXDONE_QUEUE 31 71#define TXDONE_QUEUE 31
69 72
73#define PTP_SLAVE_MODE 1
74#define PTP_MASTER_MODE 2
75#define PORT2CHANNEL(p) NPE_ID(p->id)
76
70/* TX Control Registers */ 77/* TX Control Registers */
71#define TX_CNTRL0_TX_EN 0x01 78#define TX_CNTRL0_TX_EN 0x01
72#define TX_CNTRL0_HALFDUPLEX 0x02 79#define TX_CNTRL0_HALFDUPLEX 0x02
@@ -171,6 +178,8 @@ struct port {
171 int id; /* logical port ID */ 178 int id; /* logical port ID */
172 int speed, duplex; 179 int speed, duplex;
173 u8 firmware[4]; 180 u8 firmware[4];
181 int hwts_tx_en;
182 int hwts_rx_en;
174}; 183};
175 184
176/* NPE message structure */ 185/* NPE message structure */
@@ -246,6 +255,172 @@ static int ports_open;
246static struct port *npe_port_tab[MAX_NPES]; 255static struct port *npe_port_tab[MAX_NPES];
247static struct dma_pool *dma_pool; 256static struct dma_pool *dma_pool;
248 257
258static struct sock_filter ptp_filter[] = {
259 PTP_FILTER
260};
261
262static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
263{
264 u8 *data = skb->data;
265 unsigned int offset;
266 u16 *hi, *id;
267 u32 lo;
268
269 if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
270 return 0;
271
272 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
273
274 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
275 return 0;
276
277 hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
278 id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
279
280 memcpy(&lo, &hi[1], sizeof(lo));
281
282 return (uid_hi == ntohs(*hi) &&
283 uid_lo == ntohl(lo) &&
284 seqid == ntohs(*id));
285}
286
287static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
288{
289 struct skb_shared_hwtstamps *shhwtstamps;
290 struct ixp46x_ts_regs *regs;
291 u64 ns;
292 u32 ch, hi, lo, val;
293 u16 uid, seq;
294
295 if (!port->hwts_rx_en)
296 return;
297
298 ch = PORT2CHANNEL(port);
299
300 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
301
302 val = __raw_readl(&regs->channel[ch].ch_event);
303
304 if (!(val & RX_SNAPSHOT_LOCKED))
305 return;
306
307 lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
308 hi = __raw_readl(&regs->channel[ch].src_uuid_hi);
309
310 uid = hi & 0xffff;
311 seq = (hi >> 16) & 0xffff;
312
313 if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
314 goto out;
315
316 lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
317 hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
318 ns = ((u64) hi) << 32;
319 ns |= lo;
320 ns <<= TICKS_NS_SHIFT;
321
322 shhwtstamps = skb_hwtstamps(skb);
323 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
324 shhwtstamps->hwtstamp = ns_to_ktime(ns);
325out:
326 __raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
327}
328
329static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
330{
331 struct skb_shared_hwtstamps shhwtstamps;
332 struct ixp46x_ts_regs *regs;
333 struct skb_shared_info *shtx;
334 u64 ns;
335 u32 ch, cnt, hi, lo, val;
336
337 shtx = skb_shinfo(skb);
338 if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
339 shtx->tx_flags |= SKBTX_IN_PROGRESS;
340 else
341 return;
342
343 ch = PORT2CHANNEL(port);
344
345 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
346
347 /*
348 * This really stinks, but we have to poll for the Tx time stamp.
349 * Usually, the time stamp is ready after 4 to 6 microseconds.
350 */
351 for (cnt = 0; cnt < 100; cnt++) {
352 val = __raw_readl(&regs->channel[ch].ch_event);
353 if (val & TX_SNAPSHOT_LOCKED)
354 break;
355 udelay(1);
356 }
357 if (!(val & TX_SNAPSHOT_LOCKED)) {
358 shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
359 return;
360 }
361
362 lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
363 hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
364 ns = ((u64) hi) << 32;
365 ns |= lo;
366 ns <<= TICKS_NS_SHIFT;
367
368 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
369 shhwtstamps.hwtstamp = ns_to_ktime(ns);
370 skb_tstamp_tx(skb, &shhwtstamps);
371
372 __raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
373}
374
375static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
376{
377 struct hwtstamp_config cfg;
378 struct ixp46x_ts_regs *regs;
379 struct port *port = netdev_priv(netdev);
380 int ch;
381
382 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
383 return -EFAULT;
384
385 if (cfg.flags) /* reserved for future extensions */
386 return -EINVAL;
387
388 ch = PORT2CHANNEL(port);
389 regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
390
391 switch (cfg.tx_type) {
392 case HWTSTAMP_TX_OFF:
393 port->hwts_tx_en = 0;
394 break;
395 case HWTSTAMP_TX_ON:
396 port->hwts_tx_en = 1;
397 break;
398 default:
399 return -ERANGE;
400 }
401
402 switch (cfg.rx_filter) {
403 case HWTSTAMP_FILTER_NONE:
404 port->hwts_rx_en = 0;
405 break;
406 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
407 port->hwts_rx_en = PTP_SLAVE_MODE;
408 __raw_writel(0, &regs->channel[ch].ch_control);
409 break;
410 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
411 port->hwts_rx_en = PTP_MASTER_MODE;
412 __raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
413 break;
414 default:
415 return -ERANGE;
416 }
417
418 /* Clear out any old time stamps. */
419 __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
420 &regs->channel[ch].ch_event);
421
422 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
423}
249 424
250static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, 425static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
251 int write, u16 cmd) 426 int write, u16 cmd)
@@ -573,6 +748,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
573 748
574 debug_pkt(dev, "eth_poll", skb->data, skb->len); 749 debug_pkt(dev, "eth_poll", skb->data, skb->len);
575 750
751 ixp_rx_timestamp(port, skb);
576 skb->protocol = eth_type_trans(skb, dev); 752 skb->protocol = eth_type_trans(skb, dev);
577 dev->stats.rx_packets++; 753 dev->stats.rx_packets++;
578 dev->stats.rx_bytes += skb->len; 754 dev->stats.rx_bytes += skb->len;
@@ -679,14 +855,12 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
679 return NETDEV_TX_OK; 855 return NETDEV_TX_OK;
680 } 856 }
681 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); 857 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
682 dev_kfree_skb(skb);
683#endif 858#endif
684 859
685 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); 860 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
686 if (dma_mapping_error(&dev->dev, phys)) { 861 if (dma_mapping_error(&dev->dev, phys)) {
687#ifdef __ARMEB__
688 dev_kfree_skb(skb); 862 dev_kfree_skb(skb);
689#else 863#ifndef __ARMEB__
690 kfree(mem); 864 kfree(mem);
691#endif 865#endif
692 dev->stats.tx_dropped++; 866 dev->stats.tx_dropped++;
@@ -728,6 +902,13 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
728#if DEBUG_TX 902#if DEBUG_TX
729 printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name); 903 printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
730#endif 904#endif
905
906 ixp_tx_timestamp(port, skb);
907 skb_tx_timestamp(skb);
908
909#ifndef __ARMEB__
910 dev_kfree_skb(skb);
911#endif
731 return NETDEV_TX_OK; 912 return NETDEV_TX_OK;
732} 913}
733 914
@@ -783,6 +964,9 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
783 if (!netif_running(dev)) 964 if (!netif_running(dev))
784 return -EINVAL; 965 return -EINVAL;
785 966
967 if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
968 return hwtstamp_ioctl(dev, req, cmd);
969
786 return phy_mii_ioctl(port->phydev, req, cmd); 970 return phy_mii_ioctl(port->phydev, req, cmd);
787} 971}
788 972
@@ -1171,6 +1355,11 @@ static int __devinit eth_init_one(struct platform_device *pdev)
1171 char phy_id[MII_BUS_ID_SIZE + 3]; 1355 char phy_id[MII_BUS_ID_SIZE + 3];
1172 int err; 1356 int err;
1173 1357
1358 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
1359 pr_err("ixp4xx_eth: bad ptp filter\n");
1360 return -EINVAL;
1361 }
1362
1174 if (!(dev = alloc_etherdev(sizeof(struct port)))) 1363 if (!(dev = alloc_etherdev(sizeof(struct port))))
1175 return -ENOMEM; 1364 return -ENOMEM;
1176 1365
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 2463b1c97922..81654ae16c63 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1703,7 +1703,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1703{ 1703{
1704 struct be_mcc_wrb *wrb; 1704 struct be_mcc_wrb *wrb;
1705 struct be_cmd_req_rss_config *req; 1705 struct be_cmd_req_rss_config *req;
1706 u32 myhash[10]; 1706 u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
1707 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
1707 int status; 1708 int status;
1708 1709
1709 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1710 if (mutex_lock_interruptible(&adapter->mbox_lock))
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index d5bd35b7f2e1..289044332ed8 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -2675,7 +2675,7 @@ alloc_mem_err:
2675 * Min size different for TPA and non-TPA queues 2675
2676 */ 2676 */
2677 if (ring_size < (fp->disable_tpa ? 2677 if (ring_size < (fp->disable_tpa ?
2678 MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) { 2678 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
2679 /* release memory allocated for this queue */ 2679 /* release memory allocated for this queue */
2680 bnx2x_free_fp_mem_at(bp, index); 2680 bnx2x_free_fp_mem_at(bp, index);
2681 return -ENOMEM; 2681 return -ENOMEM;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index a97d9be331d1..4b70311a11ef 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2222,12 +2222,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
2222u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 2222u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2223{ 2223{
2224 int mb_idx = BP_FW_MB_IDX(bp); 2224 int mb_idx = BP_FW_MB_IDX(bp);
2225 u32 seq = ++bp->fw_seq; 2225 u32 seq;
2226 u32 rc = 0; 2226 u32 rc = 0;
2227 u32 cnt = 1; 2227 u32 cnt = 1;
2228 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2228 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2229 2229
2230 mutex_lock(&bp->fw_mb_mutex); 2230 mutex_lock(&bp->fw_mb_mutex);
2231 seq = ++bp->fw_seq;
2231 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); 2232 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2232 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); 2233 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2233 2234
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 8f2d2e7c70e5..2df9276720a0 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -163,8 +163,6 @@ static int tlb_initialize(struct bonding *bond)
163 struct tlb_client_info *new_hashtbl; 163 struct tlb_client_info *new_hashtbl;
164 int i; 164 int i;
165 165
166 spin_lock_init(&(bond_info->tx_hashtbl_lock));
167
168 new_hashtbl = kzalloc(size, GFP_KERNEL); 166 new_hashtbl = kzalloc(size, GFP_KERNEL);
169 if (!new_hashtbl) { 167 if (!new_hashtbl) {
170 pr_err("%s: Error: Failed to allocate TLB hash table\n", 168 pr_err("%s: Error: Failed to allocate TLB hash table\n",
@@ -747,8 +745,6 @@ static int rlb_initialize(struct bonding *bond)
747 int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info); 745 int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
748 int i; 746 int i;
749 747
750 spin_lock_init(&(bond_info->rx_hashtbl_lock));
751
752 new_hashtbl = kmalloc(size, GFP_KERNEL); 748 new_hashtbl = kmalloc(size, GFP_KERNEL);
753 if (!new_hashtbl) { 749 if (!new_hashtbl) {
754 pr_err("%s: Error: Failed to allocate RLB hash table\n", 750 pr_err("%s: Error: Failed to allocate RLB hash table\n",
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6dc428461541..6141667c5fb7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -852,7 +852,7 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
852static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) 852static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
853{ 853{
854 struct bonding *bond = container_of(work, struct bonding, 854 struct bonding *bond = container_of(work, struct bonding,
855 mcast_work.work); 855 mcast_work.work);
856 bond_resend_igmp_join_requests(bond); 856 bond_resend_igmp_join_requests(bond);
857} 857}
858 858
@@ -1172,10 +1172,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1172 } 1172 }
1173 1173
1174 /* resend IGMP joins since active slave has changed or 1174 /* resend IGMP joins since active slave has changed or
1175 * all were sent on curr_active_slave */ 1175 * all were sent on curr_active_slave.
1176 if (((USES_PRIMARY(bond->params.mode) && new_active) || 1176 * resend only if bond is brought up with the affected
1177 bond->params.mode == BOND_MODE_ROUNDROBIN) && 1177 * bonding modes and the retransmission is enabled */
1178 netif_running(bond->dev)) { 1178 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
1179 ((USES_PRIMARY(bond->params.mode) && new_active) ||
1180 bond->params.mode == BOND_MODE_ROUNDROBIN)) {
1179 bond->igmp_retrans = bond->params.resend_igmp; 1181 bond->igmp_retrans = bond->params.resend_igmp;
1180 queue_delayed_work(bond->wq, &bond->mcast_work, 0); 1182 queue_delayed_work(bond->wq, &bond->mcast_work, 0);
1181 } 1183 }
@@ -1542,12 +1544,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1542 bond_dev->name, slave_dev->name); 1544 bond_dev->name, slave_dev->name);
1543 } 1545 }
1544 1546
1545 /* bond must be initialized by bond_open() before enslaving */
1546 if (!(bond_dev->flags & IFF_UP)) {
1547 pr_warning("%s: master_dev is not up in bond_enslave\n",
1548 bond_dev->name);
1549 }
1550
1551 /* already enslaved */ 1547 /* already enslaved */
1552 if (slave_dev->flags & IFF_SLAVE) { 1548 if (slave_dev->flags & IFF_SLAVE) {
1553 pr_debug("Error, Device was already enslaved\n"); 1549 pr_debug("Error, Device was already enslaved\n");
@@ -4834,9 +4830,19 @@ static int bond_init(struct net_device *bond_dev)
4834{ 4830{
4835 struct bonding *bond = netdev_priv(bond_dev); 4831 struct bonding *bond = netdev_priv(bond_dev);
4836 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); 4832 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
4833 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
4837 4834
4838 pr_debug("Begin bond_init for %s\n", bond_dev->name); 4835 pr_debug("Begin bond_init for %s\n", bond_dev->name);
4839 4836
4837 /*
4838 * Initialize locks that may be required during
4839 * en/deslave operations. All of the bond_open work
4840 * (of which this is part) should really be moved to
4841 * a phase prior to dev_open
4842 */
4843 spin_lock_init(&(bond_info->tx_hashtbl_lock));
4844 spin_lock_init(&(bond_info->rx_hashtbl_lock));
4845
4840 bond->wq = create_singlethread_workqueue(bond_dev->name); 4846 bond->wq = create_singlethread_workqueue(bond_dev->name);
4841 if (!bond->wq) 4847 if (!bond->wq)
4842 return -ENOMEM; 4848 return -ENOMEM;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4059bfc73dbf..88fcb25e554a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -227,12 +227,6 @@ static ssize_t bonding_store_slaves(struct device *d,
227 struct net_device *dev; 227 struct net_device *dev;
228 struct bonding *bond = to_bond(d); 228 struct bonding *bond = to_bond(d);
229 229
230 /* Quick sanity check -- is the bond interface up? */
231 if (!(bond->dev->flags & IFF_UP)) {
232 pr_warning("%s: doing slave updates when interface is down.\n",
233 bond->dev->name);
234 }
235
236 if (!rtnl_trylock()) 230 if (!rtnl_trylock())
237 return restart_syscall(); 231 return restart_syscall();
238 232
@@ -1539,8 +1533,8 @@ static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1539 * Show and set the number of IGMP membership reports to send on link failure 1533 * Show and set the number of IGMP membership reports to send on link failure
1540 */ 1534 */
1541static ssize_t bonding_show_resend_igmp(struct device *d, 1535static ssize_t bonding_show_resend_igmp(struct device *d,
1542 struct device_attribute *attr, 1536 struct device_attribute *attr,
1543 char *buf) 1537 char *buf)
1544{ 1538{
1545 struct bonding *bond = to_bond(d); 1539 struct bonding *bond = to_bond(d);
1546 1540
@@ -1548,8 +1542,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
1548} 1542}
1549 1543
1550static ssize_t bonding_store_resend_igmp(struct device *d, 1544static ssize_t bonding_store_resend_igmp(struct device *d,
1551 struct device_attribute *attr, 1545 struct device_attribute *attr,
1552 const char *buf, size_t count) 1546 const char *buf, size_t count)
1553{ 1547{
1554 int new_value, ret = count; 1548 int new_value, ret = count;
1555 struct bonding *bond = to_bond(d); 1549 struct bonding *bond = to_bond(d);
@@ -1561,7 +1555,7 @@ static ssize_t bonding_store_resend_igmp(struct device *d,
1561 goto out; 1555 goto out;
1562 } 1556 }
1563 1557
1564 if (new_value < 0) { 1558 if (new_value < 0 || new_value > 255) {
1565 pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n", 1559 pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
1566 bond->dev->name, new_value); 1560 bond->dev->name, new_value);
1567 ret = -EINVAL; 1561 ret = -EINVAL;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 587fba48cdd9..f1942cab35f6 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -15,7 +15,6 @@
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/mfd/core.h>
19 18
20#include <linux/netdevice.h> 19#include <linux/netdevice.h>
21#include <linux/can.h> 20#include <linux/can.h>
@@ -1644,7 +1643,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
1644 struct device *dev; 1643 struct device *dev;
1645 int ret; 1644 int ret;
1646 1645
1647 pdata = mfd_get_data(pdev); 1646 pdata = pdev->dev.platform_data;
1648 if (!pdata) 1647 if (!pdata)
1649 return -ENXIO; 1648 return -ENXIO;
1650 1649
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 6a0a8fca62bc..3fd5a2400348 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2083,7 +2083,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
2083 struct netdev_hw_addr *ha; 2083 struct netdev_hw_addr *ha;
2084 int ret; 2084 int ret;
2085 2085
2086 if (dev->flags & IFF_PROMISC) { 2086 if (port->promisc) {
2087 ehea_promiscuous(dev, 1); 2087 ehea_promiscuous(dev, 1);
2088 return; 2088 return;
2089 } 2089 }
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c
new file mode 100644
index 000000000000..d8e175382d1d
--- /dev/null
+++ b/drivers/net/gianfar_ptp.c
@@ -0,0 +1,588 @@
1/*
2 * PTP 1588 clock using the eTSEC
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/device.h>
21#include <linux/hrtimer.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/of.h>
27#include <linux/of_platform.h>
28#include <linux/timex.h>
29#include <linux/io.h>
30
31#include <linux/ptp_clock_kernel.h>
32
33#include "gianfar.h"
34
35/*
36 * gianfar ptp registers
37 * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
38 */
39struct gianfar_ptp_registers {
40 u32 tmr_ctrl; /* Timer control register */
41 u32 tmr_tevent; /* Timestamp event register */
42 u32 tmr_temask; /* Timer event mask register */
43 u32 tmr_pevent; /* Timestamp event register */
44 u32 tmr_pemask; /* Timer event mask register */
45 u32 tmr_stat; /* Timestamp status register */
46 u32 tmr_cnt_h; /* Timer counter high register */
47 u32 tmr_cnt_l; /* Timer counter low register */
48 u32 tmr_add; /* Timer drift compensation addend register */
49 u32 tmr_acc; /* Timer accumulator register */
50 u32 tmr_prsc; /* Timer prescale */
51 u8 res1[4];
52 u32 tmroff_h; /* Timer offset high */
53 u32 tmroff_l; /* Timer offset low */
54 u8 res2[8];
55 u32 tmr_alarm1_h; /* Timer alarm 1 high register */
56 u32 tmr_alarm1_l; /* Timer alarm 1 low register */
57 u32 tmr_alarm2_h; /* Timer alarm 2 high register */
58 u32 tmr_alarm2_l; /* Timer alarm 2 low register */
59 u8 res3[48];
60 u32 tmr_fiper1; /* Timer fixed period interval */
61 u32 tmr_fiper2; /* Timer fixed period interval */
62 u32 tmr_fiper3; /* Timer fixed period interval */
63 u8 res4[20];
64 u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
65 u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
66 u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
67 u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
68};
69
70/* Bit definitions for the TMR_CTRL register */
71#define ALM1P (1<<31) /* Alarm1 output polarity */
72#define ALM2P (1<<30) /* Alarm2 output polarity */
73#define FS (1<<28) /* FIPER start indication */
74#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
75#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
76#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
77#define TCLK_PERIOD_MASK (0x3ff)
78#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
79#define FRD (1<<14) /* FIPER Realignment Disable */
80#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
81#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
82#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
83#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
84#define COPH (1<<7) /* Generated clock output phase. */
85#define CIPH (1<<6) /* External oscillator input clock phase */
86#define TMSR (1<<5) /* Timer soft reset. */
87#define BYP (1<<3) /* Bypass drift compensated clock */
88#define TE (1<<2) /* 1588 timer enable. */
89#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
90#define CKSEL_MASK (0x3)
91
92/* Bit definitions for the TMR_TEVENT register */
93#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
94#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
95#define ALM2 (1<<17) /* Current time = alarm time register 2 */
96#define ALM1 (1<<16) /* Current time = alarm time register 1 */
97#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
98#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
99#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
100
101/* Bit definitions for the TMR_TEMASK register */
102#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
103#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
104#define ALM2EN (1<<17) /* Timer ALM2 event enable */
105#define ALM1EN (1<<16) /* Timer ALM1 event enable */
106#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
107#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
108
109/* Bit definitions for the TMR_PEVENT register */
110#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
111#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
112#define RXP (1<<0) /* PTP frame has been received */
113
114/* Bit definitions for the TMR_PEMASK register */
115#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
116#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
117#define RXPEN (1<<0) /* Receive PTP packet event enable */
118
119/* Bit definitions for the TMR_STAT register */
120#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
121#define STAT_VEC_MASK (0x3f)
122
123/* Bit definitions for the TMR_PRSC register */
124#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
125#define PRSC_OCK_MASK (0xffff)
126
127
128#define DRIVER "gianfar_ptp"
129#define DEFAULT_CKSEL 1
130#define N_ALARM 1 /* first alarm is used internally to reset fipers */
131#define N_EXT_TS 2
132#define REG_SIZE sizeof(struct gianfar_ptp_registers)
133
134struct etsects {
135 struct gianfar_ptp_registers *regs;
136 spinlock_t lock; /* protects regs */
137 struct ptp_clock *clock;
138 struct ptp_clock_info caps;
139 struct resource *rsrc;
140 int irq;
141 u64 alarm_interval; /* for periodic alarm */
142 u64 alarm_value;
143 u32 tclk_period; /* nanoseconds */
144 u32 tmr_prsc;
145 u32 tmr_add;
146 u32 cksel;
147 u32 tmr_fiper1;
148 u32 tmr_fiper2;
149};
150
151/*
152 * Register access functions
153 */
154
155/* Caller must hold etsects->lock. */
156static u64 tmr_cnt_read(struct etsects *etsects)
157{
158 u64 ns;
159 u32 lo, hi;
160
161 lo = gfar_read(&etsects->regs->tmr_cnt_l);
162 hi = gfar_read(&etsects->regs->tmr_cnt_h);
163 ns = ((u64) hi) << 32;
164 ns |= lo;
165 return ns;
166}
167
168/* Caller must hold etsects->lock. */
169static void tmr_cnt_write(struct etsects *etsects, u64 ns)
170{
171 u32 hi = ns >> 32;
172 u32 lo = ns & 0xffffffff;
173
174 gfar_write(&etsects->regs->tmr_cnt_l, lo);
175 gfar_write(&etsects->regs->tmr_cnt_h, hi);
176}
177
178/* Caller must hold etsects->lock. */
179static void set_alarm(struct etsects *etsects)
180{
181 u64 ns;
182 u32 lo, hi;
183
184 ns = tmr_cnt_read(etsects) + 1500000000ULL;
185 ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
186 ns -= etsects->tclk_period;
187 hi = ns >> 32;
188 lo = ns & 0xffffffff;
189 gfar_write(&etsects->regs->tmr_alarm1_l, lo);
190 gfar_write(&etsects->regs->tmr_alarm1_h, hi);
191}
192
193/* Caller must hold etsects->lock. */
194static void set_fipers(struct etsects *etsects)
195{
196 u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl);
197
198 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE));
199 gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
200 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
201 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
202 set_alarm(etsects);
203 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE);
204}
205
206/*
207 * Interrupt service routine
208 */
209
210static irqreturn_t isr(int irq, void *priv)
211{
212 struct etsects *etsects = priv;
213 struct ptp_clock_event event;
214 u64 ns;
215 u32 ack = 0, lo, hi, mask, val;
216
217 val = gfar_read(&etsects->regs->tmr_tevent);
218
219 if (val & ETS1) {
220 ack |= ETS1;
221 hi = gfar_read(&etsects->regs->tmr_etts1_h);
222 lo = gfar_read(&etsects->regs->tmr_etts1_l);
223 event.type = PTP_CLOCK_EXTTS;
224 event.index = 0;
225 event.timestamp = ((u64) hi) << 32;
226 event.timestamp |= lo;
227 ptp_clock_event(etsects->clock, &event);
228 }
229
230 if (val & ETS2) {
231 ack |= ETS2;
232 hi = gfar_read(&etsects->regs->tmr_etts2_h);
233 lo = gfar_read(&etsects->regs->tmr_etts2_l);
234 event.type = PTP_CLOCK_EXTTS;
235 event.index = 1;
236 event.timestamp = ((u64) hi) << 32;
237 event.timestamp |= lo;
238 ptp_clock_event(etsects->clock, &event);
239 }
240
241 if (val & ALM2) {
242 ack |= ALM2;
243 if (etsects->alarm_value) {
244 event.type = PTP_CLOCK_ALARM;
245 event.index = 0;
246 event.timestamp = etsects->alarm_value;
247 ptp_clock_event(etsects->clock, &event);
248 }
249 if (etsects->alarm_interval) {
250 ns = etsects->alarm_value + etsects->alarm_interval;
251 hi = ns >> 32;
252 lo = ns & 0xffffffff;
253 spin_lock(&etsects->lock);
254 gfar_write(&etsects->regs->tmr_alarm2_l, lo);
255 gfar_write(&etsects->regs->tmr_alarm2_h, hi);
256 spin_unlock(&etsects->lock);
257 etsects->alarm_value = ns;
258 } else {
259 gfar_write(&etsects->regs->tmr_tevent, ALM2);
260 spin_lock(&etsects->lock);
261 mask = gfar_read(&etsects->regs->tmr_temask);
262 mask &= ~ALM2EN;
263 gfar_write(&etsects->regs->tmr_temask, mask);
264 spin_unlock(&etsects->lock);
265 etsects->alarm_value = 0;
266 etsects->alarm_interval = 0;
267 }
268 }
269
270 if (val & PP1) {
271 ack |= PP1;
272 event.type = PTP_CLOCK_PPS;
273 ptp_clock_event(etsects->clock, &event);
274 }
275
276 if (ack) {
277 gfar_write(&etsects->regs->tmr_tevent, ack);
278 return IRQ_HANDLED;
279 } else
280 return IRQ_NONE;
281}
282
283/*
284 * PTP clock operations
285 */
286
287static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
288{
289 u64 adj;
290 u32 diff, tmr_add;
291 int neg_adj = 0;
292 struct etsects *etsects = container_of(ptp, struct etsects, caps);
293
294 if (ppb < 0) {
295 neg_adj = 1;
296 ppb = -ppb;
297 }
298 tmr_add = etsects->tmr_add;
299 adj = tmr_add;
300 adj *= ppb;
301 diff = div_u64(adj, 1000000000ULL);
302
303 tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
304
305 gfar_write(&etsects->regs->tmr_add, tmr_add);
306
307 return 0;
308}
309
310static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
311{
312 s64 now;
313 unsigned long flags;
314 struct etsects *etsects = container_of(ptp, struct etsects, caps);
315
316 spin_lock_irqsave(&etsects->lock, flags);
317
318 now = tmr_cnt_read(etsects);
319 now += delta;
320 tmr_cnt_write(etsects, now);
321
322 spin_unlock_irqrestore(&etsects->lock, flags);
323
324 set_fipers(etsects);
325
326 return 0;
327}
328
329static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
330{
331 u64 ns;
332 u32 remainder;
333 unsigned long flags;
334 struct etsects *etsects = container_of(ptp, struct etsects, caps);
335
336 spin_lock_irqsave(&etsects->lock, flags);
337
338 ns = tmr_cnt_read(etsects);
339
340 spin_unlock_irqrestore(&etsects->lock, flags);
341
342 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
343 ts->tv_nsec = remainder;
344 return 0;
345}
346
347static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
348 const struct timespec *ts)
349{
350 u64 ns;
351 unsigned long flags;
352 struct etsects *etsects = container_of(ptp, struct etsects, caps);
353
354 ns = ts->tv_sec * 1000000000ULL;
355 ns += ts->tv_nsec;
356
357 spin_lock_irqsave(&etsects->lock, flags);
358
359 tmr_cnt_write(etsects, ns);
360 set_fipers(etsects);
361
362 spin_unlock_irqrestore(&etsects->lock, flags);
363
364 return 0;
365}
366
367static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
368 struct ptp_clock_request *rq, int on)
369{
370 struct etsects *etsects = container_of(ptp, struct etsects, caps);
371 unsigned long flags;
372 u32 bit, mask;
373
374 switch (rq->type) {
375 case PTP_CLK_REQ_EXTTS:
376 switch (rq->extts.index) {
377 case 0:
378 bit = ETS1EN;
379 break;
380 case 1:
381 bit = ETS2EN;
382 break;
383 default:
384 return -EINVAL;
385 }
386 spin_lock_irqsave(&etsects->lock, flags);
387 mask = gfar_read(&etsects->regs->tmr_temask);
388 if (on)
389 mask |= bit;
390 else
391 mask &= ~bit;
392 gfar_write(&etsects->regs->tmr_temask, mask);
393 spin_unlock_irqrestore(&etsects->lock, flags);
394 return 0;
395
396 case PTP_CLK_REQ_PPS:
397 spin_lock_irqsave(&etsects->lock, flags);
398 mask = gfar_read(&etsects->regs->tmr_temask);
399 if (on)
400 mask |= PP1EN;
401 else
402 mask &= ~PP1EN;
403 gfar_write(&etsects->regs->tmr_temask, mask);
404 spin_unlock_irqrestore(&etsects->lock, flags);
405 return 0;
406
407 default:
408 break;
409 }
410
411 return -EOPNOTSUPP;
412}
413
414static struct ptp_clock_info ptp_gianfar_caps = {
415 .owner = THIS_MODULE,
416 .name = "gianfar clock",
417 .max_adj = 512000,
418 .n_alarm = N_ALARM,
419 .n_ext_ts = N_EXT_TS,
420 .n_per_out = 0,
421 .pps = 1,
422 .adjfreq = ptp_gianfar_adjfreq,
423 .adjtime = ptp_gianfar_adjtime,
424 .gettime = ptp_gianfar_gettime,
425 .settime = ptp_gianfar_settime,
426 .enable = ptp_gianfar_enable,
427};
428
429/* OF device tree */
430
431static int get_of_u32(struct device_node *node, char *str, u32 *val)
432{
433 int plen;
434 const u32 *prop = of_get_property(node, str, &plen);
435
436 if (!prop || plen != sizeof(*prop))
437 return -1;
438 *val = *prop;
439 return 0;
440}
441
442static int gianfar_ptp_probe(struct platform_device *dev)
443{
444 struct device_node *node = dev->dev.of_node;
445 struct etsects *etsects;
446 struct timespec now;
447 int err = -ENOMEM;
448 u32 tmr_ctrl;
449 unsigned long flags;
450
451 etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
452 if (!etsects)
453 goto no_memory;
454
455 err = -ENODEV;
456
457 etsects->caps = ptp_gianfar_caps;
458 etsects->cksel = DEFAULT_CKSEL;
459
460 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
461 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
462 get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
463 get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
464 get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
465 get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
466 pr_err("device tree node missing required elements\n");
467 goto no_node;
468 }
469
470 etsects->irq = platform_get_irq(dev, 0);
471
472 if (etsects->irq == NO_IRQ) {
473 pr_err("irq not in device tree\n");
474 goto no_node;
475 }
476 if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
477 pr_err("request_irq failed\n");
478 goto no_node;
479 }
480
481 etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
482 if (!etsects->rsrc) {
483 pr_err("no resource\n");
484 goto no_resource;
485 }
486 if (request_resource(&ioport_resource, etsects->rsrc)) {
487 pr_err("resource busy\n");
488 goto no_resource;
489 }
490
491 spin_lock_init(&etsects->lock);
492
493 etsects->regs = ioremap(etsects->rsrc->start,
494 1 + etsects->rsrc->end - etsects->rsrc->start);
495 if (!etsects->regs) {
496 pr_err("ioremap ptp registers failed\n");
497 goto no_ioremap;
498 }
499 getnstimeofday(&now);
500 ptp_gianfar_settime(&etsects->caps, &now);
501
502 tmr_ctrl =
503 (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
504 (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
505
506 spin_lock_irqsave(&etsects->lock, flags);
507
508 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
509 gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
510 gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
511 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
512 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
513 set_alarm(etsects);
514 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE);
515
516 spin_unlock_irqrestore(&etsects->lock, flags);
517
518 etsects->clock = ptp_clock_register(&etsects->caps);
519 if (IS_ERR(etsects->clock)) {
520 err = PTR_ERR(etsects->clock);
521 goto no_clock;
522 }
523
524 dev_set_drvdata(&dev->dev, etsects);
525
526 return 0;
527
528no_clock:
529no_ioremap:
530 release_resource(etsects->rsrc);
531no_resource:
532 free_irq(etsects->irq, etsects);
533no_node:
534 kfree(etsects);
535no_memory:
536 return err;
537}
538
539static int gianfar_ptp_remove(struct platform_device *dev)
540{
541 struct etsects *etsects = dev_get_drvdata(&dev->dev);
542
543 gfar_write(&etsects->regs->tmr_temask, 0);
544 gfar_write(&etsects->regs->tmr_ctrl, 0);
545
546 ptp_clock_unregister(etsects->clock);
547 iounmap(etsects->regs);
548 release_resource(etsects->rsrc);
549 free_irq(etsects->irq, etsects);
550 kfree(etsects);
551
552 return 0;
553}
554
555static struct of_device_id match_table[] = {
556 { .compatible = "fsl,etsec-ptp" },
557 {},
558};
559
560static struct platform_driver gianfar_ptp_driver = {
561 .driver = {
562 .name = "gianfar_ptp",
563 .of_match_table = match_table,
564 .owner = THIS_MODULE,
565 },
566 .probe = gianfar_ptp_probe,
567 .remove = gianfar_ptp_remove,
568};
569
570/* module operations */
571
572static int __init ptp_gianfar_init(void)
573{
574 return platform_driver_register(&gianfar_ptp_driver);
575}
576
577module_init(ptp_gianfar_init);
578
579static void __exit ptp_gianfar_exit(void)
580{
581 platform_driver_unregister(&gianfar_ptp_driver);
582}
583
584module_exit(ptp_gianfar_exit);
585
586MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
587MODULE_DESCRIPTION("PTP clock using the eTSEC");
588MODULE_LICENSE("GPL");
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index a3c0dc9d8b98..9537aaa50c2f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -69,7 +69,7 @@ static const char paranoia_str[] = KERN_ERR
69 69
70static const char bc_drvname[] = "baycom_epp"; 70static const char bc_drvname[] = "baycom_epp";
71static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n" 71static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n"
72"baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n"; 72"baycom_epp: version 0.7\n";
73 73
74/* --------------------------------------------------------------------- */ 74/* --------------------------------------------------------------------- */
75 75
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 5f5af9a606f8..279d2296290a 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -102,7 +102,7 @@
102 102
103static const char bc_drvname[] = "baycom_par"; 103static const char bc_drvname[] = "baycom_par";
104static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 104static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
105"baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n"; 105"baycom_par: version 0.9\n";
106 106
107/* --------------------------------------------------------------------- */ 107/* --------------------------------------------------------------------- */
108 108
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 3e25f10cabd6..99cdce33df8b 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -92,7 +92,7 @@
92 92
93static const char bc_drvname[] = "baycom_ser_fdx"; 93static const char bc_drvname[] = "baycom_ser_fdx";
94static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 94static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
95"baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; 95"baycom_ser_fdx: version 0.10\n";
96 96
97/* --------------------------------------------------------------------- */ 97/* --------------------------------------------------------------------- */
98 98
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 1686f6dcbbce..d92fe6ca788f 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -80,7 +80,7 @@
80 80
81static const char bc_drvname[] = "baycom_ser_hdx"; 81static const char bc_drvname[] = "baycom_ser_hdx";
82static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" 82static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
83"baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; 83"baycom_ser_hdx: version 0.10\n";
84 84
85/* --------------------------------------------------------------------- */ 85/* --------------------------------------------------------------------- */
86 86
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 5b37579e84b7..a4a3516b6bbf 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(hdlcdrv_unregister);
749static int __init hdlcdrv_init_driver(void) 749static int __init hdlcdrv_init_driver(void)
750{ 750{
751 printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n"); 751 printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n");
752 printk(KERN_INFO "hdlcdrv: version 0.8 compiled " __TIME__ " " __DATE__ "\n"); 752 printk(KERN_INFO "hdlcdrv: version 0.8\n");
753 return 0; 753 return 0;
754} 754}
755 755
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 96c95617195f..32f07f868d89 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -915,7 +915,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
915 915
916 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 916 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
917 if (!skb) { 917 if (!skb) {
918 show_free_areas(); 918 show_free_areas(0);
919 continue; 919 continue;
920 } 920 }
921 921
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index f940dfa1f7f8..9d4ce1aba10c 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -67,27 +67,27 @@ static void bfin_sir_stop_tx(struct bfin_sir_port *port)
67 disable_dma(port->tx_dma_channel); 67 disable_dma(port->tx_dma_channel);
68#endif 68#endif
69 69
70 while (!(SIR_UART_GET_LSR(port) & THRE)) { 70 while (!(UART_GET_LSR(port) & THRE)) {
71 cpu_relax(); 71 cpu_relax();
72 continue; 72 continue;
73 } 73 }
74 74
75 SIR_UART_STOP_TX(port); 75 UART_CLEAR_IER(port, ETBEI);
76} 76}
77 77
78static void bfin_sir_enable_tx(struct bfin_sir_port *port) 78static void bfin_sir_enable_tx(struct bfin_sir_port *port)
79{ 79{
80 SIR_UART_ENABLE_TX(port); 80 UART_SET_IER(port, ETBEI);
81} 81}
82 82
83static void bfin_sir_stop_rx(struct bfin_sir_port *port) 83static void bfin_sir_stop_rx(struct bfin_sir_port *port)
84{ 84{
85 SIR_UART_STOP_RX(port); 85 UART_CLEAR_IER(port, ERBFI);
86} 86}
87 87
88static void bfin_sir_enable_rx(struct bfin_sir_port *port) 88static void bfin_sir_enable_rx(struct bfin_sir_port *port)
89{ 89{
90 SIR_UART_ENABLE_RX(port); 90 UART_SET_IER(port, ERBFI);
91} 91}
92 92
93static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) 93static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
@@ -116,7 +116,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
116 116
117 do { 117 do {
118 udelay(utime); 118 udelay(utime);
119 lsr = SIR_UART_GET_LSR(port); 119 lsr = UART_GET_LSR(port);
120 } while (!(lsr & TEMT) && count--); 120 } while (!(lsr & TEMT) && count--);
121 121
122 /* The useconds for 1 bits to transmit */ 122 /* The useconds for 1 bits to transmit */
@@ -125,27 +125,27 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
125 /* Clear UCEN bit to reset the UART state machine 125 /* Clear UCEN bit to reset the UART state machine
126 * and control registers 126 * and control registers
127 */ 127 */
128 val = SIR_UART_GET_GCTL(port); 128 val = UART_GET_GCTL(port);
129 val &= ~UCEN; 129 val &= ~UCEN;
130 SIR_UART_PUT_GCTL(port, val); 130 UART_PUT_GCTL(port, val);
131 131
132 /* Set DLAB in LCR to Access THR RBR IER */ 132 /* Set DLAB in LCR to Access THR RBR IER */
133 SIR_UART_SET_DLAB(port); 133 UART_SET_DLAB(port);
134 SSYNC(); 134 SSYNC();
135 135
136 SIR_UART_PUT_DLL(port, quot & 0xFF); 136 UART_PUT_DLL(port, quot & 0xFF);
137 SIR_UART_PUT_DLH(port, (quot >> 8) & 0xFF); 137 UART_PUT_DLH(port, (quot >> 8) & 0xFF);
138 SSYNC(); 138 SSYNC();
139 139
140 /* Clear DLAB in LCR */ 140 /* Clear DLAB in LCR */
141 SIR_UART_CLEAR_DLAB(port); 141 UART_CLEAR_DLAB(port);
142 SSYNC(); 142 SSYNC();
143 143
144 SIR_UART_PUT_LCR(port, lcr); 144 UART_PUT_LCR(port, lcr);
145 145
146 val = SIR_UART_GET_GCTL(port); 146 val = UART_GET_GCTL(port);
147 val |= UCEN; 147 val |= UCEN;
148 SIR_UART_PUT_GCTL(port, val); 148 UART_PUT_GCTL(port, val);
149 149
150 ret = 0; 150 ret = 0;
151 break; 151 break;
@@ -154,12 +154,12 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
154 break; 154 break;
155 } 155 }
156 156
157 val = SIR_UART_GET_GCTL(port); 157 val = UART_GET_GCTL(port);
158 /* If 'RPOLC' is not added, we can't catch the receive interrupt. 158
159 * It's related to the HW layout and the IR transceiver. 159
160 */ 160 */
161 val |= IREN | RPOLC; 161 val |= IREN | RPOLC;
162 SIR_UART_PUT_GCTL(port, val); 162 UART_PUT_GCTL(port, val);
163 return ret; 163 return ret;
164} 164}
165 165
@@ -168,7 +168,7 @@ static int bfin_sir_is_receiving(struct net_device *dev)
168 struct bfin_sir_self *self = netdev_priv(dev); 168 struct bfin_sir_self *self = netdev_priv(dev);
169 struct bfin_sir_port *port = self->sir_port; 169 struct bfin_sir_port *port = self->sir_port;
170 170
171 if (!(SIR_UART_GET_IER(port) & ERBFI)) 171 if (!(UART_GET_IER(port) & ERBFI))
172 return 0; 172 return 0;
173 return self->rx_buff.state != OUTSIDE_FRAME; 173 return self->rx_buff.state != OUTSIDE_FRAME;
174} 174}
@@ -182,7 +182,7 @@ static void bfin_sir_tx_chars(struct net_device *dev)
182 182
183 if (self->tx_buff.len != 0) { 183 if (self->tx_buff.len != 0) {
184 chr = *(self->tx_buff.data); 184 chr = *(self->tx_buff.data);
185 SIR_UART_PUT_CHAR(port, chr); 185 UART_PUT_CHAR(port, chr);
186 self->tx_buff.data++; 186 self->tx_buff.data++;
187 self->tx_buff.len--; 187 self->tx_buff.len--;
188 } else { 188 } else {
@@ -206,8 +206,8 @@ static void bfin_sir_rx_chars(struct net_device *dev)
206 struct bfin_sir_port *port = self->sir_port; 206 struct bfin_sir_port *port = self->sir_port;
207 unsigned char ch; 207 unsigned char ch;
208 208
209 SIR_UART_CLEAR_LSR(port); 209 UART_CLEAR_LSR(port);
210 ch = SIR_UART_GET_CHAR(port); 210 ch = UART_GET_CHAR(port);
211 async_unwrap_char(dev, &self->stats, &self->rx_buff, ch); 211 async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
212 dev->last_rx = jiffies; 212 dev->last_rx = jiffies;
213} 213}
@@ -219,7 +219,7 @@ static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
219 struct bfin_sir_port *port = self->sir_port; 219 struct bfin_sir_port *port = self->sir_port;
220 220
221 spin_lock(&self->lock); 221 spin_lock(&self->lock);
222 while ((SIR_UART_GET_LSR(port) & DR)) 222 while ((UART_GET_LSR(port) & DR))
223 bfin_sir_rx_chars(dev); 223 bfin_sir_rx_chars(dev);
224 spin_unlock(&self->lock); 224 spin_unlock(&self->lock);
225 225
@@ -233,7 +233,7 @@ static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
233 struct bfin_sir_port *port = self->sir_port; 233 struct bfin_sir_port *port = self->sir_port;
234 234
235 spin_lock(&self->lock); 235 spin_lock(&self->lock);
236 if (SIR_UART_GET_LSR(port) & THRE) 236 if (UART_GET_LSR(port) & THRE)
237 bfin_sir_tx_chars(dev); 237 bfin_sir_tx_chars(dev);
238 spin_unlock(&self->lock); 238 spin_unlock(&self->lock);
239 239
@@ -312,7 +312,7 @@ static void bfin_sir_dma_rx_chars(struct net_device *dev)
312 struct bfin_sir_port *port = self->sir_port; 312 struct bfin_sir_port *port = self->sir_port;
313 int i; 313 int i;
314 314
315 SIR_UART_CLEAR_LSR(port); 315 UART_CLEAR_LSR(port);
316 316
317 for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++) 317 for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
318 async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]); 318 async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
@@ -430,11 +430,10 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev
430 unsigned short val; 430 unsigned short val;
431 431
432 bfin_sir_stop_rx(port); 432 bfin_sir_stop_rx(port);
433 SIR_UART_DISABLE_INTS(port);
434 433
435 val = SIR_UART_GET_GCTL(port); 434 val = UART_GET_GCTL(port);
436 val &= ~(UCEN | IREN | RPOLC); 435 val &= ~(UCEN | IREN | RPOLC);
437 SIR_UART_PUT_GCTL(port, val); 436 UART_PUT_GCTL(port, val);
438 437
439#ifdef CONFIG_SIR_BFIN_DMA 438#ifdef CONFIG_SIR_BFIN_DMA
440 disable_dma(port->tx_dma_channel); 439 disable_dma(port->tx_dma_channel);
@@ -518,12 +517,12 @@ static void bfin_sir_send_work(struct work_struct *work)
518 * sending data. We also can set the speed, which will 517 * sending data. We also can set the speed, which will
519 * reset all the UART. 518 * reset all the UART.
520 */ 519 */
521 val = SIR_UART_GET_GCTL(port); 520 val = UART_GET_GCTL(port);
522 val &= ~(IREN | RPOLC); 521 val &= ~(IREN | RPOLC);
523 SIR_UART_PUT_GCTL(port, val); 522 UART_PUT_GCTL(port, val);
524 SSYNC(); 523 SSYNC();
525 val |= IREN | RPOLC; 524 val |= IREN | RPOLC;
526 SIR_UART_PUT_GCTL(port, val); 525 UART_PUT_GCTL(port, val);
527 SSYNC(); 526 SSYNC();
528 /* bfin_sir_set_speed(port, self->speed); */ 527 /* bfin_sir_set_speed(port, self->speed); */
529 528
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index e3b285a67734..29cbde8501ed 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,7 +26,6 @@
26#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <asm/dma.h> 27#include <asm/dma.h>
28#include <asm/portmux.h> 28#include <asm/portmux.h>
29#include <mach/bfin_serial_5xx.h>
30#undef DRIVER_NAME 29#undef DRIVER_NAME
31 30
32#ifdef CONFIG_SIR_BFIN_DMA 31#ifdef CONFIG_SIR_BFIN_DMA
@@ -83,64 +82,10 @@ struct bfin_sir_self {
83 82
84#define DRIVER_NAME "bfin_sir" 83#define DRIVER_NAME "bfin_sir"
85 84
86#define SIR_UART_GET_CHAR(port) bfin_read16((port)->membase + OFFSET_RBR) 85#define port_membase(port) (((struct bfin_sir_port *)(port))->membase)
87#define SIR_UART_GET_DLL(port) bfin_read16((port)->membase + OFFSET_DLL) 86#define get_lsr_cache(port) (((struct bfin_sir_port *)(port))->lsr)
88#define SIR_UART_GET_DLH(port) bfin_read16((port)->membase + OFFSET_DLH) 87#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v))
89#define SIR_UART_GET_LCR(port) bfin_read16((port)->membase + OFFSET_LCR) 88#include <asm/bfin_serial.h>
90#define SIR_UART_GET_GCTL(port) bfin_read16((port)->membase + OFFSET_GCTL)
91
92#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v)
93#define SIR_UART_PUT_DLL(port, v) bfin_write16(((port)->membase + OFFSET_DLL), v)
94#define SIR_UART_PUT_DLH(port, v) bfin_write16(((port)->membase + OFFSET_DLH), v)
95#define SIR_UART_PUT_LCR(port, v) bfin_write16(((port)->membase + OFFSET_LCR), v)
96#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v)
97
98#ifdef CONFIG_BF54x
99#define SIR_UART_GET_LSR(port) bfin_read16((port)->membase + OFFSET_LSR)
100#define SIR_UART_GET_IER(port) bfin_read16((port)->membase + OFFSET_IER_SET)
101#define SIR_UART_SET_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER_SET), v)
102#define SIR_UART_CLEAR_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER_CLEAR), v)
103#define SIR_UART_PUT_LSR(port, v) bfin_write16(((port)->membase + OFFSET_LSR), v)
104#define SIR_UART_CLEAR_LSR(port) bfin_write16(((port)->membase + OFFSET_LSR), -1)
105
106#define SIR_UART_SET_DLAB(port)
107#define SIR_UART_CLEAR_DLAB(port)
108
109#define SIR_UART_ENABLE_INTS(port, v) SIR_UART_SET_IER(port, v)
110#define SIR_UART_DISABLE_INTS(port) SIR_UART_CLEAR_IER(port, 0xF)
111#define SIR_UART_STOP_TX(port) do { SIR_UART_PUT_LSR(port, TFI); SIR_UART_CLEAR_IER(port, ETBEI); } while (0)
112#define SIR_UART_ENABLE_TX(port) do { SIR_UART_SET_IER(port, ETBEI); } while (0)
113#define SIR_UART_STOP_RX(port) do { SIR_UART_CLEAR_IER(port, ERBFI); } while (0)
114#define SIR_UART_ENABLE_RX(port) do { SIR_UART_SET_IER(port, ERBFI); } while (0)
115#else
116
117#define SIR_UART_GET_IIR(port) bfin_read16((port)->membase + OFFSET_IIR)
118#define SIR_UART_GET_IER(port) bfin_read16((port)->membase + OFFSET_IER)
119#define SIR_UART_PUT_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER), v)
120
121#define SIR_UART_SET_DLAB(port) do { SIR_UART_PUT_LCR(port, SIR_UART_GET_LCR(port) | DLAB); } while (0)
122#define SIR_UART_CLEAR_DLAB(port) do { SIR_UART_PUT_LCR(port, SIR_UART_GET_LCR(port) & ~DLAB); } while (0)
123
124#define SIR_UART_ENABLE_INTS(port, v) SIR_UART_PUT_IER(port, v)
125#define SIR_UART_DISABLE_INTS(port) SIR_UART_PUT_IER(port, 0)
126#define SIR_UART_STOP_TX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) & ~ETBEI); } while (0)
127#define SIR_UART_ENABLE_TX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) | ETBEI); } while (0)
128#define SIR_UART_STOP_RX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) & ~ERBFI); } while (0)
129#define SIR_UART_ENABLE_RX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) | ERBFI); } while (0)
130
131static inline unsigned int SIR_UART_GET_LSR(struct bfin_sir_port *port)
132{
133 unsigned int lsr = bfin_read16(port->membase + OFFSET_LSR);
134 port->lsr |= (lsr & (BI|FE|PE|OE));
135 return lsr | port->lsr;
136}
137
138static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port)
139{
140 port->lsr = 0;
141 bfin_read16(port->membase + OFFSET_LSR);
142}
143#endif
144 89
145static const unsigned short per[][4] = { 90static const unsigned short per[][4] = {
146 /* rx pin tx pin NULL uart_number */ 91 /* rx pin tx pin NULL uart_number */
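The three hooks defined just above (port_membase, get_lsr_cache, put_lsr_cache) are what the shared <asm/bfin_serial.h> header is expected to build its register accessors on, which is why the whole SIR_UART_* block could be dropped. A minimal sketch of that wiring, assuming the generic header follows the same bfin_read16/bfin_write16 pattern as the macros it replaces (the real definitions presumably live in arch/blackfin/include/asm/bfin_serial.h):

/* Sketch only -- not the actual header contents */
#define UART_GET_GCTL(p)     bfin_read16(port_membase(p) + OFFSET_GCTL)
#define UART_PUT_GCTL(p, v)  bfin_write16(port_membase(p) + OFFSET_GCTL, v)
#define UART_GET_LSR(p)      bfin_read16(port_membase(p) + OFFSET_LSR)

With accessors of this shape in place, the UART_GET_GCTL()/UART_PUT_GCTL() calls in bfin_sir.c resolve against the generic header instead of the removed per-variant mach/bfin_serial_5xx.h.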
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index f0d8346d0fa5..4d40626b3bfa 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -26,7 +26,6 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/mfd/core.h>
30#include <linux/netdevice.h> 29#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
32#include <linux/ethtool.h> 31#include <linux/ethtool.h>
@@ -1146,7 +1145,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
1146 struct resource *iomem; 1145 struct resource *iomem;
1147 struct net_device *netdev; 1146 struct net_device *netdev;
1148 struct ks8842_adapter *adapter; 1147 struct ks8842_adapter *adapter;
1149 struct ks8842_platform_data *pdata = mfd_get_data(pdev); 1148 struct ks8842_platform_data *pdata = pdev->dev.platform_data;
1150 u16 id; 1149 u16 id;
1151 unsigned i; 1150 unsigned i;
1152 1151
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 13bebab65d02..2333215bbb32 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_FIXED_PHY) += fixed.o
19obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o 19obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
21obj-$(CONFIG_NATIONAL_PHY) += national.o 21obj-$(CONFIG_NATIONAL_PHY) += national.o
22obj-$(CONFIG_DP83640_PHY) += dp83640.o
22obj-$(CONFIG_STE10XP) += ste10Xp.o 23obj-$(CONFIG_STE10XP) += ste10Xp.o
23obj-$(CONFIG_MICREL_PHY) += micrel.o 24obj-$(CONFIG_MICREL_PHY) += micrel.o
24obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
new file mode 100644
index 000000000000..b0c9522bb535
--- /dev/null
+++ b/drivers/net/phy/dp83640.c
@@ -0,0 +1,1100 @@
1/*
2 * Driver for the National Semiconductor DP83640 PHYTER
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/ethtool.h>
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/mii.h>
24#include <linux/module.h>
25#include <linux/net_tstamp.h>
26#include <linux/netdevice.h>
27#include <linux/phy.h>
28#include <linux/ptp_classify.h>
29#include <linux/ptp_clock_kernel.h>
30
31#include "dp83640_reg.h"
32
33#define DP83640_PHY_ID 0x20005ce1
34#define PAGESEL 0x13
35#define LAYER4 0x02
36#define LAYER2 0x01
37#define MAX_RXTS 4
38#define MAX_TXTS 4
39#define N_EXT_TS 1
40#define PSF_PTPVER 2
41#define PSF_EVNT 0x4000
42#define PSF_RX 0x2000
43#define PSF_TX 0x1000
44#define EXT_EVENT 1
45#define EXT_GPIO 1
46#define CAL_EVENT 2
47#define CAL_GPIO 9
48#define CAL_TRIGGER 2
49
50/* phyter seems to miss the mark by 16 ns */
51#define ADJTIME_FIX 16
52
53#if defined(__BIG_ENDIAN)
54#define ENDIAN_FLAG 0
55#elif defined(__LITTLE_ENDIAN)
56#define ENDIAN_FLAG PSF_ENDIAN
57#endif
58
59#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
60
61struct phy_rxts {
62 u16 ns_lo; /* ns[15:0] */
63 u16 ns_hi; /* overflow[1:0], ns[29:16] */
64 u16 sec_lo; /* sec[15:0] */
65 u16 sec_hi; /* sec[31:16] */
66 u16 seqid; /* sequenceId[15:0] */
67 u16 msgtype; /* messageType[3:0], hash[11:0] */
68};
69
70struct phy_txts {
71 u16 ns_lo; /* ns[15:0] */
72 u16 ns_hi; /* overflow[1:0], ns[29:16] */
73 u16 sec_lo; /* sec[15:0] */
74 u16 sec_hi; /* sec[31:16] */
75};
76
77struct rxts {
78 struct list_head list;
79 unsigned long tmo;
80 u64 ns;
81 u16 seqid;
82 u8 msgtype;
83 u16 hash;
84};
85
86struct dp83640_clock;
87
88struct dp83640_private {
89 struct list_head list;
90 struct dp83640_clock *clock;
91 struct phy_device *phydev;
92 struct work_struct ts_work;
93 int hwts_tx_en;
94 int hwts_rx_en;
95 int layer;
96 int version;
97 /* remember state of cfg0 during calibration */
98 int cfg0;
99 /* remember the last event time stamp */
100 struct phy_txts edata;
101 /* list of rx timestamps */
102 struct list_head rxts;
103 struct list_head rxpool;
104 struct rxts rx_pool_data[MAX_RXTS];
105 /* protects above three fields from concurrent access */
106 spinlock_t rx_lock;
107 /* queues of incoming and outgoing packets */
108 struct sk_buff_head rx_queue;
109 struct sk_buff_head tx_queue;
110};
111
112struct dp83640_clock {
113 /* keeps the instance in the 'phyter_clocks' list */
114 struct list_head list;
115 /* we create one clock instance per MII bus */
116 struct mii_bus *bus;
117 /* protects extended registers from concurrent access */
118 struct mutex extreg_lock;
119 /* remembers which page was last selected */
120 int page;
121 /* our advertised capabilities */
122 struct ptp_clock_info caps;
123 /* protects the three fields below from concurrent access */
124 struct mutex clock_lock;
125 /* the one phyter from which we shall read */
126 struct dp83640_private *chosen;
127 /* list of the other attached phyters, not chosen */
128 struct list_head phylist;
129 /* reference to our PTP hardware clock */
130 struct ptp_clock *ptp_clock;
131};
132
133/* globals */
134
135static int chosen_phy = -1;
136static ushort cal_gpio = 4;
137
138module_param(chosen_phy, int, 0444);
139module_param(cal_gpio, ushort, 0444);
140
141MODULE_PARM_DESC(chosen_phy, \
142 "The address of the PHY to use for the ancillary clock features");
143MODULE_PARM_DESC(cal_gpio, \
144 "Which GPIO line to use for synchronizing multiple PHYs");
145
146/* a list of clocks and a mutex to protect it */
147static LIST_HEAD(phyter_clocks);
148static DEFINE_MUTEX(phyter_clocks_lock);
149
150static void rx_timestamp_work(struct work_struct *work);
151
152/* extended register access functions */
153
154#define BROADCAST_ADDR 31
155
156static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val)
157{
158 return mdiobus_write(bus, BROADCAST_ADDR, regnum, val);
159}
160
161/* Caller must hold extreg_lock. */
162static int ext_read(struct phy_device *phydev, int page, u32 regnum)
163{
164 struct dp83640_private *dp83640 = phydev->priv;
165 int val;
166
167 if (dp83640->clock->page != page) {
168 broadcast_write(phydev->bus, PAGESEL, page);
169 dp83640->clock->page = page;
170 }
171 val = phy_read(phydev, regnum);
172
173 return val;
174}
175
176/* Caller must hold extreg_lock. */
177static void ext_write(int broadcast, struct phy_device *phydev,
178 int page, u32 regnum, u16 val)
179{
180 struct dp83640_private *dp83640 = phydev->priv;
181
182 if (dp83640->clock->page != page) {
183 broadcast_write(phydev->bus, PAGESEL, page);
184 dp83640->clock->page = page;
185 }
186 if (broadcast)
187 broadcast_write(phydev->bus, regnum, val);
188 else
189 phy_write(phydev, regnum, val);
190}
191
192/* Caller must hold extreg_lock. */
193static int tdr_write(int bc, struct phy_device *dev,
194 const struct timespec *ts, u16 cmd)
195{
196 ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0] */
197 ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16); /* ns[31:16] */
198 ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff); /* sec[15:0] */
199 ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16); /* sec[31:16]*/
200
201 ext_write(bc, dev, PAGE4, PTP_CTL, cmd);
202
203 return 0;
204}
205
206/* convert phy timestamps into driver timestamps */
207
208static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
209{
210 u32 sec;
211
212 sec = p->sec_lo;
213 sec |= p->sec_hi << 16;
214
215 rxts->ns = p->ns_lo;
216 rxts->ns |= (p->ns_hi & 0x3fff) << 16;
217 rxts->ns += ((u64)sec) * 1000000000ULL;
218 rxts->seqid = p->seqid;
219 rxts->msgtype = (p->msgtype >> 12) & 0xf;
220 rxts->hash = p->msgtype & 0x0fff;
221 rxts->tmo = jiffies + HZ;
222}
223
224static u64 phy2txts(struct phy_txts *p)
225{
226 u64 ns;
227 u32 sec;
228
229 sec = p->sec_lo;
230 sec |= p->sec_hi << 16;
231
232 ns = p->ns_lo;
233 ns |= (p->ns_hi & 0x3fff) << 16;
234 ns += ((u64)sec) * 1000000000ULL;
235
236 return ns;
237}
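	/* Worked example (illustrative): a phy_txts of
	 * sec_hi = 0x0001, sec_lo = 0x86a0, ns_hi = 0x0000, ns_lo = 0x0200
	 * gives sec = 0x000186a0 = 100000 and ns = 0x200 = 512, so phy2txts()
	 * returns 100000 * 1000000000 + 512 = 100000000000512 ns. The top two
	 * bits of ns_hi are the overflow counter and are masked off by 0x3fff.
	 */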
238
239/* ptp clock methods */
240
241static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
242{
243 struct dp83640_clock *clock =
244 container_of(ptp, struct dp83640_clock, caps);
245 struct phy_device *phydev = clock->chosen->phydev;
246 u64 rate;
247 int neg_adj = 0;
248 u16 hi, lo;
249
250 if (ppb < 0) {
251 neg_adj = 1;
252 ppb = -ppb;
253 }
254 rate = ppb;
255 rate <<= 26;
256 rate = div_u64(rate, 1953125);
257
258 hi = (rate >> 16) & PTP_RATE_HI_MASK;
259 if (neg_adj)
260 hi |= PTP_RATE_DIR;
261
262 lo = rate & 0xffff;
263
264 mutex_lock(&clock->extreg_lock);
265
266 ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
267 ext_write(1, phydev, PAGE4, PTP_RATEL, lo);
268
269 mutex_unlock(&clock->extreg_lock);
270
271 return 0;
272}
273
274static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
275{
276 struct dp83640_clock *clock =
277 container_of(ptp, struct dp83640_clock, caps);
278 struct phy_device *phydev = clock->chosen->phydev;
279 struct timespec ts;
280 int err;
281
282 delta += ADJTIME_FIX;
283
284 ts = ns_to_timespec(delta);
285
286 mutex_lock(&clock->extreg_lock);
287
288 err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);
289
290 mutex_unlock(&clock->extreg_lock);
291
292 return err;
293}
294
295static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
296{
297 struct dp83640_clock *clock =
298 container_of(ptp, struct dp83640_clock, caps);
299 struct phy_device *phydev = clock->chosen->phydev;
300 unsigned int val[4];
301
302 mutex_lock(&clock->extreg_lock);
303
304 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);
305
306 val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0] */
307 val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */
308 val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */
309 val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */
310
311 mutex_unlock(&clock->extreg_lock);
312
313 ts->tv_nsec = val[0] | (val[1] << 16);
314 ts->tv_sec = val[2] | (val[3] << 16);
315
316 return 0;
317}
318
319static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
320 const struct timespec *ts)
321{
322 struct dp83640_clock *clock =
323 container_of(ptp, struct dp83640_clock, caps);
324 struct phy_device *phydev = clock->chosen->phydev;
325 int err;
326
327 mutex_lock(&clock->extreg_lock);
328
329 err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);
330
331 mutex_unlock(&clock->extreg_lock);
332
333 return err;
334}
335
336static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
337 struct ptp_clock_request *rq, int on)
338{
339 struct dp83640_clock *clock =
340 container_of(ptp, struct dp83640_clock, caps);
341 struct phy_device *phydev = clock->chosen->phydev;
342 u16 evnt;
343
344 switch (rq->type) {
345 case PTP_CLK_REQ_EXTTS:
346 if (rq->extts.index != 0)
347 return -EINVAL;
348 evnt = EVNT_WR | (EXT_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
349 if (on) {
350 evnt |= (EXT_GPIO & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
351 evnt |= EVNT_RISE;
352 }
353 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
354 return 0;
355 default:
356 break;
357 }
358
359 return -EOPNOTSUPP;
360}
361
362static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
363static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
364
365static void enable_status_frames(struct phy_device *phydev, bool on)
366{
367 u16 cfg0 = 0, ver;
368
369 if (on)
370 cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;
371
372 ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
373
374 ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
375 ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
376
377 if (!phydev->attached_dev) {
378 pr_warning("dp83640: expected to find an attached netdevice\n");
379 return;
380 }
381
382 if (on) {
383 if (dev_mc_add(phydev->attached_dev, status_frame_dst))
384 pr_warning("dp83640: failed to add mc address\n");
385 } else {
386 if (dev_mc_del(phydev->attached_dev, status_frame_dst))
387 pr_warning("dp83640: failed to delete mc address\n");
388 }
389}
390
391static bool is_status_frame(struct sk_buff *skb, int type)
392{
393 struct ethhdr *h = eth_hdr(skb);
394
395 if (PTP_CLASS_V2_L2 == type &&
396 !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
397 return true;
398 else
399 return false;
400}
401
402static int expired(struct rxts *rxts)
403{
404 return time_after(jiffies, rxts->tmo);
405}
406
407/* Caller must hold rx_lock. */
408static void prune_rx_ts(struct dp83640_private *dp83640)
409{
410 struct list_head *this, *next;
411 struct rxts *rxts;
412
413 list_for_each_safe(this, next, &dp83640->rxts) {
414 rxts = list_entry(this, struct rxts, list);
415 if (expired(rxts)) {
416 list_del_init(&rxts->list);
417 list_add(&rxts->list, &dp83640->rxpool);
418 }
419 }
420}
421
422/* synchronize the phyters so they act as one clock */
423
424static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
425{
426 int val;
427 phy_write(phydev, PAGESEL, 0);
428 val = phy_read(phydev, PHYCR2);
429 if (on)
430 val |= BC_WRITE;
431 else
432 val &= ~BC_WRITE;
433 phy_write(phydev, PHYCR2, val);
434 phy_write(phydev, PAGESEL, init_page);
435}
436
437static void recalibrate(struct dp83640_clock *clock)
438{
439 s64 now, diff;
440 struct phy_txts event_ts;
441 struct timespec ts;
442 struct list_head *this;
443 struct dp83640_private *tmp;
444 struct phy_device *master = clock->chosen->phydev;
445 u16 cfg0, evnt, ptp_trig, trigger, val;
446
447 trigger = CAL_TRIGGER;
448
449 mutex_lock(&clock->extreg_lock);
450
451 /*
452 * enable broadcast, disable status frames, enable ptp clock
453 */
454 list_for_each(this, &clock->phylist) {
455 tmp = list_entry(this, struct dp83640_private, list);
456 enable_broadcast(tmp->phydev, clock->page, 1);
457 tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
458 ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
459 ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
460 }
461 enable_broadcast(master, clock->page, 1);
462 cfg0 = ext_read(master, PAGE5, PSF_CFG0);
463 ext_write(0, master, PAGE5, PSF_CFG0, 0);
464 ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);
465
466 /*
467 * enable an event timestamp
468 */
469 evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
470 evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
471 evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
472
473 list_for_each(this, &clock->phylist) {
474 tmp = list_entry(this, struct dp83640_private, list);
475 ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
476 }
477 ext_write(0, master, PAGE5, PTP_EVNT, evnt);
478
479 /*
480 * configure a trigger
481 */
482 ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
483 ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
484 ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
485 ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);
486
487 /* load trigger */
488 val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
489 val |= TRIG_LOAD;
490 ext_write(0, master, PAGE4, PTP_CTL, val);
491
492 /* enable trigger */
493 val &= ~TRIG_LOAD;
494 val |= TRIG_EN;
495 ext_write(0, master, PAGE4, PTP_CTL, val);
496
497 /* disable trigger */
498 val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
499 val |= TRIG_DIS;
500 ext_write(0, master, PAGE4, PTP_CTL, val);
501
502 /*
503 * read out and correct offsets
504 */
505 val = ext_read(master, PAGE4, PTP_STS);
506 pr_info("master PTP_STS 0x%04hx", val);
507 val = ext_read(master, PAGE4, PTP_ESTS);
508 pr_info("master PTP_ESTS 0x%04hx", val);
509 event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
510 event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
511 event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
512 event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
513 now = phy2txts(&event_ts);
514
515 list_for_each(this, &clock->phylist) {
516 tmp = list_entry(this, struct dp83640_private, list);
517 val = ext_read(tmp->phydev, PAGE4, PTP_STS);
518 pr_info("slave PTP_STS 0x%04hx", val);
519 val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
520 pr_info("slave PTP_ESTS 0x%04hx", val);
521 event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
522 event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
523 event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
524 event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
525 diff = now - (s64) phy2txts(&event_ts);
526 pr_info("slave offset %lld nanoseconds\n", diff);
527 diff += ADJTIME_FIX;
528 ts = ns_to_timespec(diff);
529 tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
530 }
531
532 /*
533 * restore status frames
534 */
535 list_for_each(this, &clock->phylist) {
536 tmp = list_entry(this, struct dp83640_private, list);
537 ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
538 }
539 ext_write(0, master, PAGE5, PSF_CFG0, cfg0);
540
541 mutex_unlock(&clock->extreg_lock);
542}
543
544/* time stamping methods */
545
546static void decode_evnt(struct dp83640_private *dp83640,
547 struct phy_txts *phy_txts, u16 ests)
548{
549 struct ptp_clock_event event;
550 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
551
552 switch (words) { /* fall through in every case */
553 case 3:
554 dp83640->edata.sec_hi = phy_txts->sec_hi;
555 case 2:
556 dp83640->edata.sec_lo = phy_txts->sec_lo;
557 case 1:
558 dp83640->edata.ns_hi = phy_txts->ns_hi;
559 case 0:
560 dp83640->edata.ns_lo = phy_txts->ns_lo;
561 }
562
563 event.type = PTP_CLOCK_EXTTS;
564 event.index = 0;
565 event.timestamp = phy2txts(&dp83640->edata);
566
567 ptp_clock_event(dp83640->clock->ptp_clock, &event);
568}
569
570static void decode_rxts(struct dp83640_private *dp83640,
571 struct phy_rxts *phy_rxts)
572{
573 struct rxts *rxts;
574 unsigned long flags;
575
576 spin_lock_irqsave(&dp83640->rx_lock, flags);
577
578 prune_rx_ts(dp83640);
579
580 if (list_empty(&dp83640->rxpool)) {
581 pr_warning("dp83640: rx timestamp pool is empty\n");
582 goto out;
583 }
584 rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
585 list_del_init(&rxts->list);
586 phy2rxts(phy_rxts, rxts);
587 list_add_tail(&rxts->list, &dp83640->rxts);
588out:
589 spin_unlock_irqrestore(&dp83640->rx_lock, flags);
590}
591
592static void decode_txts(struct dp83640_private *dp83640,
593 struct phy_txts *phy_txts)
594{
595 struct skb_shared_hwtstamps shhwtstamps;
596 struct sk_buff *skb;
597 u64 ns;
598
599 /* We must already have the skb that triggered this. */
600
601 skb = skb_dequeue(&dp83640->tx_queue);
602
603 if (!skb) {
604 pr_warning("dp83640: have timestamp but tx_queue empty\n");
605 return;
606 }
607 ns = phy2txts(phy_txts);
608 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
609 shhwtstamps.hwtstamp = ns_to_ktime(ns);
610 skb_complete_tx_timestamp(skb, &shhwtstamps);
611}
612
613static void decode_status_frame(struct dp83640_private *dp83640,
614 struct sk_buff *skb)
615{
616 struct phy_rxts *phy_rxts;
617 struct phy_txts *phy_txts;
618 u8 *ptr;
619 int len, size;
620 u16 ests, type;
621
622 ptr = skb->data + 2;
623
624 for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {
625
626 type = *(u16 *)ptr;
627 ests = type & 0x0fff;
628 type = type & 0xf000;
629 len -= sizeof(type);
630 ptr += sizeof(type);
631
632 if (PSF_RX == type && len >= sizeof(*phy_rxts)) {
633
634 phy_rxts = (struct phy_rxts *) ptr;
635 decode_rxts(dp83640, phy_rxts);
636 size = sizeof(*phy_rxts);
637
638 } else if (PSF_TX == type && len >= sizeof(*phy_txts)) {
639
640 phy_txts = (struct phy_txts *) ptr;
641 decode_txts(dp83640, phy_txts);
642 size = sizeof(*phy_txts);
643
644 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
645
646 phy_txts = (struct phy_txts *) ptr;
647 decode_evnt(dp83640, phy_txts, ests);
648 size = sizeof(*phy_txts);
649
650 } else {
651 size = 0;
652 break;
653 }
654 ptr += size;
655 }
656}
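	/* Status frame layout, as parsed above (illustrative summary): after
	 * skipping the first two bytes, the payload is a sequence of 16-bit
	 * words whose high nibble selects PSF_RX, PSF_TX or PSF_EVNT and whose
	 * low 12 bits carry the event status (ests); each word is immediately
	 * followed by a struct phy_rxts or struct phy_txts, and parsing stops
	 * at the first unrecognized type.
	 */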
657
658static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
659{
660 u16 *seqid;
661 unsigned int offset;
662 u8 *msgtype, *data = skb_mac_header(skb);
663
664 /* check sequenceID, messageType, 12 bit hash of offset 20-29 */
665
666 switch (type) {
667 case PTP_CLASS_V1_IPV4:
668 case PTP_CLASS_V2_IPV4:
669 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
670 break;
671 case PTP_CLASS_V1_IPV6:
672 case PTP_CLASS_V2_IPV6:
673 offset = OFF_PTP6;
674 break;
675 case PTP_CLASS_V2_L2:
676 offset = ETH_HLEN;
677 break;
678 case PTP_CLASS_V2_VLAN:
679 offset = ETH_HLEN + VLAN_HLEN;
680 break;
681 default:
682 return 0;
683 }
684
685 if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
686 return 0;
687
688 if (unlikely(type & PTP_CLASS_V1))
689 msgtype = data + offset + OFF_PTP_CONTROL;
690 else
691 msgtype = data + offset;
692
693 seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
694
695 return (rxts->msgtype == (*msgtype & 0xf) &&
696 rxts->seqid == ntohs(*seqid));
697}
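	/* Example (illustrative): for PTP_CLASS_V2_IPV4 with a 20-byte IP
	 * header, offset = ETH_HLEN (14) + IPV4_HLEN (20) + UDP_HLEN (8) = 42,
	 * so the messageType nibble is taken from byte 42 of the frame and the
	 * sequenceId from bytes 42 + OFF_PTP_SEQUENCE_ID onward, both counted
	 * from the MAC header.
	 */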
698
699static void dp83640_free_clocks(void)
700{
701 struct dp83640_clock *clock;
702 struct list_head *this, *next;
703
704 mutex_lock(&phyter_clocks_lock);
705
706 list_for_each_safe(this, next, &phyter_clocks) {
707 clock = list_entry(this, struct dp83640_clock, list);
708 if (!list_empty(&clock->phylist)) {
709 pr_warning("phy list non-empty while unloading");
710 BUG();
711 }
712 list_del(&clock->list);
713 mutex_destroy(&clock->extreg_lock);
714 mutex_destroy(&clock->clock_lock);
715 put_device(&clock->bus->dev);
716 kfree(clock);
717 }
718
719 mutex_unlock(&phyter_clocks_lock);
720}
721
722static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
723{
724 INIT_LIST_HEAD(&clock->list);
725 clock->bus = bus;
726 mutex_init(&clock->extreg_lock);
727 mutex_init(&clock->clock_lock);
728 INIT_LIST_HEAD(&clock->phylist);
729 clock->caps.owner = THIS_MODULE;
730 sprintf(clock->caps.name, "dp83640 timer");
731 clock->caps.max_adj = 1953124;
732 clock->caps.n_alarm = 0;
733 clock->caps.n_ext_ts = N_EXT_TS;
734 clock->caps.n_per_out = 0;
735 clock->caps.pps = 0;
736 clock->caps.adjfreq = ptp_dp83640_adjfreq;
737 clock->caps.adjtime = ptp_dp83640_adjtime;
738 clock->caps.gettime = ptp_dp83640_gettime;
739 clock->caps.settime = ptp_dp83640_settime;
740 clock->caps.enable = ptp_dp83640_enable;
741 /*
742 * Get a reference to this bus instance.
743 */
744 get_device(&bus->dev);
745}
746
747static int choose_this_phy(struct dp83640_clock *clock,
748 struct phy_device *phydev)
749{
750 if (chosen_phy == -1 && !clock->chosen)
751 return 1;
752
753 if (chosen_phy == phydev->addr)
754 return 1;
755
756 return 0;
757}
758
759static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock)
760{
761 if (clock)
762 mutex_lock(&clock->clock_lock);
763 return clock;
764}
765
766/*
767 * Look up and lock a clock by bus instance.
768 * If there is no clock for this bus, then create it first.
769 */
770static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
771{
772 struct dp83640_clock *clock = NULL, *tmp;
773 struct list_head *this;
774
775 mutex_lock(&phyter_clocks_lock);
776
777 list_for_each(this, &phyter_clocks) {
778 tmp = list_entry(this, struct dp83640_clock, list);
779 if (tmp->bus == bus) {
780 clock = tmp;
781 break;
782 }
783 }
784 if (clock)
785 goto out;
786
787 clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
788 if (!clock)
789 goto out;
790
791 dp83640_clock_init(clock, bus);
 792	list_add_tail(&clock->list, &phyter_clocks);
793out:
794 mutex_unlock(&phyter_clocks_lock);
795
796 return dp83640_clock_get(clock);
797}
798
799static void dp83640_clock_put(struct dp83640_clock *clock)
800{
801 mutex_unlock(&clock->clock_lock);
802}
803
804static int dp83640_probe(struct phy_device *phydev)
805{
806 struct dp83640_clock *clock;
807 struct dp83640_private *dp83640;
808 int err = -ENOMEM, i;
809
810 if (phydev->addr == BROADCAST_ADDR)
811 return 0;
812
813 clock = dp83640_clock_get_bus(phydev->bus);
814 if (!clock)
815 goto no_clock;
816
817 dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
818 if (!dp83640)
819 goto no_memory;
820
821 dp83640->phydev = phydev;
822 INIT_WORK(&dp83640->ts_work, rx_timestamp_work);
823
824 INIT_LIST_HEAD(&dp83640->rxts);
825 INIT_LIST_HEAD(&dp83640->rxpool);
826 for (i = 0; i < MAX_RXTS; i++)
827 list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
828
829 phydev->priv = dp83640;
830
831 spin_lock_init(&dp83640->rx_lock);
832 skb_queue_head_init(&dp83640->rx_queue);
833 skb_queue_head_init(&dp83640->tx_queue);
834
835 dp83640->clock = clock;
836
837 if (choose_this_phy(clock, phydev)) {
838 clock->chosen = dp83640;
839 clock->ptp_clock = ptp_clock_register(&clock->caps);
840 if (IS_ERR(clock->ptp_clock)) {
841 err = PTR_ERR(clock->ptp_clock);
842 goto no_register;
843 }
844 } else
845 list_add_tail(&dp83640->list, &clock->phylist);
846
847 if (clock->chosen && !list_empty(&clock->phylist))
848 recalibrate(clock);
849 else
850 enable_broadcast(dp83640->phydev, clock->page, 1);
851
852 dp83640_clock_put(clock);
853 return 0;
854
855no_register:
856 clock->chosen = NULL;
857 kfree(dp83640);
858no_memory:
859 dp83640_clock_put(clock);
860no_clock:
861 return err;
862}
863
864static void dp83640_remove(struct phy_device *phydev)
865{
866 struct dp83640_clock *clock;
867 struct list_head *this, *next;
868 struct dp83640_private *tmp, *dp83640 = phydev->priv;
869
870 if (phydev->addr == BROADCAST_ADDR)
871 return;
872
873 enable_status_frames(phydev, false);
874 cancel_work_sync(&dp83640->ts_work);
875
876 clock = dp83640_clock_get(dp83640->clock);
877
878 if (dp83640 == clock->chosen) {
879 ptp_clock_unregister(clock->ptp_clock);
880 clock->chosen = NULL;
881 } else {
882 list_for_each_safe(this, next, &clock->phylist) {
883 tmp = list_entry(this, struct dp83640_private, list);
884 if (tmp == dp83640) {
885 list_del_init(&tmp->list);
886 break;
887 }
888 }
889 }
890
891 dp83640_clock_put(clock);
892 kfree(dp83640);
893}
894
895static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
896{
897 struct dp83640_private *dp83640 = phydev->priv;
898 struct hwtstamp_config cfg;
899 u16 txcfg0, rxcfg0;
900
901 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
902 return -EFAULT;
903
904 if (cfg.flags) /* reserved for future extensions */
905 return -EINVAL;
906
907 switch (cfg.tx_type) {
908 case HWTSTAMP_TX_OFF:
909 dp83640->hwts_tx_en = 0;
910 break;
911 case HWTSTAMP_TX_ON:
912 dp83640->hwts_tx_en = 1;
913 break;
914 default:
915 return -ERANGE;
916 }
917
918 switch (cfg.rx_filter) {
919 case HWTSTAMP_FILTER_NONE:
920 dp83640->hwts_rx_en = 0;
921 dp83640->layer = 0;
922 dp83640->version = 0;
923 break;
924 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
925 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
926 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
927 dp83640->hwts_rx_en = 1;
928 dp83640->layer = LAYER4;
929 dp83640->version = 1;
930 break;
931 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
932 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
933 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
934 dp83640->hwts_rx_en = 1;
935 dp83640->layer = LAYER4;
936 dp83640->version = 2;
937 break;
938 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
939 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
940 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
941 dp83640->hwts_rx_en = 1;
942 dp83640->layer = LAYER2;
943 dp83640->version = 2;
944 break;
945 case HWTSTAMP_FILTER_PTP_V2_EVENT:
946 case HWTSTAMP_FILTER_PTP_V2_SYNC:
947 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
948 dp83640->hwts_rx_en = 1;
949 dp83640->layer = LAYER4|LAYER2;
950 dp83640->version = 2;
951 break;
952 default:
953 return -ERANGE;
954 }
955
956 txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
957 rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
958
959 if (dp83640->layer & LAYER2) {
960 txcfg0 |= TX_L2_EN;
961 rxcfg0 |= RX_L2_EN;
962 }
963 if (dp83640->layer & LAYER4) {
964 txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
965 rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
966 }
967
968 if (dp83640->hwts_tx_en)
969 txcfg0 |= TX_TS_EN;
970
971 if (dp83640->hwts_rx_en)
972 rxcfg0 |= RX_TS_EN;
973
974 mutex_lock(&dp83640->clock->extreg_lock);
975
976 if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
977 enable_status_frames(phydev, true);
978 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
979 }
980
981 ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
982 ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
983
984 mutex_unlock(&dp83640->clock->extreg_lock);
985
986 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
987}
988
989static void rx_timestamp_work(struct work_struct *work)
990{
991 struct dp83640_private *dp83640 =
992 container_of(work, struct dp83640_private, ts_work);
993 struct list_head *this, *next;
994 struct rxts *rxts;
995 struct skb_shared_hwtstamps *shhwtstamps;
996 struct sk_buff *skb;
997 unsigned int type;
998 unsigned long flags;
999
1000 /* Deliver each deferred packet, with or without a time stamp. */
1001
1002 while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) {
1003 type = SKB_PTP_TYPE(skb);
1004 spin_lock_irqsave(&dp83640->rx_lock, flags);
1005 list_for_each_safe(this, next, &dp83640->rxts) {
1006 rxts = list_entry(this, struct rxts, list);
1007 if (match(skb, type, rxts)) {
1008 shhwtstamps = skb_hwtstamps(skb);
1009 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
1010 shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
1011 list_del_init(&rxts->list);
1012 list_add(&rxts->list, &dp83640->rxpool);
1013 break;
1014 }
1015 }
1016 spin_unlock_irqrestore(&dp83640->rx_lock, flags);
1017 netif_rx(skb);
1018 }
1019
1020 /* Clear out expired time stamps. */
1021
1022 spin_lock_irqsave(&dp83640->rx_lock, flags);
1023 prune_rx_ts(dp83640);
1024 spin_unlock_irqrestore(&dp83640->rx_lock, flags);
1025}
1026
1027static bool dp83640_rxtstamp(struct phy_device *phydev,
1028 struct sk_buff *skb, int type)
1029{
1030 struct dp83640_private *dp83640 = phydev->priv;
1031
1032 if (!dp83640->hwts_rx_en)
1033 return false;
1034
1035 if (is_status_frame(skb, type)) {
1036 decode_status_frame(dp83640, skb);
1037 /* Let the stack drop this frame. */
1038 return false;
1039 }
1040
1041 SKB_PTP_TYPE(skb) = type;
1042 skb_queue_tail(&dp83640->rx_queue, skb);
1043 schedule_work(&dp83640->ts_work);
1044
1045 return true;
1046}
1047
1048static void dp83640_txtstamp(struct phy_device *phydev,
1049 struct sk_buff *skb, int type)
1050{
1051 struct dp83640_private *dp83640 = phydev->priv;
1052
1053 if (!dp83640->hwts_tx_en) {
1054 kfree_skb(skb);
1055 return;
1056 }
1057 skb_queue_tail(&dp83640->tx_queue, skb);
1058 schedule_work(&dp83640->ts_work);
1059}
1060
1061static struct phy_driver dp83640_driver = {
1062 .phy_id = DP83640_PHY_ID,
1063 .phy_id_mask = 0xfffffff0,
1064 .name = "NatSemi DP83640",
1065 .features = PHY_BASIC_FEATURES,
1066 .flags = 0,
1067 .probe = dp83640_probe,
1068 .remove = dp83640_remove,
1069 .config_aneg = genphy_config_aneg,
1070 .read_status = genphy_read_status,
1071 .hwtstamp = dp83640_hwtstamp,
1072 .rxtstamp = dp83640_rxtstamp,
1073 .txtstamp = dp83640_txtstamp,
1074 .driver = {.owner = THIS_MODULE,}
1075};
1076
1077static int __init dp83640_init(void)
1078{
1079 return phy_driver_register(&dp83640_driver);
1080}
1081
1082static void __exit dp83640_exit(void)
1083{
1084 dp83640_free_clocks();
1085 phy_driver_unregister(&dp83640_driver);
1086}
1087
1088MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
1089MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
1090MODULE_LICENSE("GPL");
1091
1092module_init(dp83640_init);
1093module_exit(dp83640_exit);
1094
1095static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
1096 { DP83640_PHY_ID, 0xfffffff0 },
1097 { }
1098};
1099
1100MODULE_DEVICE_TABLE(mdio, dp83640_tbl);
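The frequency conversion in ptp_dp83640_adjfreq() deserves a note: assuming the PHYTER adds a 32-bit fractional-nanosecond correction on each 8 ns cycle of its 125 MHz reference clock, an offset of ppb parts per billion corresponds to rate = ppb * 2^35 / 10^9, which is exactly the (ppb << 26) / 1953125 computed above, since 1953125 = 10^9 / 2^9. The result fits the 16-bit PTP_RATEL plus 10-bit PTP_RATEH split, consistent with the advertised max_adj of 1953124 ppb. A standalone sketch of that conversion (the helper name is invented for illustration):

#include <stdint.h>

/* Hypothetical helper mirroring ptp_dp83640_adjfreq(): returns the magnitude
 * only; the caller sets PTP_RATE_DIR in PTP_RATEH for negative adjustments. */
static uint32_t dp83640_ppb_to_rate(int32_t ppb)
{
	uint64_t rate = (ppb < 0) ? -(int64_t)ppb : ppb;

	rate <<= 26;            /* ppb * 2^26 ...                         */
	rate /= 1953125;        /* ... / (10^9 / 2^9) = ppb * 2^35 / 10^9 */
	return (uint32_t)rate;  /* low 16 bits -> PTP_RATEL, next 10 -> PTP_RATEH */
}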
diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
new file mode 100644
index 000000000000..e7fe41117003
--- /dev/null
+++ b/drivers/net/phy/dp83640_reg.h
@@ -0,0 +1,267 @@
1/* dp83640_reg.h
2 * Generated by regen.tcl on Thu Feb 17 10:02:48 AM CET 2011
3 */
4#ifndef HAVE_DP83640_REGISTERS
5#define HAVE_DP83640_REGISTERS
6
7#define PAGE0 0x0000
8#define PHYCR2 0x001c /* PHY Control Register 2 */
9
10#define PAGE4 0x0004
11#define PTP_CTL 0x0014 /* PTP Control Register */
12#define PTP_TDR 0x0015 /* PTP Time Data Register */
13#define PTP_STS 0x0016 /* PTP Status Register */
14#define PTP_TSTS 0x0017 /* PTP Trigger Status Register */
15#define PTP_RATEL 0x0018 /* PTP Rate Low Register */
16#define PTP_RATEH 0x0019 /* PTP Rate High Register */
17#define PTP_RDCKSUM 0x001a /* PTP Read Checksum */
18#define PTP_WRCKSUM 0x001b /* PTP Write Checksum */
19#define PTP_TXTS 0x001c /* PTP Transmit Timestamp Register, in four 16-bit reads */
20#define PTP_RXTS 0x001d /* PTP Receive Timestamp Register, in six? 16-bit reads */
21#define PTP_ESTS 0x001e /* PTP Event Status Register */
22#define PTP_EDATA 0x001f /* PTP Event Data Register */
23
24#define PAGE5 0x0005
25#define PTP_TRIG 0x0014 /* PTP Trigger Configuration Register */
26#define PTP_EVNT 0x0015 /* PTP Event Configuration Register */
27#define PTP_TXCFG0 0x0016 /* PTP Transmit Configuration Register 0 */
28#define PTP_TXCFG1 0x0017 /* PTP Transmit Configuration Register 1 */
29#define PSF_CFG0 0x0018 /* PHY Status Frame Configuration Register 0 */
30#define PTP_RXCFG0 0x0019 /* PTP Receive Configuration Register 0 */
31#define PTP_RXCFG1 0x001a /* PTP Receive Configuration Register 1 */
32#define PTP_RXCFG2 0x001b /* PTP Receive Configuration Register 2 */
33#define PTP_RXCFG3 0x001c /* PTP Receive Configuration Register 3 */
34#define PTP_RXCFG4 0x001d /* PTP Receive Configuration Register 4 */
35#define PTP_TRDL 0x001e /* PTP Temporary Rate Duration Low Register */
36#define PTP_TRDH 0x001f /* PTP Temporary Rate Duration High Register */
37
38#define PAGE6 0x0006
39#define PTP_COC 0x0014 /* PTP Clock Output Control Register */
40#define PSF_CFG1 0x0015 /* PHY Status Frame Configuration Register 1 */
41#define PSF_CFG2 0x0016 /* PHY Status Frame Configuration Register 2 */
42#define PSF_CFG3 0x0017 /* PHY Status Frame Configuration Register 3 */
43#define PSF_CFG4 0x0018 /* PHY Status Frame Configuration Register 4 */
44#define PTP_SFDCFG 0x0019 /* PTP SFD Configuration Register */
45#define PTP_INTCTL 0x001a /* PTP Interrupt Control Register */
46#define PTP_CLKSRC 0x001b /* PTP Clock Source Register */
47#define PTP_ETR 0x001c /* PTP Ethernet Type Register */
48#define PTP_OFF 0x001d /* PTP Offset Register */
49#define PTP_GPIOMON 0x001e /* PTP GPIO Monitor Register */
50#define PTP_RXHASH 0x001f /* PTP Receive Hash Register */
51
52/* Bit definitions for the PHYCR2 register */
53#define BC_WRITE (1<<11) /* Broadcast Write Enable */
54
55/* Bit definitions for the PTP_CTL register */
56#define TRIG_SEL_SHIFT (10) /* PTP Trigger Select */
57#define TRIG_SEL_MASK (0x7)
58#define TRIG_DIS (1<<9) /* Disable PTP Trigger */
59#define TRIG_EN (1<<8) /* Enable PTP Trigger */
60#define TRIG_READ (1<<7) /* Read PTP Trigger */
61#define TRIG_LOAD (1<<6) /* Load PTP Trigger */
62#define PTP_RD_CLK (1<<5) /* Read PTP Clock */
63#define PTP_LOAD_CLK (1<<4) /* Load PTP Clock */
64#define PTP_STEP_CLK (1<<3) /* Step PTP Clock */
65#define PTP_ENABLE (1<<2) /* Enable PTP Clock */
66#define PTP_DISABLE (1<<1) /* Disable PTP Clock */
67#define PTP_RESET (1<<0) /* Reset PTP Clock */
68
69/* Bit definitions for the PTP_STS register */
70#define TXTS_RDY (1<<11) /* Transmit Timestamp Ready */
71#define RXTS_RDY (1<<10) /* Receive Timestamp Ready */
72#define TRIG_DONE (1<<9) /* PTP Trigger Done */
73#define EVENT_RDY (1<<8) /* PTP Event Timestamp Ready */
74#define TXTS_IE (1<<3) /* Transmit Timestamp Interrupt Enable */
75#define RXTS_IE (1<<2) /* Receive Timestamp Interrupt Enable */
76#define TRIG_IE (1<<1) /* Trigger Interrupt Enable */
77#define EVENT_IE (1<<0) /* Event Interrupt Enable */
78
79/* Bit definitions for the PTP_TSTS register */
80#define TRIG7_ERROR (1<<15) /* Trigger 7 Error */
81#define TRIG7_ACTIVE (1<<14) /* Trigger 7 Active */
82#define TRIG6_ERROR (1<<13) /* Trigger 6 Error */
83#define TRIG6_ACTIVE (1<<12) /* Trigger 6 Active */
84#define TRIG5_ERROR (1<<11) /* Trigger 5 Error */
85#define TRIG5_ACTIVE (1<<10) /* Trigger 5 Active */
86#define TRIG4_ERROR (1<<9) /* Trigger 4 Error */
87#define TRIG4_ACTIVE (1<<8) /* Trigger 4 Active */
88#define TRIG3_ERROR (1<<7) /* Trigger 3 Error */
89#define TRIG3_ACTIVE (1<<6) /* Trigger 3 Active */
90#define TRIG2_ERROR (1<<5) /* Trigger 2 Error */
91#define TRIG2_ACTIVE (1<<4) /* Trigger 2 Active */
92#define TRIG1_ERROR (1<<3) /* Trigger 1 Error */
93#define TRIG1_ACTIVE (1<<2) /* Trigger 1 Active */
94#define TRIG0_ERROR (1<<1) /* Trigger 0 Error */
95#define TRIG0_ACTIVE (1<<0) /* Trigger 0 Active */
96
97/* Bit definitions for the PTP_RATEH register */
98#define PTP_RATE_DIR (1<<15) /* PTP Rate Direction */
99#define PTP_TMP_RATE (1<<14) /* PTP Temporary Rate */
100#define PTP_RATE_HI_SHIFT (0) /* PTP Rate High 10-bits */
101#define PTP_RATE_HI_MASK (0x3ff)
102
103/* Bit definitions for the PTP_ESTS register */
104#define EVNTS_MISSED_SHIFT (8) /* Indicates number of events missed */
105#define EVNTS_MISSED_MASK (0x7)
106#define EVNT_TS_LEN_SHIFT (6) /* Indicates length of the Timestamp field in 16-bit words minus 1 */
107#define EVNT_TS_LEN_MASK (0x3)
108#define EVNT_RF (1<<5) /* Indicates whether the event is a rise or falling event */
109#define EVNT_NUM_SHIFT (2) /* Indicates Event Timestamp Unit which detected an event */
110#define EVNT_NUM_MASK (0x7)
111#define MULT_EVNT (1<<1) /* Indicates multiple events were detected at the same time */
112#define EVENT_DET (1<<0) /* PTP Event Detected */
113
114/* Bit definitions for the PTP_EDATA register */
115#define E7_RISE (1<<15) /* Indicates direction of Event 7 */
116#define E7_DET (1<<14) /* Indicates Event 7 detected */
117#define E6_RISE (1<<13) /* Indicates direction of Event 6 */
118#define E6_DET (1<<12) /* Indicates Event 6 detected */
119#define E5_RISE (1<<11) /* Indicates direction of Event 5 */
120#define E5_DET (1<<10) /* Indicates Event 5 detected */
121#define E4_RISE (1<<9) /* Indicates direction of Event 4 */
122#define E4_DET (1<<8) /* Indicates Event 4 detected */
123#define E3_RISE (1<<7) /* Indicates direction of Event 3 */
124#define E3_DET (1<<6) /* Indicates Event 3 detected */
125#define E2_RISE (1<<5) /* Indicates direction of Event 2 */
126#define E2_DET (1<<4) /* Indicates Event 2 detected */
127#define E1_RISE (1<<3) /* Indicates direction of Event 1 */
128#define E1_DET (1<<2) /* Indicates Event 1 detected */
129#define E0_RISE (1<<1) /* Indicates direction of Event 0 */
130#define E0_DET (1<<0) /* Indicates Event 0 detected */
131
132/* Bit definitions for the PTP_TRIG register */
133#define TRIG_PULSE (1<<15) /* generate a Pulse rather than a single edge */
134#define TRIG_PER (1<<14) /* generate a periodic signal */
135#define TRIG_IF_LATE (1<<13) /* trigger immediately if already past */
136#define TRIG_NOTIFY (1<<12) /* Trigger Notification Enable */
137#define TRIG_GPIO_SHIFT (8) /* Trigger GPIO Connection, value 1-12 */
138#define TRIG_GPIO_MASK (0xf)
139#define TRIG_TOGGLE (1<<7) /* Trigger Toggle Mode Enable */
140#define TRIG_CSEL_SHIFT (1) /* Trigger Configuration Select */
141#define TRIG_CSEL_MASK (0x7)
142#define TRIG_WR (1<<0) /* Trigger Configuration Write */
143
144/* Bit definitions for the PTP_EVNT register */
145#define EVNT_RISE (1<<14) /* Event Rise Detect Enable */
146#define EVNT_FALL (1<<13) /* Event Fall Detect Enable */
147#define EVNT_SINGLE (1<<12) /* enable single event capture operation */
148#define EVNT_GPIO_SHIFT (8) /* Event GPIO Connection, value 1-12 */
149#define EVNT_GPIO_MASK (0xf)
150#define EVNT_SEL_SHIFT (1) /* Event Select */
151#define EVNT_SEL_MASK (0x7)
152#define EVNT_WR (1<<0) /* Event Configuration Write */
153
154/* Bit definitions for the PTP_TXCFG0 register */
155#define SYNC_1STEP (1<<15) /* insert timestamp into transmit Sync Messages */
156#define DR_INSERT (1<<13) /* Insert Delay_Req Timestamp in Delay_Resp (dangerous) */
157#define NTP_TS_EN (1<<12) /* Enable Timestamping of NTP Packets */
158#define IGNORE_2STEP (1<<11) /* Ignore Two_Step flag for One-Step operation */
159#define CRC_1STEP (1<<10) /* Disable checking of CRC for One-Step operation */
160#define CHK_1STEP (1<<9) /* Enable UDP Checksum correction for One-Step Operation */
161#define IP1588_EN (1<<8) /* Enable IEEE 1588 defined IP address filter */
162#define TX_L2_EN (1<<7) /* Layer2 Timestamp Enable */
163#define TX_IPV6_EN (1<<6) /* IPv6 Timestamp Enable */
164#define TX_IPV4_EN (1<<5) /* IPv4 Timestamp Enable */
165#define TX_PTP_VER_SHIFT (1) /* Enable Timestamp capture for IEEE 1588 version X */
166#define TX_PTP_VER_MASK (0xf)
167#define TX_TS_EN (1<<0) /* Transmit Timestamp Enable */
168
169/* Bit definitions for the PTP_TXCFG1 register */
170#define BYTE0_MASK_SHIFT (8) /* Bit mask to be used for matching Byte0 of the PTP Message */
171#define BYTE0_MASK_MASK (0xff)
172#define BYTE0_DATA_SHIFT (0) /* Data to be used for matching Byte0 of the PTP Message */
173#define BYTE0_DATA_MASK (0xff)
174
175/* Bit definitions for the PSF_CFG0 register */
176#define MAC_SRC_ADD_SHIFT (11) /* Status Frame Mac Source Address */
177#define MAC_SRC_ADD_MASK (0x3)
178#define MIN_PRE_SHIFT (8) /* Status Frame Minimum Preamble */
179#define MIN_PRE_MASK (0x7)
180#define PSF_ENDIAN (1<<7) /* Status Frame Endian Control */
181#define PSF_IPV4 (1<<6) /* Status Frame IPv4 Enable */
182#define PSF_PCF_RD (1<<5) /* Control Frame Read PHY Status Frame Enable */
183#define PSF_ERR_EN (1<<4) /* Error PHY Status Frame Enable */
184#define PSF_TXTS_EN (1<<3) /* Transmit Timestamp PHY Status Frame Enable */
185#define PSF_RXTS_EN (1<<2) /* Receive Timestamp PHY Status Frame Enable */
186#define PSF_TRIG_EN (1<<1) /* Trigger PHY Status Frame Enable */
187#define PSF_EVNT_EN (1<<0) /* Event PHY Status Frame Enable */
188
189/* Bit definitions for the PTP_RXCFG0 register */
190#define DOMAIN_EN (1<<15) /* Domain Match Enable */
191#define ALT_MAST_DIS (1<<14) /* Alternate Master Timestamp Disable */
192#define USER_IP_SEL (1<<13) /* Selects portion of IP address accessible thru PTP_RXCFG2 */
193#define USER_IP_EN (1<<12) /* Enable User-programmed IP address filter */
194#define RX_SLAVE (1<<11) /* Receive Slave Only */
195#define IP1588_EN_SHIFT (8) /* Enable IEEE 1588 defined IP address filters */
196#define IP1588_EN_MASK (0xf)
197#define RX_L2_EN (1<<7) /* Layer2 Timestamp Enable */
198#define RX_IPV6_EN (1<<6) /* IPv6 Timestamp Enable */
199#define RX_IPV4_EN (1<<5) /* IPv4 Timestamp Enable */
200#define RX_PTP_VER_SHIFT (1) /* Enable Timestamp capture for IEEE 1588 version X */
201#define RX_PTP_VER_MASK (0xf)
202#define RX_TS_EN (1<<0) /* Receive Timestamp Enable */
203
204/* Bit definitions for the PTP_RXCFG1 register */
205#define BYTE0_MASK_SHIFT (8) /* Bit mask to be used for matching Byte0 of the PTP Message */
206#define BYTE0_MASK_MASK (0xff)
207#define BYTE0_DATA_SHIFT (0) /* Data to be used for matching Byte0 of the PTP Message */
208#define BYTE0_DATA_MASK (0xff)
209
210/* Bit definitions for the PTP_RXCFG3 register */
211#define TS_MIN_IFG_SHIFT (12) /* Minimum Inter-frame Gap */
212#define TS_MIN_IFG_MASK (0xf)
213#define ACC_UDP (1<<11) /* Record Timestamp if UDP Checksum Error */
214#define ACC_CRC (1<<10) /* Record Timestamp if CRC Error */
215#define TS_APPEND (1<<9) /* Append Timestamp for L2 */
216#define TS_INSERT (1<<8) /* Enable Timestamp Insertion */
217#define PTP_DOMAIN_SHIFT (0) /* PTP Message domainNumber field */
218#define PTP_DOMAIN_MASK (0xff)
219
220/* Bit definitions for the PTP_RXCFG4 register */
221#define IPV4_UDP_MOD (1<<15) /* Enable IPV4 UDP Modification */
222#define TS_SEC_EN (1<<14) /* Enable Timestamp Seconds */
223#define TS_SEC_LEN_SHIFT (12) /* Inserted Timestamp Seconds Length */
224#define TS_SEC_LEN_MASK (0x3)
225#define RXTS_NS_OFF_SHIFT (6) /* Receive Timestamp Nanoseconds offset */
226#define RXTS_NS_OFF_MASK (0x3f)
227#define RXTS_SEC_OFF_SHIFT (0) /* Receive Timestamp Seconds offset */
228#define RXTS_SEC_OFF_MASK (0x3f)
229
230/* Bit definitions for the PTP_COC register */
231#define PTP_CLKOUT_EN (1<<15) /* PTP Clock Output Enable */
232#define PTP_CLKOUT_SEL (1<<14) /* PTP Clock Output Source Select */
233#define PTP_CLKOUT_SPEEDSEL (1<<13) /* PTP Clock Output I/O Speed Select */
234#define PTP_CLKDIV_SHIFT (0) /* PTP Clock Divide-by Value */
235#define PTP_CLKDIV_MASK (0xff)
236
237/* Bit definitions for the PSF_CFG1 register */
238#define PTPRESERVED_SHIFT (12) /* PTP v2 reserved field */
239#define PTPRESERVED_MASK (0xf)
240#define VERSIONPTP_SHIFT (8) /* PTP v2 versionPTP field */
241#define VERSIONPTP_MASK (0xf)
242#define TRANSPORT_SPECIFIC_SHIFT (4) /* PTP v2 Header transportSpecific field */
243#define TRANSPORT_SPECIFIC_MASK (0xf)
244#define MESSAGETYPE_SHIFT (0) /* PTP v2 messageType field */
245#define MESSAGETYPE_MASK (0xf)
246
247/* Bit definitions for the PTP_SFDCFG register */
248#define TX_SFD_GPIO_SHIFT (4) /* TX SFD GPIO Select, value 1-12 */
249#define TX_SFD_GPIO_MASK (0xf)
250#define RX_SFD_GPIO_SHIFT (0) /* RX SFD GPIO Select, value 1-12 */
251#define RX_SFD_GPIO_MASK (0xf)
252
253/* Bit definitions for the PTP_INTCTL register */
254#define PTP_INT_GPIO_SHIFT (0) /* PTP Interrupt GPIO Select */
255#define PTP_INT_GPIO_MASK (0xf)
256
257/* Bit definitions for the PTP_CLKSRC register */
258#define CLK_SRC_SHIFT (14) /* PTP Clock Source Select */
259#define CLK_SRC_MASK (0x3)
260#define CLK_SRC_PER_SHIFT (0) /* PTP Clock Source Period */
261#define CLK_SRC_PER_MASK (0x7f)
262
263/* Bit definitions for the PTP_OFF register */
264#define PTP_OFFSET_SHIFT (0) /* PTP Message offset from preceding header */
265#define PTP_OFFSET_MASK (0xff)
266
267#endif
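As a usage note, the EVNT_* and EVNTS_* status fields above are what decode_evnt() in dp83640.c consumes; a small sketch of unpacking a PTP_ESTS readout (the function name is invented for illustration and assumes this header is included):

#include <stdint.h>

/* Illustrative only: split a PTP_ESTS readout into its fields. decode_evnt()
 * in dp83640.c switches directly on the raw length-minus-one value to decide
 * how many 16-bit timestamp words the status frame carries. */
static void dp83640_parse_ests(uint16_t ests)
{
	int ts_words = ((ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK) + 1;
	int evnt_num = (ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK;
	int missed   = (ests >> EVNTS_MISSED_SHIFT) & EVNTS_MISSED_MASK;
	int edge     = !!(ests & EVNT_RF);      /* rise vs. fall, see EVNT_RF */
	int detected = !!(ests & EVENT_DET);

	(void)ts_words; (void)evnt_num; (void)missed; (void)edge; (void)detected;
}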
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index db19332a7d87..f4b01c638a33 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -292,6 +292,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
292 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, 292 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, 293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
294 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, 294 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
295 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
295 {} 296 {}
296}; 297};
297 298
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 1e980fdd9d77..1e2af96fc29c 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -1658,11 +1658,9 @@ static int tile_net_stop(struct net_device *dev)
1658 while (tile_net_lepp_free_comps(dev, true)) 1658 while (tile_net_lepp_free_comps(dev, true))
1659 /* loop */; 1659 /* loop */;
1660 1660
1661 /* Wipe the EPP queue. */ 1661 /* Wipe the EPP queue, and wait till the stores hit the EPP. */
1662 memset(priv->eq, 0, sizeof(lepp_queue_t)); 1662 memset(priv->eq, 0, sizeof(lepp_queue_t));
1663 1663 mb();
1664 /* Evict the EPP queue. */
1665 finv_buffer(priv->eq, EQ_SIZE);
1666 1664
1667 return 0; 1665 return 0;
1668} 1666}
@@ -2398,7 +2396,7 @@ static void tile_net_cleanup(void)
2398 struct net_device *dev = tile_net_devs[i]; 2396 struct net_device *dev = tile_net_devs[i];
2399 struct tile_net_priv *priv = netdev_priv(dev); 2397 struct tile_net_priv *priv = netdev_priv(dev);
2400 unregister_netdev(dev); 2398 unregister_netdev(dev);
2401 finv_buffer(priv->eq, EQ_SIZE); 2399 finv_buffer_remote(priv->eq, EQ_SIZE, 0);
2402 __free_pages(priv->eq_pages, EQ_ORDER); 2400 __free_pages(priv->eq_pages, EQ_ORDER);
2403 free_netdev(dev); 2401 free_netdev(dev);
2404 } 2402 }
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4ab557d0287d..cdd3ae486109 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
54#include <linux/usb/usbnet.h> 54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h> 55#include <linux/usb/cdc.h>
56 56
57#define DRIVER_VERSION "06-May-2011" 57#define DRIVER_VERSION "24-May-2011"
58 58
59/* CDC NCM subclass 3.2.1 */ 59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -134,8 +134,6 @@ struct cdc_ncm_ctx {
134 u16 tx_ndp_modulus; 134 u16 tx_ndp_modulus;
135 u16 tx_seq; 135 u16 tx_seq;
136 u16 connected; 136 u16 connected;
137 u8 data_claimed;
138 u8 control_claimed;
139}; 137};
140 138
141static void cdc_ncm_tx_timeout(unsigned long arg); 139static void cdc_ncm_tx_timeout(unsigned long arg);
@@ -460,17 +458,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
460 458
461 del_timer_sync(&ctx->tx_timer); 459 del_timer_sync(&ctx->tx_timer);
462 460
463 if (ctx->data_claimed) {
464 usb_set_intfdata(ctx->data, NULL);
465 usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
466 }
467
468 if (ctx->control_claimed) {
469 usb_set_intfdata(ctx->control, NULL);
470 usb_driver_release_interface(driver_of(ctx->intf),
471 ctx->control);
472 }
473
474 if (ctx->tx_rem_skb != NULL) { 461 if (ctx->tx_rem_skb != NULL) {
475 dev_kfree_skb_any(ctx->tx_rem_skb); 462 dev_kfree_skb_any(ctx->tx_rem_skb);
476 ctx->tx_rem_skb = NULL; 463 ctx->tx_rem_skb = NULL;
@@ -495,7 +482,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
495 482
496 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 483 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
497 if (ctx == NULL) 484 if (ctx == NULL)
498 goto error; 485 return -ENODEV;
499 486
500 memset(ctx, 0, sizeof(*ctx)); 487 memset(ctx, 0, sizeof(*ctx));
501 488
@@ -568,46 +555,36 @@ advance:
568 555
569 /* check if we got everything */ 556 /* check if we got everything */
570 if ((ctx->control == NULL) || (ctx->data == NULL) || 557 if ((ctx->control == NULL) || (ctx->data == NULL) ||
571 (ctx->ether_desc == NULL)) 558 (ctx->ether_desc == NULL) || (ctx->control != intf))
572 goto error; 559 goto error;
573 560
574 /* claim interfaces, if any */ 561 /* claim interfaces, if any */
575 if (ctx->data != intf) { 562 temp = usb_driver_claim_interface(driver, ctx->data, dev);
576 temp = usb_driver_claim_interface(driver, ctx->data, dev); 563 if (temp)
577 if (temp) 564 goto error;
578 goto error;
579 ctx->data_claimed = 1;
580 }
581
582 if (ctx->control != intf) {
583 temp = usb_driver_claim_interface(driver, ctx->control, dev);
584 if (temp)
585 goto error;
586 ctx->control_claimed = 1;
587 }
588 565
589 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; 566 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
590 567
591 /* reset data interface */ 568 /* reset data interface */
592 temp = usb_set_interface(dev->udev, iface_no, 0); 569 temp = usb_set_interface(dev->udev, iface_no, 0);
593 if (temp) 570 if (temp)
594 goto error; 571 goto error2;
595 572
596 /* initialize data interface */ 573 /* initialize data interface */
597 if (cdc_ncm_setup(ctx)) 574 if (cdc_ncm_setup(ctx))
598 goto error; 575 goto error2;
599 576
600 /* configure data interface */ 577 /* configure data interface */
601 temp = usb_set_interface(dev->udev, iface_no, 1); 578 temp = usb_set_interface(dev->udev, iface_no, 1);
602 if (temp) 579 if (temp)
603 goto error; 580 goto error2;
604 581
605 cdc_ncm_find_endpoints(ctx, ctx->data); 582 cdc_ncm_find_endpoints(ctx, ctx->data);
606 cdc_ncm_find_endpoints(ctx, ctx->control); 583 cdc_ncm_find_endpoints(ctx, ctx->control);
607 584
608 if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) || 585 if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
609 (ctx->status_ep == NULL)) 586 (ctx->status_ep == NULL))
610 goto error; 587 goto error2;
611 588
612 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops; 589 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
613 590
@@ -617,7 +594,7 @@ advance:
617 594
618 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress); 595 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
619 if (temp) 596 if (temp)
620 goto error; 597 goto error2;
621 598
622 dev_info(&dev->udev->dev, "MAC-Address: " 599 dev_info(&dev->udev->dev, "MAC-Address: "
623 "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", 600 "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
@@ -642,38 +619,38 @@ advance:
642 ctx->tx_speed = ctx->rx_speed = 0; 619 ctx->tx_speed = ctx->rx_speed = 0;
643 return 0; 620 return 0;
644 621
622error2:
623 usb_set_intfdata(ctx->control, NULL);
624 usb_set_intfdata(ctx->data, NULL);
625 usb_driver_release_interface(driver, ctx->data);
645error: 626error:
646 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]); 627 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
647 dev->data[0] = 0; 628 dev->data[0] = 0;
648 dev_info(&dev->udev->dev, "Descriptor failure\n"); 629 dev_info(&dev->udev->dev, "bind() failure\n");
649 return -ENODEV; 630 return -ENODEV;
650} 631}
651 632
652static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) 633static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
653{ 634{
654 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; 635 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
655 struct usb_driver *driver; 636 struct usb_driver *driver = driver_of(intf);
656 637
657 if (ctx == NULL) 638 if (ctx == NULL)
658 return; /* no setup */ 639 return; /* no setup */
659 640
660 driver = driver_of(intf); 641 /* disconnect master --> disconnect slave */
661 642 if (intf == ctx->control && ctx->data) {
662 usb_set_intfdata(ctx->data, NULL); 643 usb_set_intfdata(ctx->data, NULL);
663 usb_set_intfdata(ctx->control, NULL);
664 usb_set_intfdata(ctx->intf, NULL);
665
666 /* release interfaces, if any */
667 if (ctx->data_claimed) {
668 usb_driver_release_interface(driver, ctx->data); 644 usb_driver_release_interface(driver, ctx->data);
669 ctx->data_claimed = 0; 645 ctx->data = NULL;
670 }
671 646
672 if (ctx->control_claimed) { 647 } else if (intf == ctx->data && ctx->control) {
648 usb_set_intfdata(ctx->control, NULL);
673 usb_driver_release_interface(driver, ctx->control); 649 usb_driver_release_interface(driver, ctx->control);
674 ctx->control_claimed = 0; 650 ctx->control = NULL;
675 } 651 }
676 652
653 usb_set_intfdata(ctx->intf, NULL);
677 cdc_ncm_free(ctx); 654 cdc_ncm_free(ctx);
678} 655}
679 656
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index d7227539484e..0f1f05f6c4f8 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1096,7 +1096,7 @@ struct mac_regs {
1096 1096
1097 volatile __le16 PatternCRC[8]; /* 0xB0 */ 1097 volatile __le16 PatternCRC[8]; /* 0xB0 */
1098 volatile __le32 ByteMask[4][4]; /* 0xC0 */ 1098 volatile __le32 ByteMask[4][4]; /* 0xC0 */
1099} __packed; 1099};
1100 1100
1101 1101
1102enum hw_mib { 1102enum hw_mib {
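
The via-velocity hunk above drops __packed from the tail of the memory-mapped register block. For fields like the ones visible here the members are already naturally aligned, so the attribute does not change size or offsets; it only tells the compiler it may not assume alignment, which can pessimize access on strict-alignment targets. A quick, self-contained check of the layout claim, using host integer types as stand-ins for __le16/__le32 (only the visible tail of the structure is modeled):

#include <stdint.h>
#include <stdio.h>

struct regs_plain {
	uint16_t pattern_crc[8];	/* 0xB0: 16 bytes */
	uint32_t byte_mask[4][4];	/* 0xC0: 64 bytes */
};

struct regs_packed {
	uint16_t pattern_crc[8];
	uint32_t byte_mask[4][4];
} __attribute__((packed));

int main(void)
{
	/* Both report 80 bytes: the fields are naturally aligned already. */
	printf("plain:  %zu bytes\n", sizeof(struct regs_plain));
	printf("packed: %zu bytes\n", sizeof(struct regs_packed));
	return 0;
}
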
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 737b59f1a8dc..9617d3d0ee39 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3242,8 +3242,7 @@ static inline void show_version(void)
3242 rcsdate++; 3242 rcsdate++;
3243 tmp = strrchr(rcsdate, ' '); 3243 tmp = strrchr(rcsdate, ' ');
3244 *tmp = '\0'; 3244 *tmp = '\0';
3245 printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n", 3245 printk(KERN_INFO "Cyclades-PC300 driver %s %s\n", rcsvers, rcsdate);
3246 rcsvers, rcsdate, __DATE__, __TIME__);
3247} /* show_version */ 3246} /* show_version */
3248 3247
3249static const struct net_device_ops cpc_netdev_ops = { 3248static const struct net_device_ops cpc_netdev_ops = {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a70c512f05d2..55cf71fbffe3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4501,17 +4501,15 @@ static int setup_proc_entry( struct net_device *dev,
4501 struct proc_dir_entry *entry; 4501 struct proc_dir_entry *entry;
4502 /* First setup the device directory */ 4502 /* First setup the device directory */
4503 strcpy(apriv->proc_name,dev->name); 4503 strcpy(apriv->proc_name,dev->name);
4504 apriv->proc_entry = create_proc_entry(apriv->proc_name, 4504 apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
4505 S_IFDIR|airo_perm, 4505 airo_entry);
4506 airo_entry);
4507 if (!apriv->proc_entry) 4506 if (!apriv->proc_entry)
4508 goto fail; 4507 goto fail;
4509 apriv->proc_entry->uid = proc_uid; 4508 apriv->proc_entry->uid = proc_uid;
4510 apriv->proc_entry->gid = proc_gid; 4509 apriv->proc_entry->gid = proc_gid;
4511 4510
4512 /* Setup the StatsDelta */ 4511 /* Setup the StatsDelta */
4513 entry = proc_create_data("StatsDelta", 4512 entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
4514 S_IFREG | (S_IRUGO&proc_perm),
4515 apriv->proc_entry, &proc_statsdelta_ops, dev); 4513 apriv->proc_entry, &proc_statsdelta_ops, dev);
4516 if (!entry) 4514 if (!entry)
4517 goto fail_stats_delta; 4515 goto fail_stats_delta;
@@ -4519,8 +4517,7 @@ static int setup_proc_entry( struct net_device *dev,
4519 entry->gid = proc_gid; 4517 entry->gid = proc_gid;
4520 4518
4521 /* Setup the Stats */ 4519 /* Setup the Stats */
4522 entry = proc_create_data("Stats", 4520 entry = proc_create_data("Stats", S_IRUGO & proc_perm,
4523 S_IFREG | (S_IRUGO&proc_perm),
4524 apriv->proc_entry, &proc_stats_ops, dev); 4521 apriv->proc_entry, &proc_stats_ops, dev);
4525 if (!entry) 4522 if (!entry)
4526 goto fail_stats; 4523 goto fail_stats;
@@ -4528,8 +4525,7 @@ static int setup_proc_entry( struct net_device *dev,
4528 entry->gid = proc_gid; 4525 entry->gid = proc_gid;
4529 4526
4530 /* Setup the Status */ 4527 /* Setup the Status */
4531 entry = proc_create_data("Status", 4528 entry = proc_create_data("Status", S_IRUGO & proc_perm,
4532 S_IFREG | (S_IRUGO&proc_perm),
4533 apriv->proc_entry, &proc_status_ops, dev); 4529 apriv->proc_entry, &proc_status_ops, dev);
4534 if (!entry) 4530 if (!entry)
4535 goto fail_status; 4531 goto fail_status;
@@ -4537,8 +4533,7 @@ static int setup_proc_entry( struct net_device *dev,
4537 entry->gid = proc_gid; 4533 entry->gid = proc_gid;
4538 4534
4539 /* Setup the Config */ 4535 /* Setup the Config */
4540 entry = proc_create_data("Config", 4536 entry = proc_create_data("Config", proc_perm,
4541 S_IFREG | proc_perm,
4542 apriv->proc_entry, &proc_config_ops, dev); 4537 apriv->proc_entry, &proc_config_ops, dev);
4543 if (!entry) 4538 if (!entry)
4544 goto fail_config; 4539 goto fail_config;
@@ -4546,8 +4541,7 @@ static int setup_proc_entry( struct net_device *dev,
4546 entry->gid = proc_gid; 4541 entry->gid = proc_gid;
4547 4542
4548 /* Setup the SSID */ 4543 /* Setup the SSID */
4549 entry = proc_create_data("SSID", 4544 entry = proc_create_data("SSID", proc_perm,
4550 S_IFREG | proc_perm,
4551 apriv->proc_entry, &proc_SSID_ops, dev); 4545 apriv->proc_entry, &proc_SSID_ops, dev);
4552 if (!entry) 4546 if (!entry)
4553 goto fail_ssid; 4547 goto fail_ssid;
@@ -4555,8 +4549,7 @@ static int setup_proc_entry( struct net_device *dev,
4555 entry->gid = proc_gid; 4549 entry->gid = proc_gid;
4556 4550
4557 /* Setup the APList */ 4551 /* Setup the APList */
4558 entry = proc_create_data("APList", 4552 entry = proc_create_data("APList", proc_perm,
4559 S_IFREG | proc_perm,
4560 apriv->proc_entry, &proc_APList_ops, dev); 4553 apriv->proc_entry, &proc_APList_ops, dev);
4561 if (!entry) 4554 if (!entry)
4562 goto fail_aplist; 4555 goto fail_aplist;
@@ -4564,8 +4557,7 @@ static int setup_proc_entry( struct net_device *dev,
4564 entry->gid = proc_gid; 4557 entry->gid = proc_gid;
4565 4558
4566 /* Setup the BSSList */ 4559 /* Setup the BSSList */
4567 entry = proc_create_data("BSSList", 4560 entry = proc_create_data("BSSList", proc_perm,
4568 S_IFREG | proc_perm,
4569 apriv->proc_entry, &proc_BSSList_ops, dev); 4561 apriv->proc_entry, &proc_BSSList_ops, dev);
4570 if (!entry) 4562 if (!entry)
4571 goto fail_bsslist; 4563 goto fail_bsslist;
@@ -4573,8 +4565,7 @@ static int setup_proc_entry( struct net_device *dev,
4573 entry->gid = proc_gid; 4565 entry->gid = proc_gid;
4574 4566
4575 /* Setup the WepKey */ 4567 /* Setup the WepKey */
4576 entry = proc_create_data("WepKey", 4568 entry = proc_create_data("WepKey", proc_perm,
4577 S_IFREG | proc_perm,
4578 apriv->proc_entry, &proc_wepkey_ops, dev); 4569 apriv->proc_entry, &proc_wepkey_ops, dev);
4579 if (!entry) 4570 if (!entry)
4580 goto fail_wepkey; 4571 goto fail_wepkey;
@@ -5706,9 +5697,7 @@ static int __init airo_init_module( void )
5706{ 5697{
5707 int i; 5698 int i;
5708 5699
5709 airo_entry = create_proc_entry("driver/aironet", 5700 airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
5710 S_IFDIR | airo_perm,
5711 NULL);
5712 5701
5713 if (airo_entry) { 5702 if (airo_entry) {
5714 airo_entry->uid = proc_uid; 5703 airo_entry->uid = proc_uid;
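
The airo.c changes above replace create_proc_entry(S_IFDIR | mode, ...) with proc_mkdir_mode() and drop the redundant S_IFREG from the proc_create_data() mode argument; both helpers already imply the file type. Below is a minimal, hypothetical module showing the same two calls in isolation — the "procdemo"/"status" names and buffer contents are made up for the sketch, and error handling is trimmed to the essentials.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static struct proc_dir_entry *demo_dir;

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	static const char msg[] = "hello from procfs\n";

	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.read  = demo_read,
};

static int __init demo_init(void)
{
	/* Directory: permission bits only, no S_IFDIR needed. */
	demo_dir = proc_mkdir_mode("driver/procdemo", S_IRUGO | S_IXUGO, NULL);
	if (!demo_dir)
		return -ENOMEM;

	/* Regular file: permission bits only, no S_IFREG needed. */
	if (!proc_create_data("status", S_IRUGO, demo_dir, &demo_fops, NULL)) {
		remove_proc_entry("driver/procdemo", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("status", demo_dir);
	remove_proc_entry("driver/procdemo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
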
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 61956392f2da..5b49cd03bfdf 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> 3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> 4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
5 * 5 *
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 5a1f4f511bc1..bfb6481f01f9 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 0cd6783de883..dbab5b9ce494 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
index 36f7d0639db3..234617c948a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 4bf9dab4f2b3..441bb33f17ad 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index 69a94c7e45cb..6d2e2f3303f9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index cb611b287b35..015d97439935 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f44c84ab5dce..f344cc2b3d59 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 6203eed860dd..7573257731b6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 7a332f16b79a..077e8a6983fa 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index a57e963cf0dc..2fe0a34cbabc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 47780ef1c892..453af6dc514b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index f915a3dbfcad..e8ac70da5ac7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index f276cb922b4d..f48051c50092 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d985841ff401..0ca7635d0669 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index afb0b5ee1865..ab21a4915981 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -1,3 +1,19 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
1#ifndef AR9003_EEPROM_H 17#ifndef AR9003_EEPROM_H
2#define AR9003_EEPROM_H 18#define AR9003_EEPROM_H
3 19
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index a55eddbb2589..392bf0f8ff16 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index be6adec33ddb..10d71f7d3fc2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index 45cc7e80436c..c50449387bf1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 356d2fd78822..e4d6a87ec538 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 25f3c2fdf2bc..eee23ecd118a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index c7505b48e5c0..443090d278e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2002-2010 Atheros Communications, Inc. 2 * Copyright (c) 2010-2011 Atheros Communications, Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index fbdde29f0ab8..611ea6ce8508 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 03b37d7be1c3..f75068b4b310 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -397,6 +397,9 @@ struct ath_beacon {
397 struct ath_descdma bdma; 397 struct ath_descdma bdma;
398 struct ath_txq *cabq; 398 struct ath_txq *cabq;
399 struct list_head bbuf; 399 struct list_head bbuf;
400
401 bool tx_processed;
402 bool tx_last;
400}; 403};
401 404
402void ath_beacon_tasklet(unsigned long data); 405void ath_beacon_tasklet(unsigned long data);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 637dbc5f7b67..d4d8ceced89b 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,12 @@
18 18
19#define FUDGE 2 19#define FUDGE 2
20 20
21static void ath9k_reset_beacon_status(struct ath_softc *sc)
22{
23 sc->beacon.tx_processed = false;
24 sc->beacon.tx_last = false;
25}
26
21/* 27/*
22 * This function will modify certain transmit queue properties depending on 28 * This function will modify certain transmit queue properties depending on
23 * the operating mode of the station (AP or AdHoc). Parameters are AIFS 29 * the operating mode of the station (AP or AdHoc). Parameters are AIFS
@@ -72,6 +78,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
72 struct ieee80211_supported_band *sband; 78 struct ieee80211_supported_band *sband;
73 u8 rate = 0; 79 u8 rate = 0;
74 80
81 ath9k_reset_beacon_status(sc);
82
75 ds = bf->bf_desc; 83 ds = bf->bf_desc;
76 flags = ATH9K_TXDESC_NOACK; 84 flags = ATH9K_TXDESC_NOACK;
77 85
@@ -134,6 +142,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
134 struct ieee80211_tx_info *info; 142 struct ieee80211_tx_info *info;
135 int cabq_depth; 143 int cabq_depth;
136 144
145 ath9k_reset_beacon_status(sc);
146
137 avp = (void *)vif->drv_priv; 147 avp = (void *)vif->drv_priv;
138 cabq = sc->beacon.cabq; 148 cabq = sc->beacon.cabq;
139 149
@@ -351,9 +361,7 @@ void ath_beacon_tasklet(unsigned long data)
351 struct ath_buf *bf = NULL; 361 struct ath_buf *bf = NULL;
352 struct ieee80211_vif *vif; 362 struct ieee80211_vif *vif;
353 int slot; 363 int slot;
354 u32 bfaddr, bc = 0, tsftu; 364 u32 bfaddr, bc = 0;
355 u64 tsf;
356 u16 intval;
357 365
358 /* 366 /*
359 * Check if the previous beacon has gone out. If 367 * Check if the previous beacon has gone out. If
@@ -388,17 +396,27 @@ void ath_beacon_tasklet(unsigned long data)
388 * on the tsf to safeguard against missing an swba. 396 * on the tsf to safeguard against missing an swba.
389 */ 397 */
390 398
391 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
392 399
393 tsf = ath9k_hw_gettsf64(ah); 400 if (ah->opmode == NL80211_IFTYPE_AP) {
394 tsf += TU_TO_USEC(ah->config.sw_beacon_response_time); 401 u16 intval;
395 tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF); 402 u32 tsftu;
396 slot = (tsftu % (intval * ATH_BCBUF)) / intval; 403 u64 tsf;
397 vif = sc->beacon.bslot[slot]; 404
405 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
406 tsf = ath9k_hw_gettsf64(ah);
407 tsf += TU_TO_USEC(ah->config.sw_beacon_response_time);
408 tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF);
409 slot = (tsftu % (intval * ATH_BCBUF)) / intval;
410 vif = sc->beacon.bslot[slot];
411
412 ath_dbg(common, ATH_DBG_BEACON,
413 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
414 slot, tsf, tsftu / ATH_BCBUF, intval, vif);
415 } else {
416 slot = 0;
417 vif = sc->beacon.bslot[slot];
418 }
398 419
399 ath_dbg(common, ATH_DBG_BEACON,
400 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
401 slot, tsf, tsftu / ATH_BCBUF, intval, vif);
402 420
403 bfaddr = 0; 421 bfaddr = 0;
404 if (vif) { 422 if (vif) {
@@ -636,6 +654,8 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
636 struct ath_common *common = ath9k_hw_common(ah); 654 struct ath_common *common = ath9k_hw_common(ah);
637 u32 tsf, delta, intval, nexttbtt; 655 u32 tsf, delta, intval, nexttbtt;
638 656
657 ath9k_reset_beacon_status(sc);
658
639 tsf = ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE); 659 tsf = ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE);
640 intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD); 660 intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
641 661
@@ -646,7 +666,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
646 delta = (tsf - sc->beacon.bc_tstamp); 666 delta = (tsf - sc->beacon.bc_tstamp);
647 else 667 else
648 delta = (tsf + 1 + (~0U - sc->beacon.bc_tstamp)); 668 delta = (tsf + 1 + (~0U - sc->beacon.bc_tstamp));
649 nexttbtt = tsf + roundup(delta, intval); 669 nexttbtt = tsf + intval - (delta % intval);
650 } 670 }
651 671
652 ath_dbg(common, ATH_DBG_BEACON, 672 ath_dbg(common, ATH_DBG_BEACON,
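
Two details in the beacon.c hunks above are easy to verify in isolation: the AP-only branch maps the TSF (in TUs, scaled by the slot count) onto a per-interface beacon slot, and the adhoc fix replaces tsf + roundup(delta, intval) with tsf + intval - (delta % intval) so the next target beacon time stays on the interval grid anchored at the last received beacon instead of overshooting by roughly the elapsed delta. A small host-side model of both calculations follows; ATH_BCBUF is assumed to be 4 here purely for illustration, and the sample TSF values are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define TU_US		1024u		/* one TU in microseconds */
#define ATH_BCBUF	4		/* assumed slot count for this sketch */

/* Next TBTT on the grid anchored at the last beacon timestamp. */
static uint32_t next_tbtt(uint32_t tsf, uint32_t bc_tstamp, uint32_t intval)
{
	uint32_t delta;

	if (bc_tstamp > tsf)
		delta = tsf + 1 + (~0u - bc_tstamp);	/* 32-bit TSF wrapped */
	else
		delta = tsf - bc_tstamp;

	return tsf + intval - (delta % intval);
}

/* AP-mode slot selection, mirroring the tsftu % (intval * ATH_BCBUF) step. */
static int beacon_slot(uint64_t tsf_us, uint32_t intval_tu)
{
	uint32_t tsftu = (uint32_t)((tsf_us * ATH_BCBUF) / TU_US);

	return (tsftu % (intval_tu * ATH_BCBUF)) / intval_tu;
}

int main(void)
{
	uint32_t intval = 100 * TU_US;	/* 100 TU beacon interval, in us */
	uint32_t bc_tstamp = 5000;
	uint32_t tsf = bc_tstamp + 3 * intval + 777;

	/* Lands exactly one interval after the last on-grid beacon time. */
	printf("next tbtt: %u (grid offset %u)\n",
	       next_tbtt(tsf, bc_tstamp, intval),
	       (next_tbtt(tsf, bc_tstamp, intval) - bc_tstamp) % intval);

	printf("slot for tsf=123456789us, intval=100TU: %d\n",
	       beacon_slot(123456789ull, 100));
	return 0;
}
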
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 23f15a7ca7f1..41ce0b139886 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2009 Atheros Communications Inc. 2 * Copyright (c) 2009-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index a9efca83d676..234f77689b14 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2009 Atheros Communications Inc. 2 * Copyright (c) 2009-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 558b228a717f..a1250c586e40 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 4420780fa3b8..1bef41d1b1ff 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 74535e6dfb82..fa6bd2d189e5 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2009 Atheros Communications Inc. 2 * Copyright (c) 2009-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 5124f1420b3a..77ec288b5a70 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2009 Atheros Communications Inc. 2 * Copyright (c) 2009-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index bad1a87249b6..d55ffd7d4bd2 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -435,6 +435,7 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
435 conf->channel_type, 435 conf->channel_type,
436 channel_type_str(conf->channel_type)); 436 channel_type_str(conf->channel_type));
437 437
438 ath9k_ps_wakeup(sc);
438 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr); 439 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
439 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); 440 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
440 len += snprintf(buf + len, sizeof(buf) - len, 441 len += snprintf(buf + len, sizeof(buf) - len,
@@ -444,6 +445,7 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
444 len += snprintf(buf + len, sizeof(buf) - len, 445 len += snprintf(buf + len, sizeof(buf) - len,
445 "addrmask: %pM\n", addr); 446 "addrmask: %pM\n", addr);
446 tmp = ath9k_hw_getrxfilter(sc->sc_ah); 447 tmp = ath9k_hw_getrxfilter(sc->sc_ah);
448 ath9k_ps_restore(sc);
447 len += snprintf(buf + len, sizeof(buf) - len, 449 len += snprintf(buf + len, sizeof(buf) - len,
448 "rfilt: 0x%x", tmp); 450 "rfilt: 0x%x", tmp);
449 if (tmp & ATH9K_RX_FILTER_UCAST) 451 if (tmp & ATH9K_RX_FILTER_UCAST)
@@ -725,6 +727,7 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
725 break; 727 break;
726 } 728 }
727 729
730 ath9k_ps_wakeup(sc);
728 len += snprintf(buf + len, size - len, 731 len += snprintf(buf + len, size - len,
729 "curbssid: %pM\n" 732 "curbssid: %pM\n"
730 "OP-Mode: %s(%i)\n" 733 "OP-Mode: %s(%i)\n"
@@ -734,6 +737,7 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
734 REG_READ(ah, AR_BEACON_PERIOD)); 737 REG_READ(ah, AR_BEACON_PERIOD));
735 738
736 reg = REG_READ(ah, AR_TIMER_MODE); 739 reg = REG_READ(ah, AR_TIMER_MODE);
740 ath9k_ps_restore(sc);
737 len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (", 741 len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
738 reg); 742 reg);
739 if (reg & AR_TBTT_TIMER_EN) 743 if (reg & AR_TBTT_TIMER_EN)
@@ -1050,7 +1054,9 @@ static ssize_t read_file_regval(struct file *file, char __user *user_buf,
1050 unsigned int len; 1054 unsigned int len;
1051 u32 regval; 1055 u32 regval;
1052 1056
1057 ath9k_ps_wakeup(sc);
1053 regval = REG_READ_D(ah, sc->debug.regidx); 1058 regval = REG_READ_D(ah, sc->debug.regidx);
1059 ath9k_ps_restore(sc);
1054 len = sprintf(buf, "0x%08x\n", regval); 1060 len = sprintf(buf, "0x%08x\n", regval);
1055 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 1061 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1056} 1062}
@@ -1072,7 +1078,9 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
1072 if (strict_strtoul(buf, 0, &regval)) 1078 if (strict_strtoul(buf, 0, &regval))
1073 return -EINVAL; 1079 return -EINVAL;
1074 1080
1081 ath9k_ps_wakeup(sc);
1075 REG_WRITE_D(ah, sc->debug.regidx, regval); 1082 REG_WRITE_D(ah, sc->debug.regidx, regval);
1083 ath9k_ps_restore(sc);
1076 return count; 1084 return count;
1077} 1085}
1078 1086
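
The debug.c hunks above bracket every direct register access in the debugfs handlers with ath9k_ps_wakeup()/ath9k_ps_restore(), since MAC registers cannot be read reliably while the chip is in network sleep. A tiny host-side model of that reference-counted bracket is sketched below; the counter and helper names are invented for the sketch and only mimic the shape of the fix.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the driver's power-save use count. */
static int ps_usecount;

static void ps_wakeup(void)  { ps_usecount++; }
static void ps_restore(void) { assert(ps_usecount > 0); ps_usecount--; }

static uint32_t reg_read(uint32_t reg)
{
	/* Only meaningful while at least one caller holds the chip awake. */
	assert(ps_usecount > 0);
	return 0xcafe0000u | reg;	/* fake register contents */
}

/* Shape of the fixed handlers: bracket every access, even several in a row. */
static void dump_regs(void)
{
	uint32_t a, b;

	ps_wakeup();
	a = reg_read(0x8000);
	b = reg_read(0x803c);
	ps_restore();

	printf("a=%#x b=%#x\n", a, b);
}

int main(void)
{
	dump_regs();
	return 0;
}
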
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 5488a324cc10..8ce6ad80f4e2 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 8c18bed3a558..e61404dda8c5 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 3e316133f114..de99c0da52e4 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 6f714dd72365..5b1e894f3d67 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index b87db4763098..7856f0d4512d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index c031854b569f..17f0a6806207 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 0349b3a1cc58..bc713fc28191 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 2e3a33a53406..260f1f37a60e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 2bdcdbc14b1e..794f63094e5d 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +18,7 @@
18#define HTC_USB_H 18#define HTC_USB_H
19 19
20#define MAJOR_VERSION_REQ 1 20#define MAJOR_VERSION_REQ 1
21#define MINOR_VERSION_REQ 2 21#define MINOR_VERSION_REQ 3
22 22
23#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB)) 23#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB))
24 24
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index dfc7a982fc7e..5bc022087e65 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -46,15 +46,8 @@ extern struct ieee80211_ops ath9k_htc_ops;
46extern int htc_modparam_nohwcrypt; 46extern int htc_modparam_nohwcrypt;
47 47
48enum htc_phymode { 48enum htc_phymode {
49 HTC_MODE_AUTO = 0, 49 HTC_MODE_11NA = 0,
50 HTC_MODE_11A = 1, 50 HTC_MODE_11NG = 1
51 HTC_MODE_11B = 2,
52 HTC_MODE_11G = 3,
53 HTC_MODE_FH = 4,
54 HTC_MODE_TURBO_A = 5,
55 HTC_MODE_TURBO_G = 6,
56 HTC_MODE_11NA = 7,
57 HTC_MODE_11NG = 8
58}; 51};
59 52
60enum htc_opmode { 53enum htc_opmode {
@@ -123,18 +116,13 @@ struct ath9k_htc_target_vif {
123 u8 pad; 116 u8 pad;
124} __packed; 117} __packed;
125 118
126#define ATH_HTC_STA_AUTH 0x0001
127#define ATH_HTC_STA_QOS 0x0002
128#define ATH_HTC_STA_ERP 0x0004
129#define ATH_HTC_STA_HT 0x0008
130
131struct ath9k_htc_target_sta { 119struct ath9k_htc_target_sta {
132 u8 macaddr[ETH_ALEN]; 120 u8 macaddr[ETH_ALEN];
133 u8 bssid[ETH_ALEN]; 121 u8 bssid[ETH_ALEN];
134 u8 sta_index; 122 u8 sta_index;
135 u8 vif_index; 123 u8 vif_index;
136 u8 is_vif_sta; 124 u8 is_vif_sta;
137 __be16 flags; /* ATH_HTC_STA_* */ 125 __be16 flags;
138 __be16 htcap; 126 __be16 htcap;
139 __be16 maxampdu; 127 __be16 maxampdu;
140 u8 pad; 128 u8 pad;
@@ -285,9 +273,9 @@ struct ath9k_htc_rx {
285}; 273};
286 274
287#define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */ 275#define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */
288#define ATH9K_HTC_TX_TIMEOUT_INTERVAL 2500 /* ms */ 276#define ATH9K_HTC_TX_TIMEOUT_INTERVAL 3000 /* ms */
289#define ATH9K_HTC_TX_RESERVE 10 277#define ATH9K_HTC_TX_RESERVE 10
290#define ATH9K_HTC_TX_TIMEOUT_COUNT 20 278#define ATH9K_HTC_TX_TIMEOUT_COUNT 40
291#define ATH9K_HTC_TX_THRESHOLD (MAX_TX_BUF_NUM - ATH9K_HTC_TX_RESERVE) 279#define ATH9K_HTC_TX_THRESHOLD (MAX_TX_BUF_NUM - ATH9K_HTC_TX_RESERVE)
292 280
293#define ATH9K_HTC_OP_TX_QUEUES_STOP BIT(0) 281#define ATH9K_HTC_OP_TX_QUEUES_STOP BIT(0)
@@ -450,6 +438,7 @@ struct ath9k_htc_priv {
450 u8 vif_sta_pos[ATH9K_HTC_MAX_VIF]; 438 u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
451 u8 num_ibss_vif; 439 u8 num_ibss_vif;
452 u8 num_sta_vif; 440 u8 num_sta_vif;
441 u8 num_sta_assoc_vif;
453 u8 num_ap_vif; 442 u8 num_ap_vif;
454 443
455 u16 op_flags; 444 u16 op_flags;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 0ded2c66d5ff..aa6a73118706 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index af57fe5aab98..db2352e5cc0d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index bfdc8a887183..61e6d3950718 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -258,7 +258,7 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid,
258 */ 258 */
259 259
260 if (IS_AR7010_DEVICE(drv_info)) 260 if (IS_AR7010_DEVICE(drv_info))
261 priv->htc->credits = 48; 261 priv->htc->credits = 45;
262 else 262 else
263 priv->htc->credits = 33; 263 priv->htc->credits = 33;
264 264
@@ -769,11 +769,6 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
769 hw->channel_change_time = 5000; 769 hw->channel_change_time = 5000;
770 hw->max_listen_interval = 10; 770 hw->max_listen_interval = 10;
771 771
772 if (AR_SREV_9271(priv->ah))
773 hw->max_tx_aggregation_subframes = MAX_TX_AMPDU_SUBFRAMES_9271;
774 else
775 hw->max_tx_aggregation_subframes = MAX_TX_AMPDU_SUBFRAMES_7010;
776
777 hw->vif_data_size = sizeof(struct ath9k_htc_vif); 772 hw->vif_data_size = sizeof(struct ath9k_htc_vif);
778 hw->sta_data_size = sizeof(struct ath9k_htc_sta); 773 hw->sta_data_size = sizeof(struct ath9k_htc_sta);
779 774
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 5aa104fe7eeb..7b7796895432 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -26,7 +26,7 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
26{ 26{
27 enum htc_phymode mode; 27 enum htc_phymode mode;
28 28
29 mode = HTC_MODE_AUTO; 29 mode = -EINVAL;
30 30
31 switch (ichan->chanmode) { 31 switch (ichan->chanmode) {
32 case CHANNEL_G: 32 case CHANNEL_G:
@@ -45,6 +45,8 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
45 break; 45 break;
46 } 46 }
47 47
48 WARN_ON(mode < 0);
49
48 return mode; 50 return mode;
49} 51}
50 52
@@ -500,9 +502,6 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
500 tsta.maxampdu = cpu_to_be16(maxampdu); 502 tsta.maxampdu = cpu_to_be16(maxampdu);
501 } 503 }
502 504
503 if (sta && sta->ht_cap.ht_supported)
504 tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);
505
506 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); 505 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
507 if (ret) { 506 if (ret) {
508 if (sta) 507 if (sta)
@@ -582,7 +581,7 @@ int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv,
582 memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target)); 581 memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
583 582
584 tcap.ampdu_limit = cpu_to_be32(0xffff); 583 tcap.ampdu_limit = cpu_to_be32(0xffff);
585 tcap.ampdu_subframes = priv->hw->max_tx_aggregation_subframes; 584 tcap.ampdu_subframes = 0xff;
586 tcap.enable_coex = enable_coex; 585 tcap.enable_coex = enable_coex;
587 tcap.tx_chainmask = priv->ah->caps.tx_chainmask; 586 tcap.tx_chainmask = priv->ah->caps.tx_chainmask;
588 587
@@ -1165,6 +1164,8 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1165 1164
1166 ath9k_htc_set_opmode(priv); 1165 ath9k_htc_set_opmode(priv);
1167 1166
1167 ath9k_htc_set_bssid_mask(priv, vif);
1168
1168 /* 1169 /*
1169 * Stop ANI only if there are no associated station interfaces. 1170 * Stop ANI only if there are no associated station interfaces.
1170 */ 1171 */
@@ -1435,6 +1436,37 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1435 return ret; 1436 return ret;
1436} 1437}
1437 1438
1439static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv)
1440{
1441 struct ath_common *common = ath9k_hw_common(priv->ah);
1442
1443 ath9k_hw_write_associd(priv->ah);
1444 ath_dbg(common, ATH_DBG_CONFIG,
1445 "BSSID: %pM aid: 0x%x\n",
1446 common->curbssid, common->curaid);
1447}
1448
1449static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1450{
1451 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
1452 struct ath_common *common = ath9k_hw_common(priv->ah);
1453 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1454
1455 if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
1456 common->curaid = bss_conf->aid;
1457 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1458 }
1459}
1460
1461static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv)
1462{
1463 if (priv->num_sta_assoc_vif == 1) {
1464 ieee80211_iterate_active_interfaces_atomic(priv->hw,
1465 ath9k_htc_bss_iter, priv);
1466 ath9k_htc_set_bssid(priv);
1467 }
1468}
1469
1438static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, 1470static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1439 struct ieee80211_vif *vif, 1471 struct ieee80211_vif *vif,
1440 struct ieee80211_bss_conf *bss_conf, 1472 struct ieee80211_bss_conf *bss_conf,
@@ -1443,43 +1475,32 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1443 struct ath9k_htc_priv *priv = hw->priv; 1475 struct ath9k_htc_priv *priv = hw->priv;
1444 struct ath_hw *ah = priv->ah; 1476 struct ath_hw *ah = priv->ah;
1445 struct ath_common *common = ath9k_hw_common(ah); 1477 struct ath_common *common = ath9k_hw_common(ah);
1446 bool set_assoc;
1447 1478
1448 mutex_lock(&priv->mutex); 1479 mutex_lock(&priv->mutex);
1449 ath9k_htc_ps_wakeup(priv); 1480 ath9k_htc_ps_wakeup(priv);
1450 1481
1451 /*
1452 * Set the HW AID/BSSID only for the first station interface
1453 * or in IBSS mode.
1454 */
1455 set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
1456 ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
1457 (priv->num_sta_vif == 1)));
1458
1459
1460 if (changed & BSS_CHANGED_ASSOC) { 1482 if (changed & BSS_CHANGED_ASSOC) {
1461 if (set_assoc) { 1483 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1462 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 1484 bss_conf->assoc);
1463 bss_conf->assoc);
1464 1485
1465 common->curaid = bss_conf->assoc ? 1486 bss_conf->assoc ?
1466 bss_conf->aid : 0; 1487 priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
1467 1488
1468 if (bss_conf->assoc) 1489 if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
1490 if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
1469 ath9k_htc_start_ani(priv); 1491 ath9k_htc_start_ani(priv);
1470 else 1492 else if (priv->num_sta_assoc_vif == 0)
1471 ath9k_htc_stop_ani(priv); 1493 ath9k_htc_stop_ani(priv);
1472 } 1494 }
1473 } 1495 }
1474 1496
1475 if (changed & BSS_CHANGED_BSSID) { 1497 if (changed & BSS_CHANGED_BSSID) {
1476 if (set_assoc) { 1498 if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
1499 common->curaid = bss_conf->aid;
1477 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1500 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1478 ath9k_hw_write_associd(ah); 1501 ath9k_htc_set_bssid(priv);
1479 1502 } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
1480 ath_dbg(common, ATH_DBG_CONFIG, 1503 ath9k_htc_choose_set_bssid(priv);
1481 "BSSID: %pM aid: 0x%x\n",
1482 common->curbssid, common->curaid);
1483 } 1504 }
1484 } 1505 }
1485 1506
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index a898dac22337..2d81c700e201 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -875,6 +875,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
875 rfilt |= ATH9K_RX_FILTER_CONTROL; 875 rfilt |= ATH9K_RX_FILTER_CONTROL;
876 876
877 if ((ah->opmode == NL80211_IFTYPE_STATION) && 877 if ((ah->opmode == NL80211_IFTYPE_STATION) &&
878 (priv->nvifs <= 1) &&
878 !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC)) 879 !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
879 rfilt |= ATH9K_RX_FILTER_MYBEACON; 880 rfilt |= ATH9K_RX_FILTER_MYBEACON;
880 else 881 else
@@ -888,6 +889,9 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
888 if (priv->rxfilter & FIF_PSPOLL) 889 if (priv->rxfilter & FIF_PSPOLL)
889 rfilt |= ATH9K_RX_FILTER_PSPOLL; 890 rfilt |= ATH9K_RX_FILTER_PSPOLL;
890 891
892 if (priv->nvifs > 1)
893 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
894
891 return rfilt; 895 return rfilt;
892 896
893#undef RX_FILTER_PRESERVE 897#undef RX_FILTER_PRESERVE
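
The htc_drv_txrx.c hunk above relaxes the receive filter once more than one interface is active: the MYBEACON restriction is kept only for a single non-promiscuous station interface, and with multiple vifs all multicast/broadcast frames are accepted so every interface still sees its beacons. A compact host-side model of that decision is shown below; the bit values and names are placeholders for the sketch, not the driver's ATH9K_RX_FILTER_* constants.

#include <stdio.h>

/* Placeholder filter bits for the sketch (not the driver's real values). */
#define RX_MYBEACON		0x01
#define RX_BEACON		0x02
#define RX_MCAST_BCAST_ALL	0x04

enum opmode { OP_STATION, OP_AP, OP_ADHOC };

static unsigned int calc_rxfilter(enum opmode opmode, int nvifs,
				  int promisc_beacons)
{
	unsigned int rfilt = 0;

	/* Single non-promiscuous station: its own beacons are enough. */
	if (opmode == OP_STATION && nvifs <= 1 && !promisc_beacons)
		rfilt |= RX_MYBEACON;
	else
		rfilt |= RX_BEACON;

	/* Several interfaces: stop filtering multicast/broadcast. */
	if (nvifs > 1)
		rfilt |= RX_MCAST_BCAST_ALL;

	return rfilt;
}

int main(void)
{
	printf("one STA vif:  %#x\n", calc_rxfilter(OP_STATION, 1, 0));
	printf("two vifs:     %#x\n", calc_rxfilter(OP_STATION, 2, 0));
	return 0;
}
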
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index cee970fdf652..1b90ed8795c3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index 91a5305db95a..e1ffbb6bd636 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 8b8f0445aef8..2f3e07263fcb 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index b75b5dca4e29..72543ce8f616 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 7af2773d2bfc..57435ce62792 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index b172d1509515..45c585a337e9 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index bd6d2b9d736f..c2091f1f4096 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b60c130917f7..8e848c4d16ba 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 17ebdf1e8b7b..a198ee374b05 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -2332,6 +2332,45 @@ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
2332 return false; 2332 return false;
2333} 2333}
2334 2334
2335int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
2336{
2337 struct ath_softc *sc = hw->priv;
2338 struct ath_hw *ah = sc->sc_ah;
2339 struct ieee80211_vif *vif;
2340 struct ath_vif *avp;
2341 struct ath_buf *bf;
2342 struct ath_tx_status ts;
2343 int status;
2344
2345 vif = sc->beacon.bslot[0];
2346 if (!vif)
2347 return 0;
2348
2349 avp = (void *)vif->drv_priv;
2350 if (!avp->is_bslot_active)
2351 return 0;
2352
2353 if (!sc->beacon.tx_processed) {
2354 tasklet_disable(&sc->bcon_tasklet);
2355
2356 bf = avp->av_bcbuf;
2357 if (!bf || !bf->bf_mpdu)
2358 goto skip;
2359
2360 status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts);
2361 if (status == -EINPROGRESS)
2362 goto skip;
2363
2364 sc->beacon.tx_processed = true;
2365 sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
2366
2367skip:
2368 tasklet_enable(&sc->bcon_tasklet);
2369 }
2370
2371 return sc->beacon.tx_last;
2372}
2373
2335struct ieee80211_ops ath9k_ops = { 2374struct ieee80211_ops ath9k_ops = {
2336 .tx = ath9k_tx, 2375 .tx = ath9k_tx,
2337 .start = ath9k_start, 2376 .start = ath9k_start,
@@ -2356,4 +2395,5 @@ struct ieee80211_ops ath9k_ops = {
2356 .set_coverage_class = ath9k_set_coverage_class, 2395 .set_coverage_class = ath9k_set_coverage_class,
2357 .flush = ath9k_flush, 2396 .flush = ath9k_flush,
2358 .tx_frames_pending = ath9k_tx_frames_pending, 2397 .tx_frames_pending = ath9k_tx_frames_pending,
2398 .tx_last_beacon = ath9k_tx_last_beacon,
2359}; 2399};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 9c65459be100..b8cbfc707213 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 9441bf8ca2fd..8b380305b0fc 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 4ccbf2ddb553..17542214c93f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Video54 Technologies, Inc. 2 * Copyright (c) 2004 Video54 Technologies, Inc.
3 * Copyright (c) 2004-2009 Atheros Communications, Inc. 3 * Copyright (c) 2004-2011 Atheros Communications, Inc.
4 * 4 *
5 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 5d984b8acdb1..c3d850207bee 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Sam Leffler, Errno Consulting 2 * Copyright (c) 2004 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004 Video54 Technologies, Inc. 3 * Copyright (c) 2004 Video54 Technologies, Inc.
4 * Copyright (c) 2008-2009 Atheros Communications Inc. 4 * Copyright (c) 2008-2011 Atheros Communications Inc.
5 * 5 *
6 * Permission to use, copy, modify, and/or distribute this software for any 6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 4f52e0429f99..07e35e59c9e3 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 456f3ec20fef..c18ee9921fb1 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index f9b1eb4853c4..35422fc1f2ce 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 6095eeb6e025..fde6da619f30 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2010 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 97dd1fac98b6..3779b8977d47 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index bb578690935e..4da01a9f5680 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -286,6 +286,10 @@ struct ar9170 {
286 unsigned int tx_seq_table; 286 unsigned int tx_seq_table;
287 } fw; 287 } fw;
288 288
289 /* interface configuration combinations */
290 struct ieee80211_iface_limit if_comb_limits[1];
291 struct ieee80211_iface_combination if_combs[1];
292
289 /* reset / stuck frames/queue detection */ 293 /* reset / stuck frames/queue detection */
290 struct work_struct restart_work; 294 struct work_struct restart_work;
291 struct work_struct ping_work; 295 struct work_struct ping_work;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 9517ede9e2df..221957c5d373 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -151,6 +151,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
151 const struct carl9170fw_chk_desc *chk_desc; 151 const struct carl9170fw_chk_desc *chk_desc;
152 const struct carl9170fw_last_desc *last_desc; 152 const struct carl9170fw_last_desc *last_desc;
153 const struct carl9170fw_txsq_desc *txsq_desc; 153 const struct carl9170fw_txsq_desc *txsq_desc;
154 u16 if_comb_types;
154 155
155 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, 156 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
156 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); 157 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -268,6 +269,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
268 if (SUPP(CARL9170FW_WOL)) 269 if (SUPP(CARL9170FW_WOL))
269 device_set_wakeup_enable(&ar->udev->dev, true); 270 device_set_wakeup_enable(&ar->udev->dev, true);
270 271
272 if_comb_types = BIT(NL80211_IFTYPE_STATION) |
273 BIT(NL80211_IFTYPE_P2P_CLIENT);
274
271 ar->fw.vif_num = otus_desc->vif_num; 275 ar->fw.vif_num = otus_desc->vif_num;
272 ar->fw.cmd_bufs = otus_desc->cmd_bufs; 276 ar->fw.cmd_bufs = otus_desc->cmd_bufs;
273 ar->fw.address = le32_to_cpu(otus_desc->fw_address); 277 ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -294,12 +298,25 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
294 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); 298 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
295 299
296 if (SUPP(CARL9170FW_WLANTX_CAB)) { 300 if (SUPP(CARL9170FW_WLANTX_CAB)) {
297 ar->hw->wiphy->interface_modes |= 301 if_comb_types |=
298 BIT(NL80211_IFTYPE_AP) | 302 BIT(NL80211_IFTYPE_AP) |
299 BIT(NL80211_IFTYPE_P2P_GO); 303 BIT(NL80211_IFTYPE_P2P_GO);
300 } 304 }
301 } 305 }
302 306
307 ar->if_comb_limits[0].max = ar->fw.vif_num;
308 ar->if_comb_limits[0].types = if_comb_types;
309
310 ar->if_combs[0].num_different_channels = 1;
311 ar->if_combs[0].max_interfaces = ar->fw.vif_num;
312 ar->if_combs[0].limits = ar->if_comb_limits;
313 ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits);
314
315 ar->hw->wiphy->iface_combinations = ar->if_combs;
316 ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs);
317
318 ar->hw->wiphy->interface_modes |= if_comb_types;
319
303 txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, 320 txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
304 sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); 321 sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
305 322
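The block added above is where carl9170 starts advertising interface-combination limits to cfg80211: a single limit entry allows up to fw.vif_num interfaces of the firmware-supported types, all on one channel, and the wiphy's iface_combinations/n_iface_combinations fields point at it. A minimal standalone sketch of filling such a combination (the struct definitions and interface-type bits below are simplified stand-ins for the real ones in include/net/cfg80211.h; vif_num = 2 is an arbitrary example value):

/* Simplified stand-ins for the cfg80211 structures used in the hunk above. */
#include <stdio.h>

struct iface_limit       { int max; unsigned types; };
struct iface_combination {
	int num_different_channels;
	int max_interfaces;
	const struct iface_limit *limits;
	int n_limits;
};

enum { IFTYPE_STATION = 1u << 0, IFTYPE_P2P_CLIENT = 1u << 1,
       IFTYPE_AP = 1u << 2, IFTYPE_P2P_GO = 1u << 3 };

int main(void)
{
	unsigned types = IFTYPE_STATION | IFTYPE_P2P_CLIENT |
			 IFTYPE_AP | IFTYPE_P2P_GO;	/* firmware-dependent */
	int vif_num = 2;				/* from the fw descriptor */

	struct iface_limit limit = { .max = vif_num, .types = types };
	struct iface_combination comb = {
		.num_different_channels = 1,	/* all vifs share one channel */
		.max_interfaces         = vif_num,
		.limits                 = &limit,
		.n_limits               = 1,
	};

	printf("advertise: up to %d interfaces, %d limit rule(s)\n",
	       comb.max_interfaces, comb.n_limits);
	return 0;
}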
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 7d5c65ea94e6..54d093c2ab44 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1570,14 +1570,8 @@ void *carl9170_alloc(size_t priv_size)
1570 INIT_LIST_HEAD(&ar->vif_list); 1570 INIT_LIST_HEAD(&ar->vif_list);
1571 init_completion(&ar->tx_flush); 1571 init_completion(&ar->tx_flush);
1572 1572
1573 /* 1573 /* firmware decides which modes we support */
1574 * Note: 1574 hw->wiphy->interface_modes = 0;
1575 * IBSS/ADHOC and AP mode are only enabled, if the firmware
1576 * supports these modes. The code which will add the
1577 * additional interface_modes is in fw.c.
1578 */
1579 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1580 BIT(NL80211_IFTYPE_P2P_CLIENT);
1581 1575
1582 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 1576 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1583 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 1577 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index cc11d66f15bc..3f508e59f146 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -43,7 +43,7 @@
43 * set of ~ ( MAC XOR BSSID ) for all bssids we handle. 43 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
44 * 44 *
45 * When you do this you are essentially computing the common bits of all your 45 * When you do this you are essentially computing the common bits of all your
46 * BSSes. Later it is assumed the harware will "and" (&) the BSSID mask with 46 * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
47 * the MAC address to obtain the relevant bits and compare the result with 47 * the MAC address to obtain the relevant bits and compare the result with
48 * (frame's BSSID & mask) to see if they match. 48 * (frame's BSSID & mask) to see if they match.
49 * 49 *
@@ -71,8 +71,8 @@
71 * On loop iteration for BSSID-02: 71 * On loop iteration for BSSID-02:
72 * bssid_mask &= ~(0001 ^ 1001) 72 * bssid_mask &= ~(0001 ^ 1001)
73 * bssid_mask = (1010) & ~(0001 ^ 1001) 73 * bssid_mask = (1010) & ~(0001 ^ 1001)
74 * bssid_mask = (1010) & ~(1001) 74 * bssid_mask = (1010) & ~(1000)
75 * bssid_mask = (1010) & (0110) 75 * bssid_mask = (1010) & (0111)
76 * bssid_mask = 0010 76 * bssid_mask = 0010
77 * 77 *
78 * A bssid_mask of 0010 means "only pay attention to the second least 78 * A bssid_mask of 0010 means "only pay attention to the second least
@@ -102,11 +102,9 @@
102 * 102 *
103 * IFRAME-02: 0001 (we should allow) 103 * IFRAME-02: 0001 (we should allow)
104 * 104 *
105 * allow = (0001 & 1010) == 1010
106 *
107 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0; 105 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
108 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0; 106 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
109 * --> allow = (0010) == (0010) 107 * --> allow = (0000) == (0000)
110 * --> allow = 1 108 * --> allow = 1
111 * 109 *
112 * Other examples: 110 * Other examples:
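The comment corrected above describes how the shared BSSID mask is built: each configured BSSID clears the mask bits in which it differs from the MAC address, and a frame is later accepted when its BSSID agrees with the MAC on the remaining bits. A standalone rerun of the 4-bit example from the comment (MAC = 0001, BSSID-02 = 1001 as given; BSSID-01 = 0100 is inferred from the intermediate mask value 1010; not driver code):

/* 4-bit rerun of the bssid_mask example in the comment above. */
#include <stdio.h>

int main(void)
{
	unsigned mac      = 0x1;		/* 0001 */
	unsigned bssids[] = { 0x4, 0x9 };	/* BSSID-01 = 0100, BSSID-02 = 1001 */
	unsigned mask     = 0xF;		/* start with all bits significant */

	for (int i = 0; i < 2; i++)
		mask &= ~(mac ^ bssids[i]) & 0xF;	/* keep only the common bits */

	printf("bssid_mask = 0x%X\n", mask);	/* prints 0x2 (0010) */

	unsigned iframe = 0x1;			/* IFRAME-02 from the comment */
	int allow = (iframe & mask) == (mac & mask);
	printf("allow = %d\n", allow);		/* prints 1 */

	return 0;
}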
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index ebc93c1bb5e7..25a78cfb7d15 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -567,6 +567,8 @@ struct b43_dma {
567 struct b43_dmaring *tx_ring_mcast; /* Multicast */ 567 struct b43_dmaring *tx_ring_mcast; /* Multicast */
568 568
569 struct b43_dmaring *rx_ring; 569 struct b43_dmaring *rx_ring;
570
571 u32 translation; /* Routing bits */
570}; 572};
571 573
572struct b43_pio_txqueue; 574struct b43_pio_txqueue;
@@ -705,7 +707,7 @@ enum {
705 707
706/* Data structure for one wireless device (802.11 core) */ 708/* Data structure for one wireless device (802.11 core) */
707struct b43_wldev { 709struct b43_wldev {
708 struct ssb_device *dev; 710 struct ssb_device *sdev;
709 struct b43_wl *wl; 711 struct b43_wl *wl;
710 712
711 /* The device initialization status. 713 /* The device initialization status.
@@ -879,22 +881,34 @@ static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
879 881
880static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) 882static inline u16 b43_read16(struct b43_wldev *dev, u16 offset)
881{ 883{
882 return ssb_read16(dev->dev, offset); 884 return ssb_read16(dev->sdev, offset);
883} 885}
884 886
885static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) 887static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value)
886{ 888{
887 ssb_write16(dev->dev, offset, value); 889 ssb_write16(dev->sdev, offset, value);
888} 890}
889 891
890static inline u32 b43_read32(struct b43_wldev *dev, u16 offset) 892static inline u32 b43_read32(struct b43_wldev *dev, u16 offset)
891{ 893{
892 return ssb_read32(dev->dev, offset); 894 return ssb_read32(dev->sdev, offset);
893} 895}
894 896
895static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value) 897static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
896{ 898{
897 ssb_write32(dev->dev, offset, value); 899 ssb_write32(dev->sdev, offset, value);
900}
901
902static inline void b43_block_read(struct b43_wldev *dev, void *buffer,
903 size_t count, u16 offset, u8 reg_width)
904{
905 ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
906}
907
908static inline void b43_block_write(struct b43_wldev *dev, const void *buffer,
909 size_t count, u16 offset, u8 reg_width)
910{
911 ssb_block_write(dev->sdev, buffer, count, offset, reg_width);
898} 912}
899 913
900static inline bool b43_using_pio_transfers(struct b43_wldev *dev) 914static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index ff0f5ba14b2c..47d44bcff37d 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -80,7 +80,7 @@ static void op32_fill_descriptor(struct b43_dmaring *ring,
80 addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK); 80 addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
81 addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK) 81 addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
82 >> SSB_DMA_TRANSLATION_SHIFT; 82 >> SSB_DMA_TRANSLATION_SHIFT;
83 addr |= ssb_dma_translation(ring->dev->dev); 83 addr |= ring->dev->dma.translation;
84 ctl = bufsize & B43_DMA32_DCTL_BYTECNT; 84 ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
85 if (slot == ring->nr_slots - 1) 85 if (slot == ring->nr_slots - 1)
86 ctl |= B43_DMA32_DCTL_DTABLEEND; 86 ctl |= B43_DMA32_DCTL_DTABLEEND;
@@ -174,7 +174,7 @@ static void op64_fill_descriptor(struct b43_dmaring *ring,
174 addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); 174 addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
175 addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) 175 addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
176 >> SSB_DMA_TRANSLATION_SHIFT; 176 >> SSB_DMA_TRANSLATION_SHIFT;
177 addrhi |= (ssb_dma_translation(ring->dev->dev) << 1); 177 addrhi |= (ring->dev->dma.translation << 1);
178 if (slot == ring->nr_slots - 1) 178 if (slot == ring->nr_slots - 1)
179 ctl0 |= B43_DMA64_DCTL0_DTABLEEND; 179 ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
180 if (start) 180 if (start)
@@ -333,10 +333,10 @@ static inline
333 dma_addr_t dmaaddr; 333 dma_addr_t dmaaddr;
334 334
335 if (tx) { 335 if (tx) {
336 dmaaddr = dma_map_single(ring->dev->dev->dma_dev, 336 dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
337 buf, len, DMA_TO_DEVICE); 337 buf, len, DMA_TO_DEVICE);
338 } else { 338 } else {
339 dmaaddr = dma_map_single(ring->dev->dev->dma_dev, 339 dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
340 buf, len, DMA_FROM_DEVICE); 340 buf, len, DMA_FROM_DEVICE);
341 } 341 }
342 342
@@ -348,10 +348,10 @@ static inline
348 dma_addr_t addr, size_t len, int tx) 348 dma_addr_t addr, size_t len, int tx)
349{ 349{
350 if (tx) { 350 if (tx) {
351 dma_unmap_single(ring->dev->dev->dma_dev, 351 dma_unmap_single(ring->dev->sdev->dma_dev,
352 addr, len, DMA_TO_DEVICE); 352 addr, len, DMA_TO_DEVICE);
353 } else { 353 } else {
354 dma_unmap_single(ring->dev->dev->dma_dev, 354 dma_unmap_single(ring->dev->sdev->dma_dev,
355 addr, len, DMA_FROM_DEVICE); 355 addr, len, DMA_FROM_DEVICE);
356 } 356 }
357} 357}
@@ -361,7 +361,7 @@ static inline
361 dma_addr_t addr, size_t len) 361 dma_addr_t addr, size_t len)
362{ 362{
363 B43_WARN_ON(ring->tx); 363 B43_WARN_ON(ring->tx);
364 dma_sync_single_for_cpu(ring->dev->dev->dma_dev, 364 dma_sync_single_for_cpu(ring->dev->sdev->dma_dev,
365 addr, len, DMA_FROM_DEVICE); 365 addr, len, DMA_FROM_DEVICE);
366} 366}
367 367
@@ -370,7 +370,7 @@ static inline
370 dma_addr_t addr, size_t len) 370 dma_addr_t addr, size_t len)
371{ 371{
372 B43_WARN_ON(ring->tx); 372 B43_WARN_ON(ring->tx);
373 dma_sync_single_for_device(ring->dev->dev->dma_dev, 373 dma_sync_single_for_device(ring->dev->sdev->dma_dev,
374 addr, len, DMA_FROM_DEVICE); 374 addr, len, DMA_FROM_DEVICE);
375} 375}
376 376
@@ -401,7 +401,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
401 */ 401 */
402 if (ring->type == B43_DMA_64BIT) 402 if (ring->type == B43_DMA_64BIT)
403 flags |= GFP_DMA; 403 flags |= GFP_DMA;
404 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 404 ring->descbase = dma_alloc_coherent(ring->dev->sdev->dma_dev,
405 B43_DMA_RINGMEMSIZE, 405 B43_DMA_RINGMEMSIZE,
406 &(ring->dmabase), flags); 406 &(ring->dmabase), flags);
407 if (!ring->descbase) { 407 if (!ring->descbase) {
@@ -415,7 +415,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
415 415
416static void free_ringmemory(struct b43_dmaring *ring) 416static void free_ringmemory(struct b43_dmaring *ring)
417{ 417{
418 dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, 418 dma_free_coherent(ring->dev->sdev->dma_dev, B43_DMA_RINGMEMSIZE,
419 ring->descbase, ring->dmabase); 419 ring->descbase, ring->dmabase);
420} 420}
421 421
@@ -523,7 +523,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
523 dma_addr_t addr, 523 dma_addr_t addr,
524 size_t buffersize, bool dma_to_device) 524 size_t buffersize, bool dma_to_device)
525{ 525{
526 if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) 526 if (unlikely(dma_mapping_error(ring->dev->sdev->dma_dev, addr)))
527 return 1; 527 return 1;
528 528
529 switch (ring->type) { 529 switch (ring->type) {
@@ -658,7 +658,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
658 int err = 0; 658 int err = 0;
659 u32 value; 659 u32 value;
660 u32 addrext; 660 u32 addrext;
661 u32 trans = ssb_dma_translation(ring->dev->dev); 661 u32 trans = ring->dev->dma.translation;
662 662
663 if (ring->tx) { 663 if (ring->tx) {
664 if (ring->type == B43_DMA_64BIT) { 664 if (ring->type == B43_DMA_64BIT) {
@@ -869,7 +869,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
869 goto err_kfree_meta; 869 goto err_kfree_meta;
870 870
871 /* test for ability to dma to txhdr_cache */ 871 /* test for ability to dma to txhdr_cache */
872 dma_test = dma_map_single(dev->dev->dma_dev, 872 dma_test = dma_map_single(dev->sdev->dma_dev,
873 ring->txhdr_cache, 873 ring->txhdr_cache,
874 b43_txhdr_size(dev), 874 b43_txhdr_size(dev),
875 DMA_TO_DEVICE); 875 DMA_TO_DEVICE);
@@ -884,7 +884,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
884 if (!ring->txhdr_cache) 884 if (!ring->txhdr_cache)
885 goto err_kfree_meta; 885 goto err_kfree_meta;
886 886
887 dma_test = dma_map_single(dev->dev->dma_dev, 887 dma_test = dma_map_single(dev->sdev->dma_dev,
888 ring->txhdr_cache, 888 ring->txhdr_cache,
889 b43_txhdr_size(dev), 889 b43_txhdr_size(dev),
890 DMA_TO_DEVICE); 890 DMA_TO_DEVICE);
@@ -898,7 +898,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
898 } 898 }
899 } 899 }
900 900
901 dma_unmap_single(dev->dev->dma_dev, 901 dma_unmap_single(dev->sdev->dma_dev,
902 dma_test, b43_txhdr_size(dev), 902 dma_test, b43_txhdr_size(dev),
903 DMA_TO_DEVICE); 903 DMA_TO_DEVICE);
904 } 904 }
@@ -1013,9 +1013,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
1013 /* Try to set the DMA mask. If it fails, try falling back to a 1013 /* Try to set the DMA mask. If it fails, try falling back to a
1014 * lower mask, as we can always also support a lower one. */ 1014 * lower mask, as we can always also support a lower one. */
1015 while (1) { 1015 while (1) {
1016 err = dma_set_mask(dev->dev->dma_dev, mask); 1016 err = dma_set_mask(dev->sdev->dma_dev, mask);
1017 if (!err) { 1017 if (!err) {
1018 err = dma_set_coherent_mask(dev->dev->dma_dev, mask); 1018 err = dma_set_coherent_mask(dev->sdev->dma_dev, mask);
1019 if (!err) 1019 if (!err)
1020 break; 1020 break;
1021 } 1021 }
@@ -1055,6 +1055,7 @@ int b43_dma_init(struct b43_wldev *dev)
1055 err = b43_dma_set_mask(dev, dmamask); 1055 err = b43_dma_set_mask(dev, dmamask);
1056 if (err) 1056 if (err)
1057 return err; 1057 return err;
1058 dma->translation = ssb_dma_translation(dev->sdev);
1058 1059
1059 err = -ENOMEM; 1060 err = -ENOMEM;
1060 /* setup TX DMA channels. */ 1061 /* setup TX DMA channels. */
@@ -1084,7 +1085,7 @@ int b43_dma_init(struct b43_wldev *dev)
1084 goto err_destroy_mcast; 1085 goto err_destroy_mcast;
1085 1086
1086 /* No support for the TX status DMA ring. */ 1087 /* No support for the TX status DMA ring. */
1087 B43_WARN_ON(dev->dev->id.revision < 5); 1088 B43_WARN_ON(dev->sdev->id.revision < 5);
1088 1089
1089 b43dbg(dev->wl, "%u-bit DMA initialized\n", 1090 b43dbg(dev->wl, "%u-bit DMA initialized\n",
1090 (unsigned int)type); 1091 (unsigned int)type);
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index c587115dd2b9..0cafafe368af 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -138,7 +138,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
138 led->led_dev.default_trigger = default_trigger; 138 led->led_dev.default_trigger = default_trigger;
139 led->led_dev.brightness_set = b43_led_brightness_set; 139 led->led_dev.brightness_set = b43_led_brightness_set;
140 140
141 err = led_classdev_register(dev->dev->dev, &led->led_dev); 141 err = led_classdev_register(dev->sdev->dev, &led->led_dev);
142 if (err) { 142 if (err) {
143 b43warn(dev->wl, "LEDs: Failed to register %s\n", name); 143 b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
144 led->wl = NULL; 144 led->wl = NULL;
@@ -215,7 +215,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
215 enum b43_led_behaviour *behaviour, 215 enum b43_led_behaviour *behaviour,
216 bool *activelow) 216 bool *activelow)
217{ 217{
218 struct ssb_bus *bus = dev->dev->bus; 218 struct ssb_bus *bus = dev->sdev->bus;
219 u8 sprom[4]; 219 u8 sprom[4];
220 220
221 sprom[0] = bus->sprom.gpio0; 221 sprom[0] = bus->sprom.gpio0;
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 94e4f1378fc3..2ef7d4b38540 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -98,7 +98,7 @@ static u16 lo_measure_feedthrough(struct b43_wldev *dev,
98 rfover |= pga; 98 rfover |= pga;
99 rfover |= lna; 99 rfover |= lna;
100 rfover |= trsw_rx; 100 rfover |= trsw_rx;
101 if ((dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) 101 if ((dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA)
102 && phy->rev > 6) 102 && phy->rev > 6)
103 rfover |= B43_PHY_RFOVERVAL_EXTLNA; 103 rfover |= B43_PHY_RFOVERVAL_EXTLNA;
104 104
@@ -387,7 +387,7 @@ struct lo_g_saved_values {
387static void lo_measure_setup(struct b43_wldev *dev, 387static void lo_measure_setup(struct b43_wldev *dev,
388 struct lo_g_saved_values *sav) 388 struct lo_g_saved_values *sav)
389{ 389{
390 struct ssb_sprom *sprom = &dev->dev->bus->sprom; 390 struct ssb_sprom *sprom = &dev->sdev->bus->sprom;
391 struct b43_phy *phy = &dev->phy; 391 struct b43_phy *phy = &dev->phy;
392 struct b43_phy_g *gphy = phy->g; 392 struct b43_phy_g *gphy = phy->g;
393 struct b43_txpower_lo_control *lo = gphy->lo_control; 393 struct b43_txpower_lo_control *lo = gphy->lo_control;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5a43984bdcea..eb4159686985 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -548,7 +548,7 @@ void b43_tsf_read(struct b43_wldev *dev, u64 *tsf)
548{ 548{
549 u32 low, high; 549 u32 low, high;
550 550
551 B43_WARN_ON(dev->dev->id.revision < 3); 551 B43_WARN_ON(dev->sdev->id.revision < 3);
552 552
553 /* The hardware guarantees us an atomic read, if we 553 /* The hardware guarantees us an atomic read, if we
554 * read the low register first. */ 554 * read the low register first. */
@@ -586,7 +586,7 @@ static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf)
586{ 586{
587 u32 low, high; 587 u32 low, high;
588 588
589 B43_WARN_ON(dev->dev->id.revision < 3); 589 B43_WARN_ON(dev->sdev->id.revision < 3);
590 590
591 low = tsf; 591 low = tsf;
592 high = (tsf >> 32); 592 high = (tsf >> 32);
@@ -714,7 +714,7 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
714 b43_ram_write(dev, i * 4, buffer[i]); 714 b43_ram_write(dev, i * 4, buffer[i]);
715 715
716 b43_write16(dev, 0x0568, 0x0000); 716 b43_write16(dev, 0x0568, 0x0000);
717 if (dev->dev->id.revision < 11) 717 if (dev->sdev->id.revision < 11)
718 b43_write16(dev, 0x07C0, 0x0000); 718 b43_write16(dev, 0x07C0, 0x0000);
719 else 719 else
720 b43_write16(dev, 0x07C0, 0x0100); 720 b43_write16(dev, 0x07C0, 0x0100);
@@ -1132,7 +1132,7 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
1132 b43_write32(dev, B43_MMIO_MACCTL, macctl); 1132 b43_write32(dev, B43_MMIO_MACCTL, macctl);
1133 /* Commit write */ 1133 /* Commit write */
1134 b43_read32(dev, B43_MMIO_MACCTL); 1134 b43_read32(dev, B43_MMIO_MACCTL);
1135 if (awake && dev->dev->id.revision >= 5) { 1135 if (awake && dev->sdev->id.revision >= 5) {
1136 /* Wait for the microcode to wake up. */ 1136 /* Wait for the microcode to wake up. */
1137 for (i = 0; i < 100; i++) { 1137 for (i = 0; i < 100; i++) {
1138 ucstat = b43_shm_read16(dev, B43_SHM_SHARED, 1138 ucstat = b43_shm_read16(dev, B43_SHM_SHARED,
@@ -1144,29 +1144,35 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
1144 } 1144 }
1145} 1145}
1146 1146
1147void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) 1147static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, u32 flags)
1148{ 1148{
1149 u32 tmslow; 1149 u32 tmslow;
1150 u32 macctl;
1151 1150
1152 flags |= B43_TMSLOW_PHYCLKEN; 1151 flags |= B43_TMSLOW_PHYCLKEN;
1153 flags |= B43_TMSLOW_PHYRESET; 1152 flags |= B43_TMSLOW_PHYRESET;
1154 if (dev->phy.type == B43_PHYTYPE_N) 1153 if (dev->phy.type == B43_PHYTYPE_N)
1155 flags |= B43_TMSLOW_PHY_BANDWIDTH_20MHZ; /* Make 20 MHz def */ 1154 flags |= B43_TMSLOW_PHY_BANDWIDTH_20MHZ; /* Make 20 MHz def */
1156 ssb_device_enable(dev->dev, flags); 1155 ssb_device_enable(dev->sdev, flags);
1157 msleep(2); /* Wait for the PLL to turn on. */ 1156 msleep(2); /* Wait for the PLL to turn on. */
1158 1157
1159 /* Now take the PHY out of Reset again */ 1158 /* Now take the PHY out of Reset again */
1160 tmslow = ssb_read32(dev->dev, SSB_TMSLOW); 1159 tmslow = ssb_read32(dev->sdev, SSB_TMSLOW);
1161 tmslow |= SSB_TMSLOW_FGC; 1160 tmslow |= SSB_TMSLOW_FGC;
1162 tmslow &= ~B43_TMSLOW_PHYRESET; 1161 tmslow &= ~B43_TMSLOW_PHYRESET;
1163 ssb_write32(dev->dev, SSB_TMSLOW, tmslow); 1162 ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
1164 ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ 1163 ssb_read32(dev->sdev, SSB_TMSLOW); /* flush */
1165 msleep(1); 1164 msleep(1);
1166 tmslow &= ~SSB_TMSLOW_FGC; 1165 tmslow &= ~SSB_TMSLOW_FGC;
1167 ssb_write32(dev->dev, SSB_TMSLOW, tmslow); 1166 ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
1168 ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ 1167 ssb_read32(dev->sdev, SSB_TMSLOW); /* flush */
1169 msleep(1); 1168 msleep(1);
1169}
1170
1171void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
1172{
1173 u32 macctl;
1174
1175 b43_ssb_wireless_core_reset(dev, flags);
1170 1176
1171 /* Turn Analog ON, but only if we already know the PHY-type. 1177 /* Turn Analog ON, but only if we already know the PHY-type.
1172 * This protects against very early setup where we don't know the 1178 * This protects against very early setup where we don't know the
@@ -1215,7 +1221,7 @@ static void drain_txstatus_queue(struct b43_wldev *dev)
1215{ 1221{
1216 u32 dummy; 1222 u32 dummy;
1217 1223
1218 if (dev->dev->id.revision < 5) 1224 if (dev->sdev->id.revision < 5)
1219 return; 1225 return;
1220 /* Read all entries from the microcode TXstatus FIFO 1226 /* Read all entries from the microcode TXstatus FIFO
1221 * and throw them away. 1227 * and throw them away.
@@ -1421,9 +1427,9 @@ u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev,
1421 1427
1422 /* Get the mask of available antennas. */ 1428 /* Get the mask of available antennas. */
1423 if (dev->phy.gmode) 1429 if (dev->phy.gmode)
1424 antenna_mask = dev->dev->bus->sprom.ant_available_bg; 1430 antenna_mask = dev->sdev->bus->sprom.ant_available_bg;
1425 else 1431 else
1426 antenna_mask = dev->dev->bus->sprom.ant_available_a; 1432 antenna_mask = dev->sdev->bus->sprom.ant_available_a;
1427 1433
1428 if (!(antenna_mask & (1 << (antenna_nr - 1)))) { 1434 if (!(antenna_mask & (1 << (antenna_nr - 1)))) {
1429 /* This antenna is not available. Fall back to default. */ 1435 /* This antenna is not available. Fall back to default. */
@@ -1638,7 +1644,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
1638 mutex_lock(&wl->mutex); 1644 mutex_lock(&wl->mutex);
1639 dev = wl->current_dev; 1645 dev = wl->current_dev;
1640 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) { 1646 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
1641 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { 1647 if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
1642 /* wl->mutex is enough. */ 1648 /* wl->mutex is enough. */
1643 b43_do_beacon_update_trigger_work(dev); 1649 b43_do_beacon_update_trigger_work(dev);
1644 mmiowb(); 1650 mmiowb();
@@ -1683,7 +1689,7 @@ static void b43_update_templates(struct b43_wl *wl)
1683static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) 1689static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
1684{ 1690{
1685 b43_time_lock(dev); 1691 b43_time_lock(dev);
1686 if (dev->dev->id.revision >= 3) { 1692 if (dev->sdev->id.revision >= 3) {
1687 b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16)); 1693 b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16));
1688 b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10)); 1694 b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10));
1689 } else { 1695 } else {
@@ -2057,7 +2063,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
2057 B43_WARN_ON(1); 2063 B43_WARN_ON(1);
2058 return -ENOSYS; 2064 return -ENOSYS;
2059 } 2065 }
2060 err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev); 2066 err = request_firmware(&blob, ctx->fwname, ctx->dev->sdev->dev);
2061 if (err == -ENOENT) { 2067 if (err == -ENOENT) {
2062 snprintf(ctx->errors[ctx->req_type], 2068 snprintf(ctx->errors[ctx->req_type],
2063 sizeof(ctx->errors[ctx->req_type]), 2069 sizeof(ctx->errors[ctx->req_type]),
@@ -2107,13 +2113,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
2107{ 2113{
2108 struct b43_wldev *dev = ctx->dev; 2114 struct b43_wldev *dev = ctx->dev;
2109 struct b43_firmware *fw = &ctx->dev->fw; 2115 struct b43_firmware *fw = &ctx->dev->fw;
2110 const u8 rev = ctx->dev->dev->id.revision; 2116 const u8 rev = ctx->dev->sdev->id.revision;
2111 const char *filename; 2117 const char *filename;
2112 u32 tmshigh; 2118 u32 tmshigh;
2113 int err; 2119 int err;
2114 2120
2115 /* Get microcode */ 2121 /* Get microcode */
2116 tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH);
2117 if ((rev >= 5) && (rev <= 10)) 2122 if ((rev >= 5) && (rev <= 10))
2118 filename = "ucode5"; 2123 filename = "ucode5";
2119 else if ((rev >= 11) && (rev <= 12)) 2124 else if ((rev >= 11) && (rev <= 12))
@@ -2152,6 +2157,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
2152 switch (dev->phy.type) { 2157 switch (dev->phy.type) {
2153 case B43_PHYTYPE_A: 2158 case B43_PHYTYPE_A:
2154 if ((rev >= 5) && (rev <= 10)) { 2159 if ((rev >= 5) && (rev <= 10)) {
2160 tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
2155 if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) 2161 if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
2156 filename = "a0g1initvals5"; 2162 filename = "a0g1initvals5";
2157 else 2163 else
@@ -2196,6 +2202,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
2196 switch (dev->phy.type) { 2202 switch (dev->phy.type) {
2197 case B43_PHYTYPE_A: 2203 case B43_PHYTYPE_A:
2198 if ((rev >= 5) && (rev <= 10)) { 2204 if ((rev >= 5) && (rev <= 10)) {
2205 tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
2199 if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) 2206 if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
2200 filename = "a0g1bsinitvals5"; 2207 filename = "a0g1bsinitvals5";
2201 else 2208 else
@@ -2441,7 +2448,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
2441 2448
2442 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u", 2449 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
2443 dev->fw.rev, dev->fw.patch); 2450 dev->fw.rev, dev->fw.patch);
2444 wiphy->hw_version = dev->dev->id.coreid; 2451 wiphy->hw_version = dev->sdev->id.coreid;
2445 2452
2446 if (b43_is_old_txhdr_format(dev)) { 2453 if (b43_is_old_txhdr_format(dev)) {
2447 /* We're over the deadline, but we keep support for old fw 2454 /* We're over the deadline, but we keep support for old fw
@@ -2557,10 +2564,20 @@ out:
2557/* Initialize the GPIOs 2564/* Initialize the GPIOs
2558 * http://bcm-specs.sipsolutions.net/GPIO 2565 * http://bcm-specs.sipsolutions.net/GPIO
2559 */ 2566 */
2567static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
2568{
2569 struct ssb_bus *bus = dev->sdev->bus;
2570
2571#ifdef CONFIG_SSB_DRIVER_PCICORE
2572 return (bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev);
2573#else
2574 return bus->chipco.dev;
2575#endif
2576}
2577
2560static int b43_gpio_init(struct b43_wldev *dev) 2578static int b43_gpio_init(struct b43_wldev *dev)
2561{ 2579{
2562 struct ssb_bus *bus = dev->dev->bus; 2580 struct ssb_device *gpiodev;
2563 struct ssb_device *gpiodev, *pcidev = NULL;
2564 u32 mask, set; 2581 u32 mask, set;
2565 2582
2566 b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) 2583 b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
@@ -2571,7 +2588,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
2571 2588
2572 mask = 0x0000001F; 2589 mask = 0x0000001F;
2573 set = 0x0000000F; 2590 set = 0x0000000F;
2574 if (dev->dev->bus->chip_id == 0x4301) { 2591 if (dev->sdev->bus->chip_id == 0x4301) {
2575 mask |= 0x0060; 2592 mask |= 0x0060;
2576 set |= 0x0060; 2593 set |= 0x0060;
2577 } 2594 }
@@ -2582,25 +2599,21 @@ static int b43_gpio_init(struct b43_wldev *dev)
2582 mask |= 0x0180; 2599 mask |= 0x0180;
2583 set |= 0x0180; 2600 set |= 0x0180;
2584 } 2601 }
2585 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) { 2602 if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) {
2586 b43_write16(dev, B43_MMIO_GPIO_MASK, 2603 b43_write16(dev, B43_MMIO_GPIO_MASK,
2587 b43_read16(dev, B43_MMIO_GPIO_MASK) 2604 b43_read16(dev, B43_MMIO_GPIO_MASK)
2588 | 0x0200); 2605 | 0x0200);
2589 mask |= 0x0200; 2606 mask |= 0x0200;
2590 set |= 0x0200; 2607 set |= 0x0200;
2591 } 2608 }
2592 if (dev->dev->id.revision >= 2) 2609 if (dev->sdev->id.revision >= 2)
2593 mask |= 0x0010; /* FIXME: This is redundant. */ 2610 mask |= 0x0010; /* FIXME: This is redundant. */
2594 2611
2595#ifdef CONFIG_SSB_DRIVER_PCICORE 2612 gpiodev = b43_ssb_gpio_dev(dev);
2596 pcidev = bus->pcicore.dev; 2613 if (gpiodev)
2597#endif 2614 ssb_write32(gpiodev, B43_GPIO_CONTROL,
2598 gpiodev = bus->chipco.dev ? : pcidev; 2615 (ssb_read32(gpiodev, B43_GPIO_CONTROL)
2599 if (!gpiodev) 2616 & mask) | set);
2600 return 0;
2601 ssb_write32(gpiodev, B43_GPIO_CONTROL,
2602 (ssb_read32(gpiodev, B43_GPIO_CONTROL)
2603 & mask) | set);
2604 2617
2605 return 0; 2618 return 0;
2606} 2619}
@@ -2608,16 +2621,11 @@ static int b43_gpio_init(struct b43_wldev *dev)
2608/* Turn off all GPIO stuff. Call this on module unload, for example. */ 2621/* Turn off all GPIO stuff. Call this on module unload, for example. */
2609static void b43_gpio_cleanup(struct b43_wldev *dev) 2622static void b43_gpio_cleanup(struct b43_wldev *dev)
2610{ 2623{
2611 struct ssb_bus *bus = dev->dev->bus; 2624 struct ssb_device *gpiodev;
2612 struct ssb_device *gpiodev, *pcidev = NULL;
2613 2625
2614#ifdef CONFIG_SSB_DRIVER_PCICORE 2626 gpiodev = b43_ssb_gpio_dev(dev);
2615 pcidev = bus->pcicore.dev; 2627 if (gpiodev)
2616#endif 2628 ssb_write32(gpiodev, B43_GPIO_CONTROL, 0);
2617 gpiodev = bus->chipco.dev ? : pcidev;
2618 if (!gpiodev)
2619 return;
2620 ssb_write32(gpiodev, B43_GPIO_CONTROL, 0);
2621} 2629}
2622 2630
2623/* http://bcm-specs.sipsolutions.net/EnableMac */ 2631/* http://bcm-specs.sipsolutions.net/EnableMac */
@@ -2689,12 +2697,12 @@ out:
2689/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */ 2697/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
2690void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on) 2698void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
2691{ 2699{
2692 u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW); 2700 u32 tmslow = ssb_read32(dev->sdev, SSB_TMSLOW);
2693 if (on) 2701 if (on)
2694 tmslow |= B43_TMSLOW_MACPHYCLKEN; 2702 tmslow |= B43_TMSLOW_MACPHYCLKEN;
2695 else 2703 else
2696 tmslow &= ~B43_TMSLOW_MACPHYCLKEN; 2704 tmslow &= ~B43_TMSLOW_MACPHYCLKEN;
2697 ssb_write32(dev->dev, SSB_TMSLOW, tmslow); 2705 ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
2698} 2706}
2699 2707
2700static void b43_adjust_opmode(struct b43_wldev *dev) 2708static void b43_adjust_opmode(struct b43_wldev *dev)
@@ -2733,15 +2741,15 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
2733 /* Workaround: On old hardware the HW-MAC-address-filter 2741 /* Workaround: On old hardware the HW-MAC-address-filter
2734 * doesn't work properly, so always run promisc in filter 2742 * doesn't work properly, so always run promisc in filter
2735 * it in software. */ 2743 * it in software. */
2736 if (dev->dev->id.revision <= 4) 2744 if (dev->sdev->id.revision <= 4)
2737 ctl |= B43_MACCTL_PROMISC; 2745 ctl |= B43_MACCTL_PROMISC;
2738 2746
2739 b43_write32(dev, B43_MMIO_MACCTL, ctl); 2747 b43_write32(dev, B43_MMIO_MACCTL, ctl);
2740 2748
2741 cfp_pretbtt = 2; 2749 cfp_pretbtt = 2;
2742 if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) { 2750 if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) {
2743 if (dev->dev->bus->chip_id == 0x4306 && 2751 if (dev->sdev->bus->chip_id == 0x4306 &&
2744 dev->dev->bus->chip_rev == 3) 2752 dev->sdev->bus->chip_rev == 3)
2745 cfp_pretbtt = 100; 2753 cfp_pretbtt = 100;
2746 else 2754 else
2747 cfp_pretbtt = 50; 2755 cfp_pretbtt = 50;
@@ -2899,7 +2907,7 @@ static int b43_chip_init(struct b43_wldev *dev)
2899 b43_write16(dev, 0x005E, value16); 2907 b43_write16(dev, 0x005E, value16);
2900 } 2908 }
2901 b43_write32(dev, 0x0100, 0x01000000); 2909 b43_write32(dev, 0x0100, 0x01000000);
2902 if (dev->dev->id.revision < 5) 2910 if (dev->sdev->id.revision < 5)
2903 b43_write32(dev, 0x010C, 0x01000000); 2911 b43_write32(dev, 0x010C, 0x01000000);
2904 2912
2905 b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) 2913 b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
@@ -2914,7 +2922,7 @@ static int b43_chip_init(struct b43_wldev *dev)
2914 /* Initially set the wireless operation mode. */ 2922 /* Initially set the wireless operation mode. */
2915 b43_adjust_opmode(dev); 2923 b43_adjust_opmode(dev);
2916 2924
2917 if (dev->dev->id.revision < 3) { 2925 if (dev->sdev->id.revision < 3) {
2918 b43_write16(dev, 0x060E, 0x0000); 2926 b43_write16(dev, 0x060E, 0x0000);
2919 b43_write16(dev, 0x0610, 0x8000); 2927 b43_write16(dev, 0x0610, 0x8000);
2920 b43_write16(dev, 0x0604, 0x0000); 2928 b43_write16(dev, 0x0604, 0x0000);
@@ -2934,7 +2942,7 @@ static int b43_chip_init(struct b43_wldev *dev)
2934 b43_mac_phy_clock_set(dev, true); 2942 b43_mac_phy_clock_set(dev, true);
2935 2943
2936 b43_write16(dev, B43_MMIO_POWERUP_DELAY, 2944 b43_write16(dev, B43_MMIO_POWERUP_DELAY,
2937 dev->dev->bus->chipco.fast_pwrup_delay); 2945 dev->sdev->bus->chipco.fast_pwrup_delay);
2938 2946
2939 err = 0; 2947 err = 0;
2940 b43dbg(dev->wl, "Chip initialized\n"); 2948 b43dbg(dev->wl, "Chip initialized\n");
@@ -3097,7 +3105,7 @@ static int b43_validate_chipaccess(struct b43_wldev *dev)
3097 b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0); 3105 b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0);
3098 b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4); 3106 b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4);
3099 3107
3100 if ((dev->dev->id.revision >= 3) && (dev->dev->id.revision <= 10)) { 3108 if ((dev->sdev->id.revision >= 3) && (dev->sdev->id.revision <= 10)) {
3101 /* The 32bit register shadows the two 16bit registers 3109 /* The 32bit register shadows the two 16bit registers
3102 * with update sideeffects. Validate this. */ 3110 * with update sideeffects. Validate this. */
3103 b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA); 3111 b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA);
@@ -3450,7 +3458,7 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
3450 3458
3451static void b43_put_phy_into_reset(struct b43_wldev *dev) 3459static void b43_put_phy_into_reset(struct b43_wldev *dev)
3452{ 3460{
3453 struct ssb_device *sdev = dev->dev; 3461 struct ssb_device *sdev = dev->sdev;
3454 u32 tmslow; 3462 u32 tmslow;
3455 3463
3456 tmslow = ssb_read32(sdev, SSB_TMSLOW); 3464 tmslow = ssb_read32(sdev, SSB_TMSLOW);
@@ -3946,7 +3954,7 @@ redo:
3946 3954
3947 /* Disable interrupts on the device. */ 3955 /* Disable interrupts on the device. */
3948 b43_set_status(dev, B43_STAT_INITIALIZED); 3956 b43_set_status(dev, B43_STAT_INITIALIZED);
3949 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { 3957 if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
3950 /* wl->mutex is locked. That is enough. */ 3958 /* wl->mutex is locked. That is enough. */
3951 b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); 3959 b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
3952 b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ 3960 b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
@@ -3959,11 +3967,11 @@ redo:
3959 /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */ 3967 /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */
3960 orig_dev = dev; 3968 orig_dev = dev;
3961 mutex_unlock(&wl->mutex); 3969 mutex_unlock(&wl->mutex);
3962 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { 3970 if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
3963 b43_sdio_free_irq(dev); 3971 b43_sdio_free_irq(dev);
3964 } else { 3972 } else {
3965 synchronize_irq(dev->dev->irq); 3973 synchronize_irq(dev->sdev->irq);
3966 free_irq(dev->dev->irq, dev); 3974 free_irq(dev->sdev->irq, dev);
3967 } 3975 }
3968 mutex_lock(&wl->mutex); 3976 mutex_lock(&wl->mutex);
3969 dev = wl->current_dev; 3977 dev = wl->current_dev;
@@ -3996,18 +4004,19 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
3996 B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); 4004 B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED);
3997 4005
3998 drain_txstatus_queue(dev); 4006 drain_txstatus_queue(dev);
3999 if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { 4007 if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
4000 err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler); 4008 err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler);
4001 if (err) { 4009 if (err) {
4002 b43err(dev->wl, "Cannot request SDIO IRQ\n"); 4010 b43err(dev->wl, "Cannot request SDIO IRQ\n");
4003 goto out; 4011 goto out;
4004 } 4012 }
4005 } else { 4013 } else {
4006 err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler, 4014 err = request_threaded_irq(dev->sdev->irq, b43_interrupt_handler,
4007 b43_interrupt_thread_handler, 4015 b43_interrupt_thread_handler,
4008 IRQF_SHARED, KBUILD_MODNAME, dev); 4016 IRQF_SHARED, KBUILD_MODNAME, dev);
4009 if (err) { 4017 if (err) {
4010 b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); 4018 b43err(dev->wl, "Cannot request IRQ-%d\n",
4019 dev->sdev->irq);
4011 goto out; 4020 goto out;
4012 } 4021 }
4013 } 4022 }
@@ -4087,10 +4096,10 @@ static int b43_phy_versioning(struct b43_wldev *dev)
4087 analog_type, phy_type, phy_rev); 4096 analog_type, phy_type, phy_rev);
4088 4097
4089 /* Get RADIO versioning */ 4098 /* Get RADIO versioning */
4090 if (dev->dev->bus->chip_id == 0x4317) { 4099 if (dev->sdev->bus->chip_id == 0x4317) {
4091 if (dev->dev->bus->chip_rev == 0) 4100 if (dev->sdev->bus->chip_rev == 0)
4092 tmp = 0x3205017F; 4101 tmp = 0x3205017F;
4093 else if (dev->dev->bus->chip_rev == 1) 4102 else if (dev->sdev->bus->chip_rev == 1)
4094 tmp = 0x4205017F; 4103 tmp = 0x4205017F;
4095 else 4104 else
4096 tmp = 0x5205017F; 4105 tmp = 0x5205017F;
@@ -4195,7 +4204,7 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev)
4195 4204
4196static void b43_bluetooth_coext_enable(struct b43_wldev *dev) 4205static void b43_bluetooth_coext_enable(struct b43_wldev *dev)
4197{ 4206{
4198 struct ssb_sprom *sprom = &dev->dev->bus->sprom; 4207 struct ssb_sprom *sprom = &dev->sdev->bus->sprom;
4199 u64 hf; 4208 u64 hf;
4200 4209
4201 if (!modparam_btcoex) 4210 if (!modparam_btcoex)
@@ -4222,16 +4231,16 @@ static void b43_bluetooth_coext_disable(struct b43_wldev *dev)
4222 4231
4223static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) 4232static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
4224{ 4233{
4225 struct ssb_bus *bus = dev->dev->bus; 4234 struct ssb_bus *bus = dev->sdev->bus;
4226 u32 tmp; 4235 u32 tmp;
4227 4236
4228 if ((bus->chip_id == 0x4311 && bus->chip_rev == 2) || 4237 if ((bus->chip_id == 0x4311 && bus->chip_rev == 2) ||
4229 (bus->chip_id == 0x4312)) { 4238 (bus->chip_id == 0x4312)) {
4230 tmp = ssb_read32(dev->dev, SSB_IMCFGLO); 4239 tmp = ssb_read32(dev->sdev, SSB_IMCFGLO);
4231 tmp &= ~SSB_IMCFGLO_REQTO; 4240 tmp &= ~SSB_IMCFGLO_REQTO;
4232 tmp &= ~SSB_IMCFGLO_SERTO; 4241 tmp &= ~SSB_IMCFGLO_SERTO;
4233 tmp |= 0x3; 4242 tmp |= 0x3;
4234 ssb_write32(dev->dev, SSB_IMCFGLO, tmp); 4243 ssb_write32(dev->sdev, SSB_IMCFGLO, tmp);
4235 ssb_commit_settings(bus); 4244 ssb_commit_settings(bus);
4236 } 4245 }
4237} 4246}
@@ -4301,14 +4310,14 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4301 dev->wl->current_beacon = NULL; 4310 dev->wl->current_beacon = NULL;
4302 } 4311 }
4303 4312
4304 ssb_device_disable(dev->dev, 0); 4313 ssb_device_disable(dev->sdev, 0);
4305 ssb_bus_may_powerdown(dev->dev->bus); 4314 ssb_bus_may_powerdown(dev->sdev->bus);
4306} 4315}
4307 4316
4308/* Initialize a wireless core */ 4317/* Initialize a wireless core */
4309static int b43_wireless_core_init(struct b43_wldev *dev) 4318static int b43_wireless_core_init(struct b43_wldev *dev)
4310{ 4319{
4311 struct ssb_bus *bus = dev->dev->bus; 4320 struct ssb_bus *bus = dev->sdev->bus;
4312 struct ssb_sprom *sprom = &bus->sprom; 4321 struct ssb_sprom *sprom = &bus->sprom;
4313 struct b43_phy *phy = &dev->phy; 4322 struct b43_phy *phy = &dev->phy;
4314 int err; 4323 int err;
@@ -4320,7 +4329,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4320 err = ssb_bus_powerup(bus, 0); 4329 err = ssb_bus_powerup(bus, 0);
4321 if (err) 4330 if (err)
4322 goto out; 4331 goto out;
4323 if (!ssb_device_is_enabled(dev->dev)) { 4332 if (!ssb_device_is_enabled(dev->sdev)) {
4324 tmp = phy->gmode ? B43_TMSLOW_GMODE : 0; 4333 tmp = phy->gmode ? B43_TMSLOW_GMODE : 0;
4325 b43_wireless_core_reset(dev, tmp); 4334 b43_wireless_core_reset(dev, tmp);
4326 } 4335 }
@@ -4330,7 +4339,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4330 phy->ops->prepare_structs(dev); 4339 phy->ops->prepare_structs(dev);
4331 4340
4332 /* Enable IRQ routing to this device. */ 4341 /* Enable IRQ routing to this device. */
4333 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); 4342 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->sdev);
4334 4343
4335 b43_imcfglo_timeouts_workaround(dev); 4344 b43_imcfglo_timeouts_workaround(dev);
4336 b43_bluetooth_coext_disable(dev); 4345 b43_bluetooth_coext_disable(dev);
@@ -4343,7 +4352,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4343 if (err) 4352 if (err)
4344 goto err_busdown; 4353 goto err_busdown;
4345 b43_shm_write16(dev, B43_SHM_SHARED, 4354 b43_shm_write16(dev, B43_SHM_SHARED,
4346 B43_SHM_SH_WLCOREREV, dev->dev->id.revision); 4355 B43_SHM_SH_WLCOREREV, dev->sdev->id.revision);
4347 hf = b43_hf_read(dev); 4356 hf = b43_hf_read(dev);
4348 if (phy->type == B43_PHYTYPE_G) { 4357 if (phy->type == B43_PHYTYPE_G) {
4349 hf |= B43_HF_SYMW; 4358 hf |= B43_HF_SYMW;
@@ -4390,8 +4399,8 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4390 /* Maximum Contention Window */ 4399 /* Maximum Contention Window */
4391 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); 4400 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
4392 4401
4393 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || 4402 if ((dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
4394 (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || 4403 (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) ||
4395 dev->use_pio) { 4404 dev->use_pio) {
4396 dev->__using_pio_transfers = 1; 4405 dev->__using_pio_transfers = 1;
4397 err = b43_pio_init(dev); 4406 err = b43_pio_init(dev);
@@ -4728,7 +4737,7 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
4728static int b43_wireless_core_attach(struct b43_wldev *dev) 4737static int b43_wireless_core_attach(struct b43_wldev *dev)
4729{ 4738{
4730 struct b43_wl *wl = dev->wl; 4739 struct b43_wl *wl = dev->wl;
4731 struct ssb_bus *bus = dev->dev->bus; 4740 struct ssb_bus *bus = dev->sdev->bus;
4732 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL; 4741 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
4733 int err; 4742 int err;
4734 bool have_2ghz_phy = 0, have_5ghz_phy = 0; 4743 bool have_2ghz_phy = 0, have_5ghz_phy = 0;
@@ -4747,10 +4756,10 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4747 goto out; 4756 goto out;
4748 } 4757 }
4749 /* Get the PHY type. */ 4758 /* Get the PHY type. */
4750 if (dev->dev->id.revision >= 5) { 4759 if (dev->sdev->id.revision >= 5) {
4751 u32 tmshigh; 4760 u32 tmshigh;
4752 4761
4753 tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); 4762 tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
4754 have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY); 4763 have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY);
4755 have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY); 4764 have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY);
4756 } else 4765 } else
@@ -4823,7 +4832,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4823 INIT_WORK(&dev->restart_work, b43_chip_reset); 4832 INIT_WORK(&dev->restart_work, b43_chip_reset);
4824 4833
4825 dev->phy.ops->switch_analog(dev, 0); 4834 dev->phy.ops->switch_analog(dev, 0);
4826 ssb_device_disable(dev->dev, 0); 4835 ssb_device_disable(dev->sdev, 0);
4827 ssb_bus_may_powerdown(bus); 4836 ssb_bus_may_powerdown(bus);
4828 4837
4829out: 4838out:
@@ -4864,7 +4873,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
4864 goto out; 4873 goto out;
4865 4874
4866 wldev->use_pio = b43_modparam_pio; 4875 wldev->use_pio = b43_modparam_pio;
4867 wldev->dev = dev; 4876 wldev->sdev = dev;
4868 wldev->wl = wl; 4877 wldev->wl = wl;
4869 b43_set_status(wldev, B43_STAT_UNINIT); 4878 b43_set_status(wldev, B43_STAT_UNINIT);
4870 wldev->bad_frames_preempt = modparam_bad_frames_preempt; 4879 wldev->bad_frames_preempt = modparam_bad_frames_preempt;
@@ -4925,19 +4934,16 @@ static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl)
 	ieee80211_free_hw(hw);
 }
 
-static int b43_wireless_init(struct ssb_device *dev)
+static struct b43_wl *b43_wireless_init(struct ssb_device *dev)
 {
 	struct ssb_sprom *sprom = &dev->bus->sprom;
 	struct ieee80211_hw *hw;
 	struct b43_wl *wl;
-	int err = -ENOMEM;
-
-	b43_sprom_fixup(dev->bus);
 
 	hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
 	if (!hw) {
 		b43err(NULL, "Could not allocate ieee80211 device\n");
-		goto out;
+		return ERR_PTR(-ENOMEM);
 	}
 	wl = hw_to_b43_wl(hw);
 
@@ -4971,12 +4977,9 @@ static int b43_wireless_init(struct ssb_device *dev)
 	INIT_WORK(&wl->tx_work, b43_tx_work);
 	skb_queue_head_init(&wl->tx_queue);
 
-	ssb_set_devtypedata(dev, wl);
 	b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n",
 		dev->bus->chip_id, dev->id.revision);
-	err = 0;
-out:
-	return err;
+	return wl;
 }
 
 static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id)
@@ -4989,11 +4992,14 @@ static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id)
 	if (!wl) {
 		/* Probing the first core. Must setup common struct b43_wl */
 		first = 1;
-		err = b43_wireless_init(dev);
-		if (err)
+		b43_sprom_fixup(dev->bus);
+		wl = b43_wireless_init(dev);
+		if (IS_ERR(wl)) {
+			err = PTR_ERR(wl);
 			goto out;
-		wl = ssb_get_devtypedata(dev);
-		B43_WARN_ON(!wl);
+		}
+		ssb_set_devtypedata(dev, wl);
+		B43_WARN_ON(ssb_get_devtypedata(dev) != wl);
 	}
 	err = b43_one_core_attach(dev, wl);
 	if (err)
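The b43_wireless_init() rework above drops the int return and the ssb_get_devtypedata() round-trip: the function now hands back either a valid struct b43_wl pointer or an errno encoded with ERR_PTR(), and b43_ssb_probe() unpacks it with IS_ERR()/PTR_ERR() before publishing it via ssb_set_devtypedata(). For readers who have not met that idiom, here is a minimal userspace sketch; the ERR_PTR/IS_ERR/PTR_ERR macros are simplified stand-ins for the kernel's <linux/err.h>, and struct wl plus wl_init() are purely hypothetical:

/*
 * Minimal userspace sketch of the ERR_PTR idiom; the three macros are
 * simplified stand-ins for <linux/err.h>, struct wl and wl_init() are
 * hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct wl { int id; };

/* Return a valid object, or an errno encoded in the pointer. */
static struct wl *wl_init(int force_fail)
{
	struct wl *wl;

	if (force_fail)
		return ERR_PTR(-ENOMEM);
	wl = malloc(sizeof(*wl));
	if (!wl)
		return ERR_PTR(-ENOMEM);
	wl->id = 42;
	return wl;
}

int main(void)
{
	struct wl *wl = wl_init(0);

	if (IS_ERR(wl)) {
		fprintf(stderr, "init failed: %ld\n", PTR_ERR(wl));
		return 1;
	}
	printf("initialized wl %d\n", wl->id);
	free(wl);
	return 0;
}

The point mirrored by the hunk above is that one pointer carries either the object or the error code, so the caller needs neither an out-parameter nor a separate lookup to retrieve what was just created.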
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
index b6428ec16dd6..b01c8ced57c3 100644
--- a/drivers/net/wireless/b43/phy_a.c
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -265,7 +265,7 @@ static void hardware_pctl_init_aphy(struct b43_wldev *dev)
265 265
266void b43_phy_inita(struct b43_wldev *dev) 266void b43_phy_inita(struct b43_wldev *dev)
267{ 267{
268 struct ssb_bus *bus = dev->dev->bus; 268 struct ssb_bus *bus = dev->sdev->bus;
269 struct b43_phy *phy = &dev->phy; 269 struct b43_phy *phy = &dev->phy;
270 270
271 /* This lowlevel A-PHY init is also called from G-PHY init. 271 /* This lowlevel A-PHY init is also called from G-PHY init.
@@ -311,7 +311,7 @@ void b43_phy_inita(struct b43_wldev *dev)
311 } 311 }
312 312
313 if ((phy->type == B43_PHYTYPE_G) && 313 if ((phy->type == B43_PHYTYPE_G) &&
314 (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) { 314 (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
315 b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF); 315 b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
316 } 316 }
317} 317}
@@ -323,17 +323,17 @@ static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
323 struct b43_phy_a *aphy = phy->a; 323 struct b43_phy_a *aphy = phy->a;
324 s16 pab0, pab1, pab2; 324 s16 pab0, pab1, pab2;
325 325
326 pab0 = (s16) (dev->dev->bus->sprom.pa1b0); 326 pab0 = (s16) (dev->sdev->bus->sprom.pa1b0);
327 pab1 = (s16) (dev->dev->bus->sprom.pa1b1); 327 pab1 = (s16) (dev->sdev->bus->sprom.pa1b1);
328 pab2 = (s16) (dev->dev->bus->sprom.pa1b2); 328 pab2 = (s16) (dev->sdev->bus->sprom.pa1b2);
329 329
330 if (pab0 != 0 && pab1 != 0 && pab2 != 0 && 330 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
331 pab0 != -1 && pab1 != -1 && pab2 != -1) { 331 pab0 != -1 && pab1 != -1 && pab2 != -1) {
332 /* The pabX values are set in SPROM. Use them. */ 332 /* The pabX values are set in SPROM. Use them. */
333 if ((s8) dev->dev->bus->sprom.itssi_a != 0 && 333 if ((s8) dev->sdev->bus->sprom.itssi_a != 0 &&
334 (s8) dev->dev->bus->sprom.itssi_a != -1) 334 (s8) dev->sdev->bus->sprom.itssi_a != -1)
335 aphy->tgt_idle_tssi = 335 aphy->tgt_idle_tssi =
336 (s8) (dev->dev->bus->sprom.itssi_a); 336 (s8) (dev->sdev->bus->sprom.itssi_a);
337 else 337 else
338 aphy->tgt_idle_tssi = 62; 338 aphy->tgt_idle_tssi = 62;
339 aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, 339 aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index b5c5ce94d3fd..e46b2f4f0920 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -168,7 +168,7 @@ void b43_phy_lock(struct b43_wldev *dev)
168 B43_WARN_ON(dev->phy.phy_locked); 168 B43_WARN_ON(dev->phy.phy_locked);
169 dev->phy.phy_locked = 1; 169 dev->phy.phy_locked = 1;
170#endif 170#endif
171 B43_WARN_ON(dev->dev->id.revision < 3); 171 B43_WARN_ON(dev->sdev->id.revision < 3);
172 172
173 if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) 173 if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
174 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); 174 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
@@ -180,7 +180,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
180 B43_WARN_ON(!dev->phy.phy_locked); 180 B43_WARN_ON(!dev->phy.phy_locked);
181 dev->phy.phy_locked = 0; 181 dev->phy.phy_locked = 0;
182#endif 182#endif
183 B43_WARN_ON(dev->dev->id.revision < 3); 183 B43_WARN_ON(dev->sdev->id.revision < 3);
184 184
185 if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) 185 if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
186 b43_power_saving_ctl_bits(dev, 0); 186 b43_power_saving_ctl_bits(dev, 0);
@@ -368,8 +368,8 @@ void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags)
368 /* The next check will be needed in two seconds, or later. */ 368 /* The next check will be needed in two seconds, or later. */
369 phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2)); 369 phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2));
370 370
371 if ((dev->dev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && 371 if ((dev->sdev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
372 (dev->dev->bus->boardinfo.type == SSB_BOARD_BU4306)) 372 (dev->sdev->bus->boardinfo.type == SSB_BOARD_BU4306))
373 return; /* No software txpower adjustment needed */ 373 return; /* No software txpower adjustment needed */
374 374
375 result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI)); 375 result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI));
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index be4828167012..1758a282f913 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -718,7 +718,7 @@ static void b43_calc_nrssi_threshold(struct b43_wldev *dev)
718 B43_WARN_ON(phy->type != B43_PHYTYPE_G); 718 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
719 719
720 if (!phy->gmode || 720 if (!phy->gmode ||
721 !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { 721 !(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
722 tmp16 = b43_nrssi_hw_read(dev, 0x20); 722 tmp16 = b43_nrssi_hw_read(dev, 0x20);
723 if (tmp16 >= 0x20) 723 if (tmp16 >= 0x20)
724 tmp16 -= 0x40; 724 tmp16 -= 0x40;
@@ -1114,7 +1114,7 @@ static u16 radio2050_rfover_val(struct b43_wldev *dev,
1114{ 1114{
1115 struct b43_phy *phy = &dev->phy; 1115 struct b43_phy *phy = &dev->phy;
1116 struct b43_phy_g *gphy = phy->g; 1116 struct b43_phy_g *gphy = phy->g;
1117 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 1117 struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
1118 1118
1119 if (!phy->gmode) 1119 if (!phy->gmode)
1120 return 0; 1120 return 0;
@@ -1491,7 +1491,7 @@ static u16 b43_radio_init2050(struct b43_wldev *dev)
1491 1491
1492static void b43_phy_initb5(struct b43_wldev *dev) 1492static void b43_phy_initb5(struct b43_wldev *dev)
1493{ 1493{
1494 struct ssb_bus *bus = dev->dev->bus; 1494 struct ssb_bus *bus = dev->sdev->bus;
1495 struct b43_phy *phy = &dev->phy; 1495 struct b43_phy *phy = &dev->phy;
1496 struct b43_phy_g *gphy = phy->g; 1496 struct b43_phy_g *gphy = phy->g;
1497 u16 offset, value; 1497 u16 offset, value;
@@ -1620,7 +1620,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
1620 b43_radio_write16(dev, 0x5A, 0x88); 1620 b43_radio_write16(dev, 0x5A, 0x88);
1621 b43_radio_write16(dev, 0x5B, 0x6B); 1621 b43_radio_write16(dev, 0x5B, 0x6B);
1622 b43_radio_write16(dev, 0x5C, 0x0F); 1622 b43_radio_write16(dev, 0x5C, 0x0F);
1623 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) { 1623 if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
1624 b43_radio_write16(dev, 0x5D, 0xFA); 1624 b43_radio_write16(dev, 0x5D, 0xFA);
1625 b43_radio_write16(dev, 0x5E, 0xD8); 1625 b43_radio_write16(dev, 0x5E, 0xD8);
1626 } else { 1626 } else {
@@ -1787,7 +1787,7 @@ static void b43_calc_loopback_gain(struct b43_wldev *dev)
1787 b43_phy_set(dev, B43_PHY_RFOVER, 0x0100); 1787 b43_phy_set(dev, B43_PHY_RFOVER, 0x0100);
1788 b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xCFFF); 1788 b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xCFFF);
1789 1789
1790 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) { 1790 if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
1791 if (phy->rev >= 7) { 1791 if (phy->rev >= 7) {
1792 b43_phy_set(dev, B43_PHY_RFOVER, 0x0800); 1792 b43_phy_set(dev, B43_PHY_RFOVER, 0x0800);
1793 b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x8000); 1793 b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x8000);
@@ -1922,7 +1922,7 @@ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev)
1922/* Initialize B/G PHY power control */ 1922/* Initialize B/G PHY power control */
1923static void b43_phy_init_pctl(struct b43_wldev *dev) 1923static void b43_phy_init_pctl(struct b43_wldev *dev)
1924{ 1924{
1925 struct ssb_bus *bus = dev->dev->bus; 1925 struct ssb_bus *bus = dev->sdev->bus;
1926 struct b43_phy *phy = &dev->phy; 1926 struct b43_phy *phy = &dev->phy;
1927 struct b43_phy_g *gphy = phy->g; 1927 struct b43_phy_g *gphy = phy->g;
1928 struct b43_rfatt old_rfatt; 1928 struct b43_rfatt old_rfatt;
@@ -2053,7 +2053,7 @@ static void b43_phy_initg(struct b43_wldev *dev)
2053 if (phy->rev >= 6) { 2053 if (phy->rev >= 6) {
2054 b43_phy_maskset(dev, B43_PHY_CCK(0x36), 0x0FFF, (gphy->lo_control->tx_bias << 12)); 2054 b43_phy_maskset(dev, B43_PHY_CCK(0x36), 0x0FFF, (gphy->lo_control->tx_bias << 12));
2055 } 2055 }
2056 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) 2056 if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
2057 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075); 2057 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
2058 else 2058 else
2059 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F); 2059 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
@@ -2066,7 +2066,7 @@ static void b43_phy_initg(struct b43_wldev *dev)
2066 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); 2066 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
2067 } 2067 }
2068 2068
2069 if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { 2069 if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
2070 /* The specs state to update the NRSSI LT with 2070 /* The specs state to update the NRSSI LT with
2071 * the value 0x7FFFFFFF here. I think that is some weird 2071 * the value 0x7FFFFFFF here. I think that is some weird
2072 * compiler optimization in the original driver. 2072 * compiler optimization in the original driver.
@@ -2088,8 +2088,8 @@ static void b43_phy_initg(struct b43_wldev *dev)
2088 /* FIXME: The spec says in the following if, the 0 should be replaced 2088 /* FIXME: The spec says in the following if, the 0 should be replaced
2089 'if OFDM may not be used in the current locale' 2089 'if OFDM may not be used in the current locale'
2090 but OFDM is legal everywhere */ 2090 but OFDM is legal everywhere */
2091 if ((dev->dev->bus->chip_id == 0x4306 2091 if ((dev->sdev->bus->chip_id == 0x4306
2092 && dev->dev->bus->chip_package == 2) || 0) { 2092 && dev->sdev->bus->chip_package == 2) || 0) {
2093 b43_phy_mask(dev, B43_PHY_CRS0, 0xBFFF); 2093 b43_phy_mask(dev, B43_PHY_CRS0, 0xBFFF);
2094 b43_phy_mask(dev, B43_PHY_OFDM(0xC3), 0x7FFF); 2094 b43_phy_mask(dev, B43_PHY_OFDM(0xC3), 0x7FFF);
2095 } 2095 }
@@ -2105,7 +2105,7 @@ void b43_gphy_channel_switch(struct b43_wldev *dev,
2105 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); 2105 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
2106 2106
2107 if (channel == 14) { 2107 if (channel == 14) {
2108 if (dev->dev->bus->sprom.country_code == 2108 if (dev->sdev->bus->sprom.country_code ==
2109 SSB_SPROM1CCODE_JAPAN) 2109 SSB_SPROM1CCODE_JAPAN)
2110 b43_hf_write(dev, 2110 b43_hf_write(dev,
2111 b43_hf_read(dev) & ~B43_HF_ACPR); 2111 b43_hf_read(dev) & ~B43_HF_ACPR);
@@ -2136,7 +2136,7 @@ static void default_baseband_attenuation(struct b43_wldev *dev,
2136static void default_radio_attenuation(struct b43_wldev *dev, 2136static void default_radio_attenuation(struct b43_wldev *dev,
2137 struct b43_rfatt *rf) 2137 struct b43_rfatt *rf)
2138{ 2138{
2139 struct ssb_bus *bus = dev->dev->bus; 2139 struct ssb_bus *bus = dev->sdev->bus;
2140 struct b43_phy *phy = &dev->phy; 2140 struct b43_phy *phy = &dev->phy;
2141 2141
2142 rf->with_padmix = 0; 2142 rf->with_padmix = 0;
@@ -2384,11 +2384,11 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
2384 struct b43_phy_g *gphy = phy->g; 2384 struct b43_phy_g *gphy = phy->g;
2385 s16 pab0, pab1, pab2; 2385 s16 pab0, pab1, pab2;
2386 2386
2387 pab0 = (s16) (dev->dev->bus->sprom.pa0b0); 2387 pab0 = (s16) (dev->sdev->bus->sprom.pa0b0);
2388 pab1 = (s16) (dev->dev->bus->sprom.pa0b1); 2388 pab1 = (s16) (dev->sdev->bus->sprom.pa0b1);
2389 pab2 = (s16) (dev->dev->bus->sprom.pa0b2); 2389 pab2 = (s16) (dev->sdev->bus->sprom.pa0b2);
2390 2390
2391 B43_WARN_ON((dev->dev->bus->chip_id == 0x4301) && 2391 B43_WARN_ON((dev->sdev->bus->chip_id == 0x4301) &&
2392 (phy->radio_ver != 0x2050)); /* Not supported anymore */ 2392 (phy->radio_ver != 0x2050)); /* Not supported anymore */
2393 2393
2394 gphy->dyn_tssi_tbl = 0; 2394 gphy->dyn_tssi_tbl = 0;
@@ -2396,10 +2396,10 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
2396 if (pab0 != 0 && pab1 != 0 && pab2 != 0 && 2396 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
2397 pab0 != -1 && pab1 != -1 && pab2 != -1) { 2397 pab0 != -1 && pab1 != -1 && pab2 != -1) {
2398 /* The pabX values are set in SPROM. Use them. */ 2398 /* The pabX values are set in SPROM. Use them. */
2399 if ((s8) dev->dev->bus->sprom.itssi_bg != 0 && 2399 if ((s8) dev->sdev->bus->sprom.itssi_bg != 0 &&
2400 (s8) dev->dev->bus->sprom.itssi_bg != -1) { 2400 (s8) dev->sdev->bus->sprom.itssi_bg != -1) {
2401 gphy->tgt_idle_tssi = 2401 gphy->tgt_idle_tssi =
2402 (s8) (dev->dev->bus->sprom.itssi_bg); 2402 (s8) (dev->sdev->bus->sprom.itssi_bg);
2403 } else 2403 } else
2404 gphy->tgt_idle_tssi = 62; 2404 gphy->tgt_idle_tssi = 62;
2405 gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, 2405 gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
@@ -2840,7 +2840,7 @@ static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
2840 B43_TXCTL_TXMIX; 2840 B43_TXCTL_TXMIX;
2841 rfatt += 2; 2841 rfatt += 2;
2842 bbatt += 2; 2842 bbatt += 2;
2843 } else if (dev->dev->bus->sprom. 2843 } else if (dev->sdev->bus->sprom.
2844 boardflags_lo & 2844 boardflags_lo &
2845 B43_BFL_PACTRL) { 2845 B43_BFL_PACTRL) {
2846 bbatt += 4 * (rfatt - 2); 2846 bbatt += 4 * (rfatt - 2);
@@ -2914,14 +2914,14 @@ static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
2914 estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi); 2914 estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi);
2915 2915
2916 B43_WARN_ON(phy->type != B43_PHYTYPE_G); 2916 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2917 max_pwr = dev->dev->bus->sprom.maxpwr_bg; 2917 max_pwr = dev->sdev->bus->sprom.maxpwr_bg;
2918 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) 2918 if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
2919 max_pwr -= 3; /* minus 0.75 */ 2919 max_pwr -= 3; /* minus 0.75 */
2920 if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) { 2920 if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) {
2921 b43warn(dev->wl, 2921 b43warn(dev->wl,
2922 "Invalid max-TX-power value in SPROM.\n"); 2922 "Invalid max-TX-power value in SPROM.\n");
2923 max_pwr = INT_TO_Q52(20); /* fake it */ 2923 max_pwr = INT_TO_Q52(20); /* fake it */
2924 dev->dev->bus->sprom.maxpwr_bg = max_pwr; 2924 dev->sdev->bus->sprom.maxpwr_bg = max_pwr;
2925 } 2925 }
2926 2926
2927 /* Get desired power (in Q5.2) */ 2927 /* Get desired power (in Q5.2) */
@@ -3014,7 +3014,7 @@ static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev)
3014{ 3014{
3015 struct b43_phy *phy = &dev->phy; 3015 struct b43_phy *phy = &dev->phy;
3016 3016
3017 if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) 3017 if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI))
3018 return; 3018 return;
3019 3019
3020 b43_mac_suspend(dev); 3020 b43_mac_suspend(dev);
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index fd50eb116243..012c8da2f944 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -86,7 +86,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
86static void lpphy_read_band_sprom(struct b43_wldev *dev) 86static void lpphy_read_band_sprom(struct b43_wldev *dev)
87{ 87{
88 struct b43_phy_lp *lpphy = dev->phy.lp; 88 struct b43_phy_lp *lpphy = dev->phy.lp;
89 struct ssb_bus *bus = dev->dev->bus; 89 struct ssb_bus *bus = dev->sdev->bus;
90 u16 cckpo, maxpwr; 90 u16 cckpo, maxpwr;
91 u32 ofdmpo; 91 u32 ofdmpo;
92 int i; 92 int i;
@@ -214,7 +214,7 @@ static void lpphy_table_init(struct b43_wldev *dev)
214 214
215static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev) 215static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
216{ 216{
217 struct ssb_bus *bus = dev->dev->bus; 217 struct ssb_bus *bus = dev->sdev->bus;
218 struct b43_phy_lp *lpphy = dev->phy.lp; 218 struct b43_phy_lp *lpphy = dev->phy.lp;
219 u16 tmp, tmp2; 219 u16 tmp, tmp2;
220 220
@@ -412,7 +412,7 @@ static void lpphy_restore_dig_flt_state(struct b43_wldev *dev)
412 412
413static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev) 413static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
414{ 414{
415 struct ssb_bus *bus = dev->dev->bus; 415 struct ssb_bus *bus = dev->sdev->bus;
416 struct b43_phy_lp *lpphy = dev->phy.lp; 416 struct b43_phy_lp *lpphy = dev->phy.lp;
417 417
418 b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50); 418 b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50);
@@ -519,7 +519,7 @@ struct b2062_freqdata {
519static void lpphy_2062_init(struct b43_wldev *dev) 519static void lpphy_2062_init(struct b43_wldev *dev)
520{ 520{
521 struct b43_phy_lp *lpphy = dev->phy.lp; 521 struct b43_phy_lp *lpphy = dev->phy.lp;
522 struct ssb_bus *bus = dev->dev->bus; 522 struct ssb_bus *bus = dev->sdev->bus;
523 u32 crystalfreq, tmp, ref; 523 u32 crystalfreq, tmp, ref;
524 unsigned int i; 524 unsigned int i;
525 const struct b2062_freqdata *fd = NULL; 525 const struct b2062_freqdata *fd = NULL;
@@ -697,7 +697,7 @@ static void lpphy_radio_init(struct b43_wldev *dev)
697 lpphy_sync_stx(dev); 697 lpphy_sync_stx(dev);
698 b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80); 698 b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80);
699 b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0); 699 b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0);
700 if (dev->dev->bus->chip_id == 0x4325) { 700 if (dev->sdev->bus->chip_id == 0x4325) {
701 // TODO SSB PMU recalibration 701 // TODO SSB PMU recalibration
702 } 702 }
703 } 703 }
@@ -1289,7 +1289,7 @@ finish:
1289 1289
1290static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev) 1290static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev)
1291{ 1291{
1292 struct ssb_bus *bus = dev->dev->bus; 1292 struct ssb_bus *bus = dev->sdev->bus;
1293 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; 1293 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
1294 u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF; 1294 u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF;
1295 int i; 1295 int i;
@@ -1840,7 +1840,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
1840static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) 1840static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
1841{ 1841{
1842 struct b43_phy_lp *lpphy = dev->phy.lp; 1842 struct b43_phy_lp *lpphy = dev->phy.lp;
1843 struct ssb_bus *bus = dev->dev->bus; 1843 struct ssb_bus *bus = dev->sdev->bus;
1844 struct lpphy_tx_gains gains, oldgains; 1844 struct lpphy_tx_gains gains, oldgains;
1845 int old_txpctl, old_afe_ovr, old_rf, old_bbmult; 1845 int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
1846 1846
@@ -1870,7 +1870,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
1870 bool rx, bool pa, struct lpphy_tx_gains *gains) 1870 bool rx, bool pa, struct lpphy_tx_gains *gains)
1871{ 1871{
1872 struct b43_phy_lp *lpphy = dev->phy.lp; 1872 struct b43_phy_lp *lpphy = dev->phy.lp;
1873 struct ssb_bus *bus = dev->dev->bus; 1873 struct ssb_bus *bus = dev->sdev->bus;
1874 const struct lpphy_rx_iq_comp *iqcomp = NULL; 1874 const struct lpphy_rx_iq_comp *iqcomp = NULL;
1875 struct lpphy_tx_gains nogains, oldgains; 1875 struct lpphy_tx_gains nogains, oldgains;
1876 u16 tmp; 1876 u16 tmp;
@@ -2408,7 +2408,7 @@ static const struct b206x_channel b2063_chantbl[] = {
2408 2408
2409static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev) 2409static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev)
2410{ 2410{
2411 struct ssb_bus *bus = dev->dev->bus; 2411 struct ssb_bus *bus = dev->sdev->bus;
2412 2412
2413 b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF); 2413 b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF);
2414 udelay(20); 2414 udelay(20);
@@ -2432,7 +2432,7 @@ static int lpphy_b2062_tune(struct b43_wldev *dev,
2432 unsigned int channel) 2432 unsigned int channel)
2433{ 2433{
2434 struct b43_phy_lp *lpphy = dev->phy.lp; 2434 struct b43_phy_lp *lpphy = dev->phy.lp;
2435 struct ssb_bus *bus = dev->dev->bus; 2435 struct ssb_bus *bus = dev->sdev->bus;
2436 const struct b206x_channel *chandata = NULL; 2436 const struct b206x_channel *chandata = NULL;
2437 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; 2437 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
2438 u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9; 2438 u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;
@@ -2522,7 +2522,7 @@ static void lpphy_b2063_vco_calib(struct b43_wldev *dev)
2522static int lpphy_b2063_tune(struct b43_wldev *dev, 2522static int lpphy_b2063_tune(struct b43_wldev *dev,
2523 unsigned int channel) 2523 unsigned int channel)
2524{ 2524{
2525 struct ssb_bus *bus = dev->dev->bus; 2525 struct ssb_bus *bus = dev->sdev->bus;
2526 2526
2527 static const struct b206x_channel *chandata = NULL; 2527 static const struct b206x_channel *chandata = NULL;
2528 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; 2528 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b075a3f82a43..9ed65157bef5 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -299,7 +299,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
299static void b43_nphy_tx_power_fix(struct b43_wldev *dev) 299static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
300{ 300{
301 struct b43_phy_n *nphy = dev->phy.n; 301 struct b43_phy_n *nphy = dev->phy.n;
302 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 302 struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
303 303
304 u8 txpi[2], bbmult, i; 304 u8 txpi[2], bbmult, i;
305 u16 tmp, radio_gain, dac_gain; 305 u16 tmp, radio_gain, dac_gain;
@@ -423,8 +423,8 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev)
423static void b43_radio_init2055_post(struct b43_wldev *dev) 423static void b43_radio_init2055_post(struct b43_wldev *dev)
424{ 424{
425 struct b43_phy_n *nphy = dev->phy.n; 425 struct b43_phy_n *nphy = dev->phy.n;
426 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 426 struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
427 struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo); 427 struct ssb_boardinfo *binfo = &(dev->sdev->bus->boardinfo);
428 int i; 428 int i;
429 u16 val; 429 u16 val;
430 bool workaround = false; 430 bool workaround = false;
@@ -609,12 +609,12 @@ static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
609 if (dev->phy.type != B43_PHYTYPE_N) 609 if (dev->phy.type != B43_PHYTYPE_N)
610 return; 610 return;
611 611
612 tmslow = ssb_read32(dev->dev, SSB_TMSLOW); 612 tmslow = ssb_read32(dev->sdev, SSB_TMSLOW);
613 if (force) 613 if (force)
614 tmslow |= SSB_TMSLOW_FGC; 614 tmslow |= SSB_TMSLOW_FGC;
615 else 615 else
616 tmslow &= ~SSB_TMSLOW_FGC; 616 tmslow &= ~SSB_TMSLOW_FGC;
617 ssb_write32(dev->dev, SSB_TMSLOW, tmslow); 617 ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
618} 618}
619 619
620/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ 620/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
@@ -959,7 +959,7 @@ static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
959 b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0); 959 b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
960 b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0); 960 b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
961 961
962 ssb_chipco_gpio_control(&dev->dev->bus->chipco, 0xFC00, 962 ssb_chipco_gpio_control(&dev->sdev->bus->chipco, 0xFC00,
963 0xFC00); 963 0xFC00);
964 b43_write32(dev, B43_MMIO_MACCTL, 964 b43_write32(dev, B43_MMIO_MACCTL,
965 b43_read32(dev, B43_MMIO_MACCTL) & 965 b43_read32(dev, B43_MMIO_MACCTL) &
@@ -983,7 +983,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
983{ 983{
984 u16 tmp; 984 u16 tmp;
985 985
986 if (dev->dev->id.revision == 16) 986 if (dev->sdev->id.revision == 16)
987 b43_mac_suspend(dev); 987 b43_mac_suspend(dev);
988 988
989 tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); 989 tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
@@ -993,7 +993,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
993 tmp |= (val & mask); 993 tmp |= (val & mask);
994 b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); 994 b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
995 995
996 if (dev->dev->id.revision == 16) 996 if (dev->sdev->id.revision == 16)
997 b43_mac_enable(dev); 997 b43_mac_enable(dev);
998 998
999 return tmp; 999 return tmp;
@@ -1168,7 +1168,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
1168static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) 1168static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
1169{ 1169{
1170 struct b43_phy_n *nphy = dev->phy.n; 1170 struct b43_phy_n *nphy = dev->phy.n;
1171 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 1171 struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
1172 1172
1173 /* PHY rev 0, 1, 2 */ 1173 /* PHY rev 0, 1, 2 */
1174 u8 i, j; 1174 u8 i, j;
@@ -1373,7 +1373,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
1373/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ 1373/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
1374static void b43_nphy_workarounds(struct b43_wldev *dev) 1374static void b43_nphy_workarounds(struct b43_wldev *dev)
1375{ 1375{
1376 struct ssb_bus *bus = dev->dev->bus; 1376 struct ssb_bus *bus = dev->sdev->bus;
1377 struct b43_phy *phy = &dev->phy; 1377 struct b43_phy *phy = &dev->phy;
1378 struct b43_phy_n *nphy = phy->n; 1378 struct b43_phy_n *nphy = phy->n;
1379 1379
@@ -3586,7 +3586,7 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
3586 */ 3586 */
3587int b43_phy_initn(struct b43_wldev *dev) 3587int b43_phy_initn(struct b43_wldev *dev)
3588{ 3588{
3589 struct ssb_bus *bus = dev->dev->bus; 3589 struct ssb_bus *bus = dev->sdev->bus;
3590 struct b43_phy *phy = &dev->phy; 3590 struct b43_phy *phy = &dev->phy;
3591 struct b43_phy_n *nphy = phy->n; 3591 struct b43_phy_n *nphy = phy->n;
3592 u8 tx_pwr_state; 3592 u8 tx_pwr_state;
@@ -3601,7 +3601,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3601 if ((dev->phy.rev >= 3) && 3601 if ((dev->phy.rev >= 3) &&
3602 (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) && 3602 (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
3603 (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) { 3603 (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
3604 chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40); 3604 chipco_set32(&dev->sdev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
3605 } 3605 }
3606 nphy->deaf_count = 0; 3606 nphy->deaf_count = 0;
3607 b43_nphy_tables_init(dev); 3607 b43_nphy_tables_init(dev);
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index aa12273ae716..72ab94df7569 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -111,7 +111,7 @@ static u16 index_to_pioqueue_base(struct b43_wldev *dev,
111 B43_MMIO_PIO11_BASE5, 111 B43_MMIO_PIO11_BASE5,
112 }; 112 };
113 113
114 if (dev->dev->id.revision >= 11) { 114 if (dev->sdev->id.revision >= 11) {
115 B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11)); 115 B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
116 return bases_rev11[index]; 116 return bases_rev11[index];
117 } 117 }
@@ -121,14 +121,14 @@ static u16 index_to_pioqueue_base(struct b43_wldev *dev,
121 121
122static u16 pio_txqueue_offset(struct b43_wldev *dev) 122static u16 pio_txqueue_offset(struct b43_wldev *dev)
123{ 123{
124 if (dev->dev->id.revision >= 11) 124 if (dev->sdev->id.revision >= 11)
125 return 0x18; 125 return 0x18;
126 return 0; 126 return 0;
127} 127}
128 128
129static u16 pio_rxqueue_offset(struct b43_wldev *dev) 129static u16 pio_rxqueue_offset(struct b43_wldev *dev)
130{ 130{
131 if (dev->dev->id.revision >= 11) 131 if (dev->sdev->id.revision >= 11)
132 return 0x38; 132 return 0x38;
133 return 8; 133 return 8;
134} 134}
@@ -144,7 +144,7 @@ static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
144 if (!q) 144 if (!q)
145 return NULL; 145 return NULL;
146 q->dev = dev; 146 q->dev = dev;
147 q->rev = dev->dev->id.revision; 147 q->rev = dev->sdev->id.revision;
148 q->mmio_base = index_to_pioqueue_base(dev, index) + 148 q->mmio_base = index_to_pioqueue_base(dev, index) +
149 pio_txqueue_offset(dev); 149 pio_txqueue_offset(dev);
150 q->index = index; 150 q->index = index;
@@ -178,7 +178,7 @@ static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
178 if (!q) 178 if (!q)
179 return NULL; 179 return NULL;
180 q->dev = dev; 180 q->dev = dev;
181 q->rev = dev->dev->id.revision; 181 q->rev = dev->sdev->id.revision;
182 q->mmio_base = index_to_pioqueue_base(dev, index) + 182 q->mmio_base = index_to_pioqueue_base(dev, index) +
183 pio_rxqueue_offset(dev); 183 pio_rxqueue_offset(dev);
184 184
@@ -339,7 +339,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
339 ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; 339 ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
340 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 340 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
341 341
342 ssb_block_write(dev->dev, data, (data_len & ~1), 342 b43_block_write(dev, data, (data_len & ~1),
343 q->mmio_base + B43_PIO_TXDATA, 343 q->mmio_base + B43_PIO_TXDATA,
344 sizeof(u16)); 344 sizeof(u16));
345 if (data_len & 1) { 345 if (data_len & 1) {
@@ -351,7 +351,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
351 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 351 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
352 tail[0] = data[data_len - 1]; 352 tail[0] = data[data_len - 1];
353 tail[1] = 0; 353 tail[1] = 0;
354 ssb_block_write(dev->dev, tail, 2, 354 b43_block_write(dev, tail, 2,
355 q->mmio_base + B43_PIO_TXDATA, 355 q->mmio_base + B43_PIO_TXDATA,
356 sizeof(u16)); 356 sizeof(u16));
357 } 357 }
@@ -393,7 +393,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
393 B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31; 393 B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
394 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 394 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
395 395
396 ssb_block_write(dev->dev, data, (data_len & ~3), 396 b43_block_write(dev, data, (data_len & ~3),
397 q->mmio_base + B43_PIO8_TXDATA, 397 q->mmio_base + B43_PIO8_TXDATA,
398 sizeof(u32)); 398 sizeof(u32));
399 if (data_len & 3) { 399 if (data_len & 3) {
@@ -421,7 +421,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
421 break; 421 break;
422 } 422 }
423 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 423 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
424 ssb_block_write(dev->dev, tail, 4, 424 b43_block_write(dev, tail, 4,
425 q->mmio_base + B43_PIO8_TXDATA, 425 q->mmio_base + B43_PIO8_TXDATA,
426 sizeof(u32)); 426 sizeof(u32));
427 } 427 }
@@ -657,11 +657,11 @@ data_ready:
657 657
658 /* Get the preamble (RX header) */ 658 /* Get the preamble (RX header) */
659 if (q->rev >= 8) { 659 if (q->rev >= 8) {
660 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr), 660 b43_block_read(dev, rxhdr, sizeof(*rxhdr),
661 q->mmio_base + B43_PIO8_RXDATA, 661 q->mmio_base + B43_PIO8_RXDATA,
662 sizeof(u32)); 662 sizeof(u32));
663 } else { 663 } else {
664 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr), 664 b43_block_read(dev, rxhdr, sizeof(*rxhdr),
665 q->mmio_base + B43_PIO_RXDATA, 665 q->mmio_base + B43_PIO_RXDATA,
666 sizeof(u16)); 666 sizeof(u16));
667 } 667 }
@@ -697,7 +697,7 @@ data_ready:
697 skb_reserve(skb, 2); 697 skb_reserve(skb, 2);
698 skb_put(skb, len + padding); 698 skb_put(skb, len + padding);
699 if (q->rev >= 8) { 699 if (q->rev >= 8) {
700 ssb_block_read(dev->dev, skb->data + padding, (len & ~3), 700 b43_block_read(dev, skb->data + padding, (len & ~3),
701 q->mmio_base + B43_PIO8_RXDATA, 701 q->mmio_base + B43_PIO8_RXDATA,
702 sizeof(u32)); 702 sizeof(u32));
703 if (len & 3) { 703 if (len & 3) {
@@ -705,7 +705,7 @@ data_ready:
705 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4); 705 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
706 706
707 /* Read the last few bytes. */ 707 /* Read the last few bytes. */
708 ssb_block_read(dev->dev, tail, 4, 708 b43_block_read(dev, tail, 4,
709 q->mmio_base + B43_PIO8_RXDATA, 709 q->mmio_base + B43_PIO8_RXDATA,
710 sizeof(u32)); 710 sizeof(u32));
711 switch (len & 3) { 711 switch (len & 3) {
@@ -724,7 +724,7 @@ data_ready:
724 } 724 }
725 } 725 }
726 } else { 726 } else {
727 ssb_block_read(dev->dev, skb->data + padding, (len & ~1), 727 b43_block_read(dev, skb->data + padding, (len & ~1),
728 q->mmio_base + B43_PIO_RXDATA, 728 q->mmio_base + B43_PIO_RXDATA,
729 sizeof(u16)); 729 sizeof(u16));
730 if (len & 1) { 730 if (len & 1) {
@@ -732,7 +732,7 @@ data_ready:
732 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2); 732 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
733 733
734 /* Read the last byte. */ 734 /* Read the last byte. */
735 ssb_block_read(dev->dev, tail, 2, 735 b43_block_read(dev, tail, 2,
736 q->mmio_base + B43_PIO_RXDATA, 736 q->mmio_base + B43_PIO_RXDATA,
737 sizeof(u16)); 737 sizeof(u16));
738 skb->data[len + padding - 1] = tail[0]; 738 skb->data[len + padding - 1] = tail[0];
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 86bc0a0f735c..a617efe38289 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -37,7 +37,7 @@ void b43_rfkill_poll(struct ieee80211_hw *hw)
37{ 37{
38 struct b43_wl *wl = hw_to_b43_wl(hw); 38 struct b43_wl *wl = hw_to_b43_wl(hw);
39 struct b43_wldev *dev = wl->current_dev; 39 struct b43_wldev *dev = wl->current_dev;
40 struct ssb_bus *bus = dev->dev->bus; 40 struct ssb_bus *bus = dev->sdev->bus;
41 bool enabled; 41 bool enabled;
42 bool brought_up = false; 42 bool brought_up = false;
43 43
@@ -47,7 +47,7 @@ void b43_rfkill_poll(struct ieee80211_hw *hw)
47 mutex_unlock(&wl->mutex); 47 mutex_unlock(&wl->mutex);
48 return; 48 return;
49 } 49 }
50 ssb_device_enable(dev->dev, 0); 50 ssb_device_enable(dev->sdev, 0);
51 brought_up = true; 51 brought_up = true;
52 } 52 }
53 53
@@ -63,7 +63,7 @@ void b43_rfkill_poll(struct ieee80211_hw *hw)
63 } 63 }
64 64
65 if (brought_up) { 65 if (brought_up) {
66 ssb_device_disable(dev->dev, 0); 66 ssb_device_disable(dev->sdev, 0);
67 ssb_bus_may_powerdown(bus); 67 ssb_bus_may_powerdown(bus);
68 } 68 }
69 69
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 09e2dfd7b175..808e25b79703 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -66,7 +66,7 @@ static void b43_sdio_interrupt_dispatcher(struct sdio_func *func)
66int b43_sdio_request_irq(struct b43_wldev *dev, 66int b43_sdio_request_irq(struct b43_wldev *dev,
67 void (*handler)(struct b43_wldev *dev)) 67 void (*handler)(struct b43_wldev *dev))
68{ 68{
69 struct ssb_bus *bus = dev->dev->bus; 69 struct ssb_bus *bus = dev->sdev->bus;
70 struct sdio_func *func = bus->host_sdio; 70 struct sdio_func *func = bus->host_sdio;
71 struct b43_sdio *sdio = sdio_get_drvdata(func); 71 struct b43_sdio *sdio = sdio_get_drvdata(func);
72 int err; 72 int err;
@@ -82,7 +82,7 @@ int b43_sdio_request_irq(struct b43_wldev *dev,
82 82
83void b43_sdio_free_irq(struct b43_wldev *dev) 83void b43_sdio_free_irq(struct b43_wldev *dev)
84{ 84{
85 struct ssb_bus *bus = dev->dev->bus; 85 struct ssb_bus *bus = dev->sdev->bus;
86 struct sdio_func *func = bus->host_sdio; 86 struct sdio_func *func = bus->host_sdio;
87 struct b43_sdio *sdio = sdio_get_drvdata(func); 87 struct b43_sdio *sdio = sdio_get_drvdata(func);
88 88
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index f1ae4e05a32c..57af619725c3 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -140,7 +140,7 @@ static DEVICE_ATTR(interference, 0644,
140 140
141int b43_sysfs_register(struct b43_wldev *wldev) 141int b43_sysfs_register(struct b43_wldev *wldev)
142{ 142{
143 struct device *dev = wldev->dev->dev; 143 struct device *dev = wldev->sdev->dev;
144 144
145 B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED); 145 B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED);
146 146
@@ -149,7 +149,7 @@ int b43_sysfs_register(struct b43_wldev *wldev)
149 149
150void b43_sysfs_unregister(struct b43_wldev *wldev) 150void b43_sysfs_unregister(struct b43_wldev *wldev)
151{ 151{
152 struct device *dev = wldev->dev->dev; 152 struct device *dev = wldev->sdev->dev;
153 153
154 device_remove_file(dev, &dev_attr_interference); 154 device_remove_file(dev, &dev_attr_interference);
155} 155}
diff --git a/drivers/net/wireless/b43/tables_lpphy.c b/drivers/net/wireless/b43/tables_lpphy.c
index 61027ee84fb5..59df3c64af63 100644
--- a/drivers/net/wireless/b43/tables_lpphy.c
+++ b/drivers/net/wireless/b43/tables_lpphy.c
@@ -2304,7 +2304,7 @@ void lpphy_rev0_1_table_init(struct b43_wldev *dev)
2304 2304
2305void lpphy_rev2plus_table_init(struct b43_wldev *dev) 2305void lpphy_rev2plus_table_init(struct b43_wldev *dev)
2306{ 2306{
2307 struct ssb_bus *bus = dev->dev->bus; 2307 struct ssb_bus *bus = dev->sdev->bus;
2308 int i; 2308 int i;
2309 2309
2310 B43_WARN_ON(dev->phy.rev < 2); 2310 B43_WARN_ON(dev->phy.rev < 2);
@@ -2416,7 +2416,7 @@ void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
2416 2416
2417void lpphy_init_tx_gain_table(struct b43_wldev *dev) 2417void lpphy_init_tx_gain_table(struct b43_wldev *dev)
2418{ 2418{
2419 struct ssb_bus *bus = dev->dev->bus; 2419 struct ssb_bus *bus = dev->sdev->bus;
2420 2420
2421 switch (dev->phy.rev) { 2421 switch (dev->phy.rev) {
2422 case 0: 2422 case 0:
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 9a335da65b42..8f4db448ec33 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -458,7 +458,7 @@ static void b43_wa_rssi_adc(struct b43_wldev *dev)
458 458
459static void b43_wa_boards_a(struct b43_wldev *dev) 459static void b43_wa_boards_a(struct b43_wldev *dev)
460{ 460{
461 struct ssb_bus *bus = dev->dev->bus; 461 struct ssb_bus *bus = dev->sdev->bus;
462 462
463 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM && 463 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
464 bus->boardinfo.type == SSB_BOARD_BU4306 && 464 bus->boardinfo.type == SSB_BOARD_BU4306 &&
@@ -486,7 +486,7 @@ static void b43_wa_boards_a(struct b43_wldev *dev)
486 486
487static void b43_wa_boards_g(struct b43_wldev *dev) 487static void b43_wa_boards_g(struct b43_wldev *dev)
488{ 488{
489 struct ssb_bus *bus = dev->dev->bus; 489 struct ssb_bus *bus = dev->sdev->bus;
490 struct b43_phy *phy = &dev->phy; 490 struct b43_phy *phy = &dev->phy;
491 491
492 if (bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM || 492 if (bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM ||
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index e5be381c17bc..c8f99aebe01f 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -547,7 +547,7 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
547 else 547 else
548 tmp -= 3; 548 tmp -= 3;
549 } else { 549 } else {
550 if (dev->dev->bus->sprom. 550 if (dev->sdev->bus->sprom.
551 boardflags_lo & B43_BFL_RSSI) { 551 boardflags_lo & B43_BFL_RSSI) {
552 if (in_rssi > 63) 552 if (in_rssi > 63)
553 in_rssi = 63; 553 in_rssi = 63;
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index b4c81931e136..61d4a11f566b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -171,10 +171,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
171 171
172static struct iwl_lib_ops iwl1000_lib = { 172static struct iwl_lib_ops iwl1000_lib = {
173 .set_hw_params = iwl1000_hw_set_hw_params, 173 .set_hw_params = iwl1000_hw_set_hw_params,
174 .txq_set_sched = iwlagn_txq_set_sched,
175 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
176 .txq_free_tfd = iwl_hw_txq_free_tfd,
177 .txq_init = iwl_hw_tx_queue_init,
178 .rx_handler_setup = iwlagn_rx_handler_setup, 174 .rx_handler_setup = iwlagn_rx_handler_setup,
179 .setup_deferred_work = iwlagn_setup_deferred_work, 175 .setup_deferred_work = iwlagn_setup_deferred_work,
180 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 176 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 89b8da7a6c8b..86feec86d130 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -195,9 +195,9 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
195 struct ieee80211_vif *vif = ctx->vif; 195 struct ieee80211_vif *vif = ctx->vif;
196 struct iwl_host_cmd hcmd = { 196 struct iwl_host_cmd hcmd = {
197 .id = REPLY_CHANNEL_SWITCH, 197 .id = REPLY_CHANNEL_SWITCH,
198 .len = sizeof(cmd), 198 .len = { sizeof(cmd), },
199 .flags = CMD_SYNC, 199 .flags = CMD_SYNC,
200 .data = &cmd, 200 .data = { &cmd, },
201 }; 201 };
202 202
203 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 203 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
@@ -252,10 +252,6 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
252 252
253static struct iwl_lib_ops iwl2000_lib = { 253static struct iwl_lib_ops iwl2000_lib = {
254 .set_hw_params = iwl2000_hw_set_hw_params, 254 .set_hw_params = iwl2000_hw_set_hw_params,
255 .txq_set_sched = iwlagn_txq_set_sched,
256 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
257 .txq_free_tfd = iwl_hw_txq_free_tfd,
258 .txq_init = iwl_hw_tx_queue_init,
259 .rx_handler_setup = iwlagn_rx_handler_setup, 255 .rx_handler_setup = iwlagn_rx_handler_setup,
260 .setup_deferred_work = iwlagn_bt_setup_deferred_work, 256 .setup_deferred_work = iwlagn_bt_setup_deferred_work,
261 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, 257 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 98f81df166e3..a70b8cfafda1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -282,9 +282,9 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
282 struct ieee80211_vif *vif = ctx->vif; 282 struct ieee80211_vif *vif = ctx->vif;
283 struct iwl_host_cmd hcmd = { 283 struct iwl_host_cmd hcmd = {
284 .id = REPLY_CHANNEL_SWITCH, 284 .id = REPLY_CHANNEL_SWITCH,
285 .len = sizeof(cmd), 285 .len = { sizeof(cmd), },
286 .flags = CMD_SYNC, 286 .flags = CMD_SYNC,
287 .data = &cmd, 287 .data = { &cmd, },
288 }; 288 };
289 289
290 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 290 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
@@ -339,10 +339,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
339 339
340static struct iwl_lib_ops iwl5000_lib = { 340static struct iwl_lib_ops iwl5000_lib = {
341 .set_hw_params = iwl5000_hw_set_hw_params, 341 .set_hw_params = iwl5000_hw_set_hw_params,
342 .txq_set_sched = iwlagn_txq_set_sched,
343 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
344 .txq_free_tfd = iwl_hw_txq_free_tfd,
345 .txq_init = iwl_hw_tx_queue_init,
346 .rx_handler_setup = iwlagn_rx_handler_setup, 342 .rx_handler_setup = iwlagn_rx_handler_setup,
347 .setup_deferred_work = iwlagn_setup_deferred_work, 343 .setup_deferred_work = iwlagn_setup_deferred_work,
348 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 344 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
@@ -374,10 +370,6 @@ static struct iwl_lib_ops iwl5000_lib = {
374 370
375static struct iwl_lib_ops iwl5150_lib = { 371static struct iwl_lib_ops iwl5150_lib = {
376 .set_hw_params = iwl5150_hw_set_hw_params, 372 .set_hw_params = iwl5150_hw_set_hw_params,
377 .txq_set_sched = iwlagn_txq_set_sched,
378 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
379 .txq_free_tfd = iwl_hw_txq_free_tfd,
380 .txq_init = iwl_hw_tx_queue_init,
381 .rx_handler_setup = iwlagn_rx_handler_setup, 373 .rx_handler_setup = iwlagn_rx_handler_setup,
382 .setup_deferred_work = iwlagn_setup_deferred_work, 374 .setup_deferred_work = iwlagn_setup_deferred_work,
383 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 375 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index a7921f9a03c6..f8c710db6e6f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -221,9 +221,9 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
221 struct ieee80211_vif *vif = ctx->vif; 221 struct ieee80211_vif *vif = ctx->vif;
222 struct iwl_host_cmd hcmd = { 222 struct iwl_host_cmd hcmd = {
223 .id = REPLY_CHANNEL_SWITCH, 223 .id = REPLY_CHANNEL_SWITCH,
224 .len = sizeof(cmd), 224 .len = { sizeof(cmd), },
225 .flags = CMD_SYNC, 225 .flags = CMD_SYNC,
226 .data = &cmd, 226 .data = { &cmd, },
227 }; 227 };
228 228
229 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 229 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
@@ -278,10 +278,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
278 278
279static struct iwl_lib_ops iwl6000_lib = { 279static struct iwl_lib_ops iwl6000_lib = {
280 .set_hw_params = iwl6000_hw_set_hw_params, 280 .set_hw_params = iwl6000_hw_set_hw_params,
281 .txq_set_sched = iwlagn_txq_set_sched,
282 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
283 .txq_free_tfd = iwl_hw_txq_free_tfd,
284 .txq_init = iwl_hw_tx_queue_init,
285 .rx_handler_setup = iwlagn_rx_handler_setup, 281 .rx_handler_setup = iwlagn_rx_handler_setup,
286 .setup_deferred_work = iwlagn_setup_deferred_work, 282 .setup_deferred_work = iwlagn_setup_deferred_work,
287 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, 283 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
@@ -314,10 +310,6 @@ static struct iwl_lib_ops iwl6000_lib = {
314 310
315static struct iwl_lib_ops iwl6030_lib = { 311static struct iwl_lib_ops iwl6030_lib = {
316 .set_hw_params = iwl6000_hw_set_hw_params, 312 .set_hw_params = iwl6000_hw_set_hw_params,
317 .txq_set_sched = iwlagn_txq_set_sched,
318 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
319 .txq_free_tfd = iwl_hw_txq_free_tfd,
320 .txq_init = iwl_hw_tx_queue_init,
321 .rx_handler_setup = iwlagn_bt_rx_handler_setup, 313 .rx_handler_setup = iwlagn_bt_rx_handler_setup,
322 .setup_deferred_work = iwlagn_bt_setup_deferred_work, 314 .setup_deferred_work = iwlagn_bt_setup_deferred_work,
323 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, 315 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 39d1e47a0978..c9255def1080 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -87,14 +87,14 @@ int iwl_send_calib_results(struct iwl_priv *priv)
 
 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_PHY_CALIBRATION_CMD,
-		.flags = CMD_SIZE_HUGE,
 	};
 
 	for (i = 0; i < IWL_CALIB_MAX; i++) {
 		if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
 		    priv->calib_results[i].buf) {
-			hcmd.len = priv->calib_results[i].buf_len;
-			hcmd.data = priv->calib_results[i].buf;
+			hcmd.len[0] = priv->calib_results[i].buf_len;
+			hcmd.data[0] = priv->calib_results[i].buf;
+			hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
 			ret = iwl_send_cmd_sync(priv, &hcmd);
 			if (ret) {
 				IWL_ERR(priv, "Error %d iteration %d\n",
@@ -456,9 +456,9 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
456 struct iwl_sensitivity_data *data = NULL; 456 struct iwl_sensitivity_data *data = NULL;
457 struct iwl_host_cmd cmd_out = { 457 struct iwl_host_cmd cmd_out = {
458 .id = SENSITIVITY_CMD, 458 .id = SENSITIVITY_CMD,
459 .len = sizeof(struct iwl_sensitivity_cmd), 459 .len = { sizeof(struct iwl_sensitivity_cmd), },
460 .flags = CMD_ASYNC, 460 .flags = CMD_ASYNC,
461 .data = &cmd, 461 .data = { &cmd, },
462 }; 462 };
463 463
464 data = &(priv->sensitivity_data); 464 data = &(priv->sensitivity_data);
@@ -491,9 +491,9 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
491 struct iwl_sensitivity_data *data = NULL; 491 struct iwl_sensitivity_data *data = NULL;
492 struct iwl_host_cmd cmd_out = { 492 struct iwl_host_cmd cmd_out = {
493 .id = SENSITIVITY_CMD, 493 .id = SENSITIVITY_CMD,
494 .len = sizeof(struct iwl_enhance_sensitivity_cmd), 494 .len = { sizeof(struct iwl_enhance_sensitivity_cmd), },
495 .flags = CMD_ASYNC, 495 .flags = CMD_ASYNC,
496 .data = &cmd, 496 .data = { &cmd, },
497 }; 497 };
498 498
499 data = &(priv->sensitivity_data); 499 data = &(priv->sensitivity_data);
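The iwl_send_calib_results() hunk above shows the host-command interface change that repeats throughout these iwlwifi diffs: .len and .data turn into small per-fragment arrays, and dataflags[0] = IWL_HCMD_DFL_NOCOPY asks the transport to reference the calibration buffer instead of copying it. A rough, self-contained C sketch of that shape follows; the struct, constants and send_cmd() below are illustrative stand-ins, not the driver's real struct iwl_host_cmd or its command IDs:

/*
 * Illustrative sketch of a multi-fragment host command (assumed layout,
 * not the real iwlwifi definitions).
 */
#include <stdio.h>

#define MAX_CMD_FRAGS	2
#define DFL_NOCOPY	0x1	/* fragment is referenced, not copied */

struct host_cmd {
	int id;
	unsigned short len[MAX_CMD_FRAGS];
	const void *data[MAX_CMD_FRAGS];
	unsigned int dataflags[MAX_CMD_FRAGS];
};

/* Pretend transport: just reports how each fragment would be handled. */
static void send_cmd(const struct host_cmd *cmd)
{
	for (int i = 0; i < MAX_CMD_FRAGS && cmd->len[i]; i++)
		printf("cmd 0x%x: fragment %d, %d bytes, %s\n",
		       cmd->id, i, cmd->len[i],
		       (cmd->dataflags[i] & DFL_NOCOPY) ? "no-copy" : "copied");
}

int main(void)
{
	static const char calib_buf[64] = "calibration payload";
	struct host_cmd cmd = { .id = 0xb0 };	/* hypothetical command id */

	/* Same shape as the calibration loop above: one no-copy fragment. */
	cmd.len[0] = sizeof(calib_buf);
	cmd.data[0] = calib_buf;
	cmd.dataflags[0] = DFL_NOCOPY;
	send_cmd(&cmd);
	return 0;
}

Used like the loop above, the caller fills fragment 0 and marks it no-copy; a second fragment could be added without changing the command structure.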
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 8e79653aed9a..f803fb62f8bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -1140,8 +1140,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1140{ 1140{
1141 struct iwl_host_cmd cmd = { 1141 struct iwl_host_cmd cmd = {
1142 .id = REPLY_SCAN_CMD, 1142 .id = REPLY_SCAN_CMD,
1143 .len = sizeof(struct iwl_scan_cmd), 1143 .len = { sizeof(struct iwl_scan_cmd), },
1144 .flags = CMD_SIZE_HUGE,
1145 }; 1144 };
1146 struct iwl_scan_cmd *scan; 1145 struct iwl_scan_cmd *scan;
1147 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1146 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -1425,10 +1424,11 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		return -EIO;
 	}
 
-	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+	cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
 	       scan->channel_count * sizeof(struct iwl_scan_channel);
-	cmd.data = scan;
-	scan->len = cpu_to_le16(cmd.len);
+	cmd.data[0] = scan;
+	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+	scan->len = cpu_to_le16(cmd.len[0]);
 
 	/* set scan bit here for PAN params */
 	set_bit(STATUS_SCAN_HW, &priv->status);
@@ -1520,9 +1520,9 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1520 struct iwl_txfifo_flush_cmd flush_cmd; 1520 struct iwl_txfifo_flush_cmd flush_cmd;
1521 struct iwl_host_cmd cmd = { 1521 struct iwl_host_cmd cmd = {
1522 .id = REPLY_TXFIFO_FLUSH, 1522 .id = REPLY_TXFIFO_FLUSH,
1523 .len = sizeof(struct iwl_txfifo_flush_cmd), 1523 .len = { sizeof(struct iwl_txfifo_flush_cmd), },
1524 .flags = CMD_SYNC, 1524 .flags = CMD_SYNC,
1525 .data = &flush_cmd, 1525 .data = { &flush_cmd, },
1526 }; 1526 };
1527 1527
1528 might_sleep(); 1528 might_sleep();
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 91f26556ac23..592b0cfcf717 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -335,6 +335,32 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
335 return tid; 335 return tid;
336} 336}
337 337
338#ifdef CONFIG_MAC80211_DEBUGFS
339static void rs_program_fix_rate(struct iwl_priv *priv,
340 struct iwl_lq_sta *lq_sta)
341{
342 struct iwl_station_priv *sta_priv =
343 container_of(lq_sta, struct iwl_station_priv, lq_sta);
344 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
345
346 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
347 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
348 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
349 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
350
351 lq_sta->dbg_fixed_rate = priv->dbg_fixed_rate;
352
353 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
354 lq_sta->lq.sta_id, priv->dbg_fixed_rate);
355
356 if (priv->dbg_fixed_rate) {
357 rs_fill_link_cmd(NULL, lq_sta, priv->dbg_fixed_rate);
358 iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
359 false);
360 }
361}
362#endif
363
338/* 364/*
339 get the traffic load value for tid 365 get the traffic load value for tid
340*/ 366*/
@@ -1046,7 +1072,10 @@ done:
1046 /* See if there's a better rate or modulation mode to try. */ 1072 /* See if there's a better rate or modulation mode to try. */
1047 if (sta && sta->supp_rates[sband->band]) 1073 if (sta && sta->supp_rates[sband->band])
1048 rs_rate_scale_perform(priv, skb, sta, lq_sta); 1074 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1049 1075#ifdef CONFIG_MAC80211_DEBUGFS
1076 if (priv->dbg_fixed_rate != lq_sta->dbg_fixed_rate)
1077 rs_program_fix_rate(priv, lq_sta);
1078#endif
1050 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) 1079 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1051 rs_bt_update_lq(priv, ctx, lq_sta); 1080 rs_bt_update_lq(priv, ctx, lq_sta);
1052} 1081}
@@ -2170,11 +2199,11 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
2170 * setup rate table in uCode 2199 * setup rate table in uCode
2171 * return rate_n_flags as used in the table 2200 * return rate_n_flags as used in the table
2172 */ 2201 */
2173static u32 rs_update_rate_tbl(struct iwl_priv *priv, 2202static void rs_update_rate_tbl(struct iwl_priv *priv,
2174 struct iwl_rxon_context *ctx, 2203 struct iwl_rxon_context *ctx,
2175 struct iwl_lq_sta *lq_sta, 2204 struct iwl_lq_sta *lq_sta,
2176 struct iwl_scale_tbl_info *tbl, 2205 struct iwl_scale_tbl_info *tbl,
2177 int index, u8 is_green) 2206 int index, u8 is_green)
2178{ 2207{
2179 u32 rate; 2208 u32 rate;
2180 2209
@@ -2182,8 +2211,6 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
2182 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); 2211 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2183 rs_fill_link_cmd(priv, lq_sta, rate); 2212 rs_fill_link_cmd(priv, lq_sta, rate);
2184 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); 2213 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2185
2186 return rate;
2187} 2214}
2188 2215
2189/* 2216/*
@@ -2212,7 +2239,6 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2212 u8 update_lq = 0; 2239 u8 update_lq = 0;
2213 struct iwl_scale_tbl_info *tbl, *tbl1; 2240 struct iwl_scale_tbl_info *tbl, *tbl1;
2214 u16 rate_scale_index_msk = 0; 2241 u16 rate_scale_index_msk = 0;
2215 u32 rate;
2216 u8 is_green = 0; 2242 u8 is_green = 0;
2217 u8 active_tbl = 0; 2243 u8 active_tbl = 0;
2218 u8 done_search = 0; 2244 u8 done_search = 0;
@@ -2299,8 +2325,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2299 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 2325 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2300 /* get "active" rate info */ 2326 /* get "active" rate info */
2301 index = iwl_hwrate_to_plcp_idx(tbl->current_rate); 2327 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2302 rate = rs_update_rate_tbl(priv, ctx, lq_sta, 2328 rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
2303 tbl, index, is_green); 2329 index, is_green);
2304 } 2330 }
2305 return; 2331 return;
2306 } 2332 }
@@ -2541,8 +2567,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2541lq_update: 2567lq_update:
2542 /* Replace uCode's rate table for the destination station. */ 2568 /* Replace uCode's rate table for the destination station. */
2543 if (update_lq) 2569 if (update_lq)
2544 rate = rs_update_rate_tbl(priv, ctx, lq_sta, 2570 rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
2545 tbl, index, is_green);
2546 2571
2547 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) { 2572 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
2548 /* Should we stay with this modulation mode, 2573 /* Should we stay with this modulation mode,
@@ -2871,6 +2896,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2871 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2896 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2872 lq_sta->is_agg = 0; 2897 lq_sta->is_agg = 0;
2873 2898
2899 priv->dbg_fixed_rate = 0;
2874#ifdef CONFIG_MAC80211_DEBUGFS 2900#ifdef CONFIG_MAC80211_DEBUGFS
2875 lq_sta->dbg_fixed_rate = 0; 2901 lq_sta->dbg_fixed_rate = 0;
2876#endif 2902#endif
@@ -3045,7 +3071,6 @@ static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
3045 IWL_DEBUG_RATE(priv, "leave\n"); 3071 IWL_DEBUG_RATE(priv, "leave\n");
3046} 3072}
3047 3073
3048
3049#ifdef CONFIG_MAC80211_DEBUGFS 3074#ifdef CONFIG_MAC80211_DEBUGFS
3050static int open_file_generic(struct inode *inode, struct file *file) 3075static int open_file_generic(struct inode *inode, struct file *file)
3051{ 3076{
@@ -3070,6 +3095,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3070 IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); 3095 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
3071 } else { 3096 } else {
3072 lq_sta->dbg_fixed_rate = 0; 3097 lq_sta->dbg_fixed_rate = 0;
3098 priv->dbg_fixed_rate = 0;
3073 IWL_ERR(priv, 3099 IWL_ERR(priv,
3074 "Invalid antenna selection 0x%X, Valid is 0x%X\n", 3100 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
3075 ant_sel_tx, valid_tx_ant); 3101 ant_sel_tx, valid_tx_ant);
@@ -3088,9 +3114,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3088 char buf[64]; 3114 char buf[64];
3089 size_t buf_size; 3115 size_t buf_size;
3090 u32 parsed_rate; 3116 u32 parsed_rate;
3091 struct iwl_station_priv *sta_priv = 3117
3092 container_of(lq_sta, struct iwl_station_priv, lq_sta);
3093 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
3094 3118
3095 priv = lq_sta->drv; 3119 priv = lq_sta->drv;
3096 memset(buf, 0, sizeof(buf)); 3120 memset(buf, 0, sizeof(buf));
@@ -3099,23 +3123,11 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
3099 return -EFAULT; 3123 return -EFAULT;
3100 3124
3101 if (sscanf(buf, "%x", &parsed_rate) == 1) 3125 if (sscanf(buf, "%x", &parsed_rate) == 1)
3102 lq_sta->dbg_fixed_rate = parsed_rate; 3126 priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = parsed_rate;
3103 else 3127 else
3104 lq_sta->dbg_fixed_rate = 0; 3128 priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = 0;
3105 3129
3106 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ 3130 rs_program_fix_rate(priv, lq_sta);
3107 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
3108 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
3109 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
3110
3111 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
3112 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
3113
3114 if (lq_sta->dbg_fixed_rate) {
3115 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
3116 iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
3117 false);
3118 }
3119 3131
3120 return count; 3132 return count;
3121} 3133}
@@ -3143,7 +3155,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3143 lq_sta->total_failed, lq_sta->total_success, 3155 lq_sta->total_failed, lq_sta->total_success,
3144 lq_sta->active_legacy_rate); 3156 lq_sta->active_legacy_rate);
3145 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3157 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3146 lq_sta->dbg_fixed_rate); 3158 priv->dbg_fixed_rate);
3147 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3159 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3148 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", 3160 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
3149 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", 3161 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
@@ -3254,14 +3266,10 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
3254static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, 3266static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
3255 char __user *user_buf, size_t count, loff_t *ppos) 3267 char __user *user_buf, size_t count, loff_t *ppos)
3256{ 3268{
3257 char buff[120];
3258 int desc = 0;
3259
3260 struct iwl_lq_sta *lq_sta = file->private_data; 3269 struct iwl_lq_sta *lq_sta = file->private_data;
3261 struct iwl_priv *priv;
3262 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; 3270 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
3263 3271 char buff[120];
3264 priv = lq_sta->drv; 3272 int desc = 0;
3265 3273
3266 if (is_Ht(tbl->lq_type)) 3274 if (is_Ht(tbl->lq_type))
3267 desc += sprintf(buff+desc, 3275 desc += sprintf(buff+desc,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 02387430f7fe..a95ad84c5377 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -289,7 +289,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
289 /* cast away the const for active_rxon in this function */ 289 /* cast away the const for active_rxon in this function */
290 struct iwl_rxon_cmd *active = (void *)&ctx->active; 290 struct iwl_rxon_cmd *active = (void *)&ctx->active;
291 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); 291 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
292 bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
293 int ret; 292 int ret;
294 293
295 lockdep_assert_held(&priv->mutex); 294 lockdep_assert_held(&priv->mutex);
@@ -389,11 +388,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
389 * AP station must be done after the BSSID is set to correctly 388 * AP station must be done after the BSSID is set to correctly
390 * set up filters in the device. 389 * set up filters in the device.
391 */ 390 */
392 if ((old_assoc && new_assoc) || !new_assoc) { 391 ret = iwlagn_rxon_disconn(priv, ctx);
393 ret = iwlagn_rxon_disconn(priv, ctx); 392 if (ret)
394 if (ret) 393 return ret;
395 return ret;
396 }
397 394
398 if (new_assoc) 395 if (new_assoc)
399 return iwlagn_rxon_connect(priv, ctx); 396 return iwlagn_rxon_connect(priv, ctx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 079275f2c64d..0bd722cee5ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -144,7 +144,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
144 size_t cmd_size = sizeof(struct iwl_wep_cmd); 144 size_t cmd_size = sizeof(struct iwl_wep_cmd);
145 struct iwl_host_cmd cmd = { 145 struct iwl_host_cmd cmd = {
146 .id = ctx->wep_key_cmd, 146 .id = ctx->wep_key_cmd,
147 .data = wep_cmd, 147 .data = { wep_cmd, },
148 .flags = CMD_SYNC, 148 .flags = CMD_SYNC,
149 }; 149 };
150 150
@@ -172,7 +172,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
172 172
173 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; 173 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
174 174
175 cmd.len = cmd_size; 175 cmd.len[0] = cmd_size;
176 176
177 if (not_empty || send_if_empty) 177 if (not_empty || send_if_empty)
178 return iwl_send_cmd(priv, &cmd); 178 return iwl_send_cmd(priv, &cmd);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 342de780a366..4974cd7837cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -755,12 +755,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
755 spin_unlock(&priv->sta_lock); 755 spin_unlock(&priv->sta_lock);
756 756
757 /* Attach buffers to TFD */ 757 /* Attach buffers to TFD */
758 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 758 iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
759 txcmd_phys, firstlen, 1, 0);
760 if (secondlen > 0) 759 if (secondlen > 0)
761 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 760 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
762 phys_addr, secondlen, 761 secondlen, 0);
763 0, 0);
764 762
765 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + 763 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
766 offsetof(struct iwl_tx_cmd, scratch); 764 offsetof(struct iwl_tx_cmd, scratch);
@@ -916,7 +914,7 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
916 spin_lock_irqsave(&priv->lock, flags); 914 spin_lock_irqsave(&priv->lock, flags);
917 915
918 /* Turn off all Tx DMA fifos */ 916 /* Turn off all Tx DMA fifos */
919 priv->cfg->ops->lib->txq_set_sched(priv, 0); 917 iwlagn_txq_set_sched(priv, 0);
920 918
921 /* Tell NIC where to find the "keep warm" buffer */ 919 /* Tell NIC where to find the "keep warm" buffer */
922 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 920 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
@@ -954,7 +952,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
954 spin_lock_irqsave(&priv->lock, flags); 952 spin_lock_irqsave(&priv->lock, flags);
955 953
956 /* Turn off all Tx DMA fifos */ 954 /* Turn off all Tx DMA fifos */
957 priv->cfg->ops->lib->txq_set_sched(priv, 0); 955 iwlagn_txq_set_sched(priv, 0);
958 956
959 /* Tell NIC where to find the "keep warm" buffer */ 957 /* Tell NIC where to find the "keep warm" buffer */
960 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 958 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
@@ -980,7 +978,7 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
980 /* Turn off all Tx DMA fifos */ 978 /* Turn off all Tx DMA fifos */
981 spin_lock_irqsave(&priv->lock, flags); 979 spin_lock_irqsave(&priv->lock, flags);
982 980
983 priv->cfg->ops->lib->txq_set_sched(priv, 0); 981 iwlagn_txq_set_sched(priv, 0);
984 982
985 /* Stop each Tx DMA channel, and wait for it to be idle */ 983 /* Stop each Tx DMA channel, and wait for it to be idle */
986 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { 984 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
@@ -1263,7 +1261,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1263 1261
1264 iwlagn_txq_inval_byte_cnt_tbl(priv, txq); 1262 iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
1265 1263
1266 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 1264 iwlagn_txq_free_tfd(priv, txq);
1267 } 1265 }
1268 return nfreed; 1266 return nfreed;
1269} 1267}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 8bda0e8d6661..97de5d9de67b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -217,8 +217,8 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
217 struct iwl_calib_cfg_cmd calib_cfg_cmd; 217 struct iwl_calib_cfg_cmd calib_cfg_cmd;
218 struct iwl_host_cmd cmd = { 218 struct iwl_host_cmd cmd = {
219 .id = CALIBRATION_CFG_CMD, 219 .id = CALIBRATION_CFG_CMD,
220 .len = sizeof(struct iwl_calib_cfg_cmd), 220 .len = { sizeof(struct iwl_calib_cfg_cmd), },
221 .data = &calib_cfg_cmd, 221 .data = { &calib_cfg_cmd, },
222 }; 222 };
223 223
224 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); 224 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
@@ -440,7 +440,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
440 IWL_MASK(0, priv->hw_params.max_txq_num)); 440 IWL_MASK(0, priv->hw_params.max_txq_num));
441 441
442 /* Activate all Tx DMA/FIFO channels */ 442 /* Activate all Tx DMA/FIFO channels */
443 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); 443 iwlagn_txq_set_sched(priv, IWL_MASK(0, 7));
444 444
445 /* map queues to FIFOs */ 445 /* map queues to FIFOs */
446 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) 446 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 08e3cae4fa5a..11c6c1169e78 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -134,12 +134,10 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
134 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 134 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
135 struct iwl_host_cmd cmd = { 135 struct iwl_host_cmd cmd = {
136 .id = REPLY_TX_BEACON, 136 .id = REPLY_TX_BEACON,
137 .flags = CMD_SIZE_HUGE,
138 }; 137 };
139 u32 frame_size; 138 u32 frame_size;
140 u32 rate_flags; 139 u32 rate_flags;
141 u32 rate; 140 u32 rate;
142 int err;
143 141
144 /* 142 /*
145 * We have to set up the TX command, the TX Beacon command, and the 143 * We have to set up the TX command, the TX Beacon command, and the
@@ -156,17 +154,15 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
156 if (WARN_ON(!priv->beacon_skb)) 154 if (WARN_ON(!priv->beacon_skb))
157 return -EINVAL; 155 return -EINVAL;
158 156
159 /* Allocate beacon memory */ 157 /* Allocate beacon command */
160 tx_beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd) + priv->beacon_skb->len, 158 if (!priv->beacon_cmd)
161 GFP_KERNEL); 159 priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
160 tx_beacon_cmd = priv->beacon_cmd;
162 if (!tx_beacon_cmd) 161 if (!tx_beacon_cmd)
163 return -ENOMEM; 162 return -ENOMEM;
164 163
165 frame_size = priv->beacon_skb->len; 164 frame_size = priv->beacon_skb->len;
166 165
167 /* Set up TX beacon contents */
168 memcpy(tx_beacon_cmd->frame, priv->beacon_skb->data, frame_size);
169
170 /* Set up TX command fields */ 166 /* Set up TX command fields */
171 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); 167 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
172 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; 168 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
@@ -175,7 +171,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
175 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; 171 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
176 172
177 /* Set up TX beacon command fields */ 173 /* Set up TX beacon command fields */
178 iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, 174 iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
179 frame_size); 175 frame_size);
180 176
181 /* Set up packet rate and flags */ 177 /* Set up packet rate and flags */
@@ -189,164 +185,14 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
189 rate_flags); 185 rate_flags);
190 186
191 /* Submit command */ 187 /* Submit command */
192 cmd.len = sizeof(*tx_beacon_cmd) + frame_size; 188 cmd.len[0] = sizeof(*tx_beacon_cmd);
193 cmd.data = tx_beacon_cmd; 189 cmd.data[0] = tx_beacon_cmd;
194 190 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
195 err = iwl_send_cmd_sync(priv, &cmd); 191 cmd.len[1] = frame_size;
196 192 cmd.data[1] = priv->beacon_skb->data;
197 /* Free temporary storage */ 193 cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
198 kfree(tx_beacon_cmd);
199
200 return err;
201}
202
203static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
204{
205 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
206 194
207 dma_addr_t addr = get_unaligned_le32(&tb->lo); 195 return iwl_send_cmd_sync(priv, &cmd);
208 if (sizeof(dma_addr_t) > sizeof(u32))
209 addr |=
210 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
211
212 return addr;
213}
214
215static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
216{
217 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
218
219 return le16_to_cpu(tb->hi_n_len) >> 4;
220}
221
222static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
223 dma_addr_t addr, u16 len)
224{
225 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
226 u16 hi_n_len = len << 4;
227
228 put_unaligned_le32(addr, &tb->lo);
229 if (sizeof(dma_addr_t) > sizeof(u32))
230 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
231
232 tb->hi_n_len = cpu_to_le16(hi_n_len);
233
234 tfd->num_tbs = idx + 1;
235}
236
237static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
238{
239 return tfd->num_tbs & 0x1f;
240}
241
242/**
243 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
244 * @priv - driver private data
245 * @txq - tx queue
246 *
247 * Does NOT advance any TFD circular buffer read/write indexes
248 * Does NOT free the TFD itself (which is within circular buffer)
249 */
250void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
251{
252 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
253 struct iwl_tfd *tfd;
254 struct pci_dev *dev = priv->pci_dev;
255 int index = txq->q.read_ptr;
256 int i;
257 int num_tbs;
258
259 tfd = &tfd_tmp[index];
260
261 /* Sanity check on number of chunks */
262 num_tbs = iwl_tfd_get_num_tbs(tfd);
263
264 if (num_tbs >= IWL_NUM_OF_TBS) {
265 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
266 /* @todo issue fatal error, it is quite serious situation */
267 return;
268 }
269
270 /* Unmap tx_cmd */
271 if (num_tbs)
272 pci_unmap_single(dev,
273 dma_unmap_addr(&txq->meta[index], mapping),
274 dma_unmap_len(&txq->meta[index], len),
275 PCI_DMA_BIDIRECTIONAL);
276
277 /* Unmap chunks, if any. */
278 for (i = 1; i < num_tbs; i++)
279 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
280 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
281
282 /* free SKB */
283 if (txq->txb) {
284 struct sk_buff *skb;
285
286 skb = txq->txb[txq->q.read_ptr].skb;
287
288 /* can be called from irqs-disabled context */
289 if (skb) {
290 dev_kfree_skb_any(skb);
291 txq->txb[txq->q.read_ptr].skb = NULL;
292 }
293 }
294}
295
296int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
297 struct iwl_tx_queue *txq,
298 dma_addr_t addr, u16 len,
299 u8 reset, u8 pad)
300{
301 struct iwl_queue *q;
302 struct iwl_tfd *tfd, *tfd_tmp;
303 u32 num_tbs;
304
305 q = &txq->q;
306 tfd_tmp = (struct iwl_tfd *)txq->tfds;
307 tfd = &tfd_tmp[q->write_ptr];
308
309 if (reset)
310 memset(tfd, 0, sizeof(*tfd));
311
312 num_tbs = iwl_tfd_get_num_tbs(tfd);
313
314 /* Each TFD can point to a maximum 20 Tx buffers */
315 if (num_tbs >= IWL_NUM_OF_TBS) {
316 IWL_ERR(priv, "Error can not send more than %d chunks\n",
317 IWL_NUM_OF_TBS);
318 return -EINVAL;
319 }
320
321 if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
322 return -EINVAL;
323
324 if (unlikely(addr & ~IWL_TX_DMA_MASK))
325 IWL_ERR(priv, "Unaligned address = %llx\n",
326 (unsigned long long)addr);
327
328 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
329
330 return 0;
331}
332
333/*
334 * Tell nic where to find circular buffer of Tx Frame Descriptors for
335 * given Tx queue, and enable the DMA channel used for that queue.
336 *
337 * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
338 * channels supported in hardware.
339 */
340int iwl_hw_tx_queue_init(struct iwl_priv *priv,
341 struct iwl_tx_queue *txq)
342{
343 int txq_id = txq->q.id;
344
345 /* Circular buffer (TFD queue in DRAM) physical base address */
346 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
347 txq->q.dma_addr >> 8);
348
349 return 0;
350} 196}
351 197
352static void iwl_bg_beacon_update(struct work_struct *work) 198static void iwl_bg_beacon_update(struct work_struct *work)
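The rewritten iwlagn_send_beacon_cmd() above is the first command built from two TFD fragments: the fixed-size beacon command (kept in the new priv->beacon_cmd allocation) and the beacon frame itself, both attached with IWL_HCMD_DFL_NOCOPY so neither is copied into the command queue. Condensed, the pattern is:

	/* Sketch: two no-copy fragments, identifiers as in the hunk above. */
	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_BEACON,
	};

	cmd.len[0]       = sizeof(*tx_beacon_cmd);	/* fragment 0: TX beacon command */
	cmd.data[0]      = tx_beacon_cmd;		/* == priv->beacon_cmd */
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	cmd.len[1]       = priv->beacon_skb->len;	/* fragment 1: the frame itself */
	cmd.data[1]      = priv->beacon_skb->data;
	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;

	return iwl_send_cmd_sync(priv, &cmd);

This is what allows the old kzalloc of command-plus-frame, the memcpy of the frame into it, and the CMD_SIZE_HUGE flag to go away.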
@@ -1776,10 +1622,7 @@ static const char *desc_lookup(u32 num)
1776 1622
1777void iwl_dump_nic_error_log(struct iwl_priv *priv) 1623void iwl_dump_nic_error_log(struct iwl_priv *priv)
1778{ 1624{
1779 u32 data2, line; 1625 u32 base;
1780 u32 desc, time, count, base, data1;
1781 u32 blink1, blink2, ilink1, ilink2;
1782 u32 pc, hcmd;
1783 struct iwl_error_event_table table; 1626 struct iwl_error_event_table table;
1784 1627
1785 base = priv->device_pointers.error_event_table; 1628 base = priv->device_pointers.error_event_table;
@@ -1802,37 +1645,40 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1802 1645
1803 iwl_read_targ_mem_words(priv, base, &table, sizeof(table)); 1646 iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
1804 1647
1805 count = table.valid; 1648 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1806
1807 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
1808 IWL_ERR(priv, "Start IWL Error Log Dump:\n"); 1649 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1809 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", 1650 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1810 priv->status, count); 1651 priv->status, table.valid);
1811 } 1652 }
1812 1653
1813 desc = table.error_id; 1654 priv->isr_stats.err_code = table.error_id;
1814 priv->isr_stats.err_code = desc; 1655
1815 pc = table.pc; 1656 trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
1816 blink1 = table.blink1; 1657 table.data1, table.data2, table.line,
1817 blink2 = table.blink2; 1658 table.blink1, table.blink2, table.ilink1,
1818 ilink1 = table.ilink1; 1659 table.ilink2, table.bcon_time, table.gp1,
1819 ilink2 = table.ilink2; 1660 table.gp2, table.gp3, table.ucode_ver,
1820 data1 = table.data1; 1661 table.hw_ver, table.brd_ver);
1821 data2 = table.data2; 1662 IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1822 line = table.line; 1663 desc_lookup(table.error_id));
1823 time = table.tsf_low; 1664 IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1824 hcmd = table.hcmd; 1665 IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1825 1666 IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1826 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, 1667 IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1827 blink1, blink2, ilink1, ilink2); 1668 IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1828 1669 IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1829 IWL_ERR(priv, "Desc Time " 1670 IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1830 "data1 data2 line\n"); 1671 IWL_ERR(priv, "0x%08X | line\n", table.line);
1831 IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", 1672 IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1832 desc_lookup(desc), desc, time, data1, data2, line); 1673 IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1833 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); 1674 IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1834 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", 1675 IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1835 pc, blink1, blink2, ilink1, ilink2, hcmd); 1676 IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1677 IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1678 IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1679 IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1680 IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1681 IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1836} 1682}
1837 1683
1838#define EVENT_START_OFFSET (4 * sizeof(u32)) 1684#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -2114,8 +1960,8 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
2114 struct iwl_calib_cfg_cmd calib_cfg_cmd; 1960 struct iwl_calib_cfg_cmd calib_cfg_cmd;
2115 struct iwl_host_cmd cmd = { 1961 struct iwl_host_cmd cmd = {
2116 .id = CALIBRATION_CFG_CMD, 1962 .id = CALIBRATION_CFG_CMD,
2117 .len = sizeof(struct iwl_calib_cfg_cmd), 1963 .len = { sizeof(struct iwl_calib_cfg_cmd), },
2118 .data = &calib_cfg_cmd, 1964 .data = { &calib_cfg_cmd, },
2119 }; 1965 };
2120 1966
2121 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); 1967 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
@@ -3395,6 +3241,7 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
3395 iwlcore_free_geos(priv); 3241 iwlcore_free_geos(priv);
3396 iwl_free_channel_map(priv); 3242 iwl_free_channel_map(priv);
3397 kfree(priv->scan_cmd); 3243 kfree(priv->scan_cmd);
3244 kfree(priv->beacon_cmd);
3398} 3245}
3399 3246
3400struct ieee80211_ops iwlagn_hw_ops = { 3247struct ieee80211_ops iwlagn_hw_ops = {
@@ -3812,6 +3659,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3812 */ 3659 */
3813 set_bit(STATUS_EXIT_PENDING, &priv->status); 3660 set_bit(STATUS_EXIT_PENDING, &priv->status);
3814 3661
3662 iwl_testmode_cleanup(priv);
3815 iwl_leds_exit(priv); 3663 iwl_leds_exit(priv);
3816 3664
3817 if (priv->mac80211_registered) { 3665 if (priv->mac80211_registered) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index fe33fe8aa418..2495fe7a58cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -191,12 +191,10 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
191void iwl_setup_rx_handlers(struct iwl_priv *priv); 191void iwl_setup_rx_handlers(struct iwl_priv *priv);
192 192
193/* tx */ 193/* tx */
194void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 194void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
195int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 195int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
196 struct iwl_tx_queue *txq, 196 struct iwl_tx_queue *txq,
197 dma_addr_t addr, u16 len, u8 reset, u8 pad); 197 dma_addr_t addr, u16 len, u8 reset);
198int iwl_hw_tx_queue_init(struct iwl_priv *priv,
199 struct iwl_tx_queue *txq);
200void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, 198void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
201 struct ieee80211_tx_info *info); 199 struct ieee80211_tx_info *info);
202int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 200int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
@@ -345,6 +343,7 @@ extern int iwl_alive_start(struct iwl_priv *priv);
345#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL 343#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
346extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len); 344extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len);
347extern void iwl_testmode_init(struct iwl_priv *priv); 345extern void iwl_testmode_init(struct iwl_priv *priv);
346extern void iwl_testmode_cleanup(struct iwl_priv *priv);
348#else 347#else
349static inline 348static inline
350int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) 349int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
@@ -355,6 +354,10 @@ static inline
355void iwl_testmode_init(struct iwl_priv *priv) 354void iwl_testmode_init(struct iwl_priv *priv)
356{ 355{
357} 356}
357static inline
358void iwl_testmode_cleanup(struct iwl_priv *priv)
359{
360}
358#endif 361#endif
359 362
360#endif /* __iwl_agn_h__ */ 363#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 5fdad6532118..6ee5f1aa555c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -205,7 +205,6 @@ enum {
205#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 205#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
206#define SEQ_TO_INDEX(s) ((s) & 0xff) 206#define SEQ_TO_INDEX(s) ((s) & 0xff)
207#define INDEX_TO_SEQ(i) ((i) & 0xff) 207#define INDEX_TO_SEQ(i) ((i) & 0xff)
208#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
209#define SEQ_RX_FRAME cpu_to_le16(0x8000) 208#define SEQ_RX_FRAME cpu_to_le16(0x8000)
210 209
211/** 210/**
@@ -234,9 +233,7 @@ struct iwl_cmd_header {
234 * 233 *
235 * 0:7 tfd index - position within TX queue 234 * 0:7 tfd index - position within TX queue
236 * 8:12 TX queue id 235 * 8:12 TX queue id
237 * 13 reserved 236 * 13:14 reserved
238 * 14 huge - driver sets this to indicate command is in the
239 * 'huge' storage at the end of the command buffers
240 * 15 unsolicited RX or uCode-originated notification 237 * 15 unsolicited RX or uCode-originated notification
241 */ 238 */
242 __le16 sequence; 239 __le16 sequence;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 5b5b0cce4a54..3bb76f6ea410 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -127,16 +127,6 @@ struct iwl_temp_ops {
127struct iwl_lib_ops { 127struct iwl_lib_ops {
128 /* set hw dependent parameters */ 128 /* set hw dependent parameters */
129 int (*set_hw_params)(struct iwl_priv *priv); 129 int (*set_hw_params)(struct iwl_priv *priv);
130 /* Handling TX */
131 void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
132 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
133 struct iwl_tx_queue *txq,
134 dma_addr_t addr,
135 u16 len, u8 reset, u8 pad);
136 void (*txq_free_tfd)(struct iwl_priv *priv,
137 struct iwl_tx_queue *txq);
138 int (*txq_init)(struct iwl_priv *priv,
139 struct iwl_tx_queue *txq);
140 /* setup Rx handler */ 130 /* setup Rx handler */
141 void (*rx_handler_setup)(struct iwl_priv *priv); 131 void (*rx_handler_setup)(struct iwl_priv *priv);
142 /* setup deferred work */ 132 /* setup deferred work */
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 214e4658c495..22a6e3ec7094 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -48,8 +48,6 @@
48#include "iwl-agn-rs.h" 48#include "iwl-agn-rs.h"
49#include "iwl-agn-tt.h" 49#include "iwl-agn-tt.h"
50 50
51#define U32_PAD(n) ((4-(n))&0x3)
52
53struct iwl_tx_queue; 51struct iwl_tx_queue;
54 52
55/* CT-KILL constants */ 53/* CT-KILL constants */
@@ -83,7 +81,7 @@ struct iwl_tx_queue;
83#define MAX_RTS_THRESHOLD 2347U 81#define MAX_RTS_THRESHOLD 2347U
84#define MAX_MSDU_SIZE 2304U 82#define MAX_MSDU_SIZE 2304U
85#define MAX_MPDU_SIZE 2346U 83#define MAX_MPDU_SIZE 2346U
86#define DEFAULT_BEACON_INTERVAL 100U 84#define DEFAULT_BEACON_INTERVAL 200U
87#define DEFAULT_SHORT_RETRY_LIMIT 7U 85#define DEFAULT_SHORT_RETRY_LIMIT 7U
88#define DEFAULT_LONG_RETRY_LIMIT 4U 86#define DEFAULT_LONG_RETRY_LIMIT 4U
89 87
@@ -112,8 +110,6 @@ struct iwl_cmd_meta {
112 struct iwl_device_cmd *cmd, 110 struct iwl_device_cmd *cmd,
113 struct iwl_rx_packet *pkt); 111 struct iwl_rx_packet *pkt);
114 112
115 /* The CMD_SIZE_HUGE flag bit indicates that the command
116 * structure is stored at the end of the shared queue memory. */
117 u32 flags; 113 u32 flags;
118 114
119 DEFINE_DMA_UNMAP_ADDR(mapping); 115 DEFINE_DMA_UNMAP_ADDR(mapping);
@@ -123,7 +119,23 @@ struct iwl_cmd_meta {
123/* 119/*
124 * Generic queue structure 120 * Generic queue structure
125 * 121 *
126 * Contains common data for Rx and Tx queues 122 * Contains common data for Rx and Tx queues.
123 *
124 * Note the difference between n_bd and n_window: the hardware
125 * always assumes 256 descriptors, so n_bd is always 256 (unless
 126 * there are HW changes in the future). For the normal TX
 127 * queues, n_window, which is the size of the software queue data,
 128 * is also 256; however, for the command queue, n_window is only
129 * 32 since we don't need so many commands pending. Since the HW
130 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
131 * the software buffers (in the variables @meta, @txb in struct
132 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
133 * in the same struct) have 256.
134 * This means that we end up with the following:
135 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
136 * SW entries: | 0 | ... | 31 |
137 * where N is a number between 0 and 7. This means that the SW
 138 * data is a window overlaid on the HW queue.
127 */ 139 */
128struct iwl_queue { 140struct iwl_queue {
129 int n_bd; /* number of BDs in this queue */ 141 int n_bd; /* number of BDs in this queue */
@@ -165,7 +177,7 @@ struct iwl_tx_info {
165 177
166struct iwl_tx_queue { 178struct iwl_tx_queue {
167 struct iwl_queue q; 179 struct iwl_queue q;
168 void *tfds; 180 struct iwl_tfd *tfds;
169 struct iwl_device_cmd **cmd; 181 struct iwl_device_cmd **cmd;
170 struct iwl_cmd_meta *meta; 182 struct iwl_cmd_meta *meta;
171 struct iwl_tx_info *txb; 183 struct iwl_tx_info *txb;
@@ -247,7 +259,6 @@ enum {
247 CMD_SYNC = 0, 259 CMD_SYNC = 0,
248 CMD_SIZE_NORMAL = 0, 260 CMD_SIZE_NORMAL = 0,
249 CMD_NO_SKB = 0, 261 CMD_NO_SKB = 0,
250 CMD_SIZE_HUGE = (1 << 0),
251 CMD_ASYNC = (1 << 1), 262 CMD_ASYNC = (1 << 1),
252 CMD_WANT_SKB = (1 << 2), 263 CMD_WANT_SKB = (1 << 2),
253 CMD_MAPPED = (1 << 3), 264 CMD_MAPPED = (1 << 3),
@@ -259,8 +270,8 @@ enum {
259 * struct iwl_device_cmd 270 * struct iwl_device_cmd
260 * 271 *
261 * For allocation of the command and tx queues, this establishes the overall 272 * For allocation of the command and tx queues, this establishes the overall
262 * size of the largest command we send to uCode, except for a scan command 273 * size of the largest command we send to uCode, except for commands that
263 * (which is relatively huge; space is allocated separately). 274 * aren't fully copied and use other TFD space.
264 */ 275 */
265struct iwl_device_cmd { 276struct iwl_device_cmd {
266 struct iwl_cmd_header hdr; /* uCode API */ 277 struct iwl_cmd_header hdr; /* uCode API */
@@ -277,15 +288,21 @@ struct iwl_device_cmd {
277 288
278#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) 289#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
279 290
291#define IWL_MAX_CMD_TFDS 2
292
293enum iwl_hcmd_dataflag {
294 IWL_HCMD_DFL_NOCOPY = BIT(0),
295};
280 296
281struct iwl_host_cmd { 297struct iwl_host_cmd {
282 const void *data; 298 const void *data[IWL_MAX_CMD_TFDS];
283 unsigned long reply_page; 299 unsigned long reply_page;
284 void (*callback)(struct iwl_priv *priv, 300 void (*callback)(struct iwl_priv *priv,
285 struct iwl_device_cmd *cmd, 301 struct iwl_device_cmd *cmd,
286 struct iwl_rx_packet *pkt); 302 struct iwl_rx_packet *pkt);
287 u32 flags; 303 u32 flags;
288 u16 len; 304 u16 len[IWL_MAX_CMD_TFDS];
305 u8 dataflags[IWL_MAX_CMD_TFDS];
289 u8 id; 306 u8 id;
290}; 307};
291 308
@@ -688,17 +705,8 @@ static inline int iwl_queue_used(const struct iwl_queue *q, int i)
688} 705}
689 706
690 707
691static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) 708static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
692{ 709{
693 /*
694 * This is for init calibration result and scan command which
695 * required buffer > TFD_MAX_PAYLOAD_SIZE,
696 * the big buffer at end of command array
697 */
698 if (is_huge)
699 return q->n_window; /* must be power of 2 */
700
701 /* Otherwise, use normal size buffers */
702 return index & (q->n_window - 1); 710 return index & (q->n_window - 1);
703} 711}
704 712
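With the 'huge' buffer gone, get_cmd_index() reduces to masking with the software window size, which is exactly the window described in the iwl_queue comment above. A standalone illustration (the numbers are the documented defaults, nothing new is being configured here):

	/* 256 hardware descriptors share n_window (32 for the command queue)
	 * software entries, so HW indexes i, i + 32, i + 64, ... all land in
	 * the same SW slot. */
	static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
	{
		return index & (q->n_window - 1);	/* n_window must be a power of 2 */
	}

	/* e.g. n_window == 32: indexes 5, 37, 69 all map to SW entry 5 */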
@@ -1171,6 +1179,14 @@ enum iwl_scan_type {
1171 IWL_SCAN_OFFCH_TX, 1179 IWL_SCAN_OFFCH_TX,
1172}; 1180};
1173 1181
1182#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
1183struct iwl_testmode_trace {
1184 u8 *cpu_addr;
1185 u8 *trace_addr;
1186 dma_addr_t dma_addr;
1187 bool trace_enabled;
1188};
1189#endif
1174struct iwl_priv { 1190struct iwl_priv {
1175 1191
1176 /* ieee device used by generic ieee processing code */ 1192 /* ieee device used by generic ieee processing code */
@@ -1452,6 +1468,7 @@ struct iwl_priv {
1452 struct work_struct beacon_update; 1468 struct work_struct beacon_update;
1453 struct iwl_rxon_context *beacon_ctx; 1469 struct iwl_rxon_context *beacon_ctx;
1454 struct sk_buff *beacon_skb; 1470 struct sk_buff *beacon_skb;
1471 void *beacon_cmd;
1455 1472
1456 struct work_struct tt_work; 1473 struct work_struct tt_work;
1457 struct work_struct ct_enter; 1474 struct work_struct ct_enter;
@@ -1501,6 +1518,11 @@ struct iwl_priv {
1501 struct led_classdev led; 1518 struct led_classdev led;
1502 unsigned long blink_on, blink_off; 1519 unsigned long blink_on, blink_off;
1503 bool led_registered; 1520 bool led_registered;
1521#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
1522 struct iwl_testmode_trace testmode_trace;
1523#endif
1524 u32 dbg_fixed_rate;
1525
1504}; /*iwl_priv */ 1526}; /*iwl_priv */
1505 1527
1506static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1528static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index f00172cb8a6d..2c84ba95afca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -137,20 +137,27 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
137#define TRACE_SYSTEM iwlwifi 137#define TRACE_SYSTEM iwlwifi
138 138
139TRACE_EVENT(iwlwifi_dev_hcmd, 139TRACE_EVENT(iwlwifi_dev_hcmd,
140 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags), 140 TP_PROTO(struct iwl_priv *priv, u32 flags,
141 TP_ARGS(priv, hcmd, len, flags), 141 const void *hcmd0, size_t len0,
142 const void *hcmd1, size_t len1,
143 const void *hcmd2, size_t len2),
144 TP_ARGS(priv, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
142 TP_STRUCT__entry( 145 TP_STRUCT__entry(
143 PRIV_ENTRY 146 PRIV_ENTRY
144 __dynamic_array(u8, hcmd, len) 147 __dynamic_array(u8, hcmd0, len0)
148 __dynamic_array(u8, hcmd1, len1)
149 __dynamic_array(u8, hcmd2, len2)
145 __field(u32, flags) 150 __field(u32, flags)
146 ), 151 ),
147 TP_fast_assign( 152 TP_fast_assign(
148 PRIV_ASSIGN; 153 PRIV_ASSIGN;
149 memcpy(__get_dynamic_array(hcmd), hcmd, len); 154 memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
155 memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
156 memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
150 __entry->flags = flags; 157 __entry->flags = flags;
151 ), 158 ),
152 TP_printk("[%p] hcmd %#.2x (%ssync)", 159 TP_printk("[%p] hcmd %#.2x (%ssync)",
153 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0], 160 __entry->priv, ((u8 *)__get_dynamic_array(hcmd0))[0],
154 __entry->flags & CMD_ASYNC ? "a" : "") 161 __entry->flags & CMD_ASYNC ? "a" : "")
155); 162);
156 163
@@ -202,15 +209,18 @@ TRACE_EVENT(iwlwifi_dev_tx,
202); 209);
203 210
204TRACE_EVENT(iwlwifi_dev_ucode_error, 211TRACE_EVENT(iwlwifi_dev_ucode_error,
205 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time, 212 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
206 u32 data1, u32 data2, u32 line, u32 blink1, 213 u32 data1, u32 data2, u32 line, u32 blink1,
207 u32 blink2, u32 ilink1, u32 ilink2), 214 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
208 TP_ARGS(priv, desc, time, data1, data2, line, 215 u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
209 blink1, blink2, ilink1, ilink2), 216 u32 brd_ver),
217 TP_ARGS(priv, desc, tsf_low, data1, data2, line,
218 blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
219 gp3, ucode_ver, hw_ver, brd_ver),
210 TP_STRUCT__entry( 220 TP_STRUCT__entry(
211 PRIV_ENTRY 221 PRIV_ENTRY
212 __field(u32, desc) 222 __field(u32, desc)
213 __field(u32, time) 223 __field(u32, tsf_low)
214 __field(u32, data1) 224 __field(u32, data1)
215 __field(u32, data2) 225 __field(u32, data2)
216 __field(u32, line) 226 __field(u32, line)
@@ -218,11 +228,18 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
218 __field(u32, blink2) 228 __field(u32, blink2)
219 __field(u32, ilink1) 229 __field(u32, ilink1)
220 __field(u32, ilink2) 230 __field(u32, ilink2)
231 __field(u32, bcon_time)
232 __field(u32, gp1)
233 __field(u32, gp2)
234 __field(u32, gp3)
235 __field(u32, ucode_ver)
236 __field(u32, hw_ver)
237 __field(u32, brd_ver)
221 ), 238 ),
222 TP_fast_assign( 239 TP_fast_assign(
223 PRIV_ASSIGN; 240 PRIV_ASSIGN;
224 __entry->desc = desc; 241 __entry->desc = desc;
225 __entry->time = time; 242 __entry->tsf_low = tsf_low;
226 __entry->data1 = data1; 243 __entry->data1 = data1;
227 __entry->data2 = data2; 244 __entry->data2 = data2;
228 __entry->line = line; 245 __entry->line = line;
@@ -230,12 +247,25 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
230 __entry->blink2 = blink2; 247 __entry->blink2 = blink2;
231 __entry->ilink1 = ilink1; 248 __entry->ilink1 = ilink1;
232 __entry->ilink2 = ilink2; 249 __entry->ilink2 = ilink2;
250 __entry->bcon_time = bcon_time;
251 __entry->gp1 = gp1;
252 __entry->gp2 = gp2;
253 __entry->gp3 = gp3;
254 __entry->ucode_ver = ucode_ver;
255 __entry->hw_ver = hw_ver;
256 __entry->brd_ver = brd_ver;
233 ), 257 ),
234 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, " 258 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
235 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X", 259 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
236 __entry->priv, __entry->desc, __entry->time, __entry->data1, 260 "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
261 "hw 0x%08X brd 0x%08X",
262 __entry->priv, __entry->desc, __entry->tsf_low,
263 __entry->data1,
237 __entry->data2, __entry->line, __entry->blink1, 264 __entry->data2, __entry->line, __entry->blink1,
238 __entry->blink2, __entry->ilink1, __entry->ilink2) 265 __entry->blink2, __entry->ilink1, __entry->ilink2,
266 __entry->bcon_time, __entry->gp1, __entry->gp2,
267 __entry->gp3, __entry->ucode_ver, __entry->hw_ver,
268 __entry->brd_ver)
239); 269);
240 270
241TRACE_EVENT(iwlwifi_dev_ucode_event, 271TRACE_EVENT(iwlwifi_dev_ucode_event,
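The iwlwifi_dev_hcmd tracepoint now records up to three chunks per command instead of one flat buffer. A hypothetical call site, sketched only to show how the arguments line up with the TP_PROTO above (out_cmd and cmd stand for the enqueued device command and the original iwl_host_cmd; the real caller lives in the command-queue code, which is not part of this hunk):

	/* Unused fragments can be passed as (NULL, 0) and become empty
	 * dynamic arrays in the trace record. */
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       &out_cmd->hdr, sizeof(out_cmd->hdr),
			       cmd->data[0], cmd->len[0],
			       cmd->data[1], cmd->len[1]);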
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index c8397962632c..47a56bc1cd12 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -216,15 +216,14 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
216 216
217static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode) 217static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
218{ 218{
219 u32 otpgp; 219 iwl_read32(priv, CSR_OTP_GP_REG);
220 220
221 otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
222 if (mode == IWL_OTP_ACCESS_ABSOLUTE) 221 if (mode == IWL_OTP_ACCESS_ABSOLUTE)
223 iwl_clear_bit(priv, CSR_OTP_GP_REG, 222 iwl_clear_bit(priv, CSR_OTP_GP_REG,
224 CSR_OTP_GP_REG_OTP_ACCESS_MODE); 223 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
225 else 224 else
226 iwl_set_bit(priv, CSR_OTP_GP_REG, 225 iwl_set_bit(priv, CSR_OTP_GP_REG,
227 CSR_OTP_GP_REG_OTP_ACCESS_MODE); 226 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
228} 227}
229 228
230static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) 229static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 8f0beb992ccf..76f996623140 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -188,6 +188,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
188 cmd_idx = iwl_enqueue_hcmd(priv, cmd); 188 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
189 if (cmd_idx < 0) { 189 if (cmd_idx < 0) {
190 ret = cmd_idx; 190 ret = cmd_idx;
191 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
191 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", 192 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
192 get_cmd_string(cmd->id), ret); 193 get_cmd_string(cmd->id), ret);
193 return ret; 194 return ret;
@@ -264,8 +265,8 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
264{ 265{
265 struct iwl_host_cmd cmd = { 266 struct iwl_host_cmd cmd = {
266 .id = id, 267 .id = id,
267 .len = len, 268 .len = { len, },
268 .data = data, 269 .data = { data, },
269 }; 270 };
270 271
271 return iwl_send_cmd_sync(priv, &cmd); 272 return iwl_send_cmd_sync(priv, &cmd);
@@ -279,8 +280,8 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
279{ 280{
280 struct iwl_host_cmd cmd = { 281 struct iwl_host_cmd cmd = {
281 .id = id, 282 .id = id,
282 .len = len, 283 .len = { len, },
283 .data = data, 284 .data = { data, },
284 }; 285 };
285 286
286 cmd.flags |= CMD_ASYNC; 287 cmd.flags |= CMD_ASYNC;
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 439187f903c9..7c23beb49d7c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -107,8 +107,8 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
107{ 107{
108 struct iwl_host_cmd cmd = { 108 struct iwl_host_cmd cmd = {
109 .id = REPLY_LEDS_CMD, 109 .id = REPLY_LEDS_CMD,
110 .len = sizeof(struct iwl_led_cmd), 110 .len = { sizeof(struct iwl_led_cmd), },
111 .data = led_cmd, 111 .data = { led_cmd, },
112 .flags = CMD_ASYNC, 112 .flags = CMD_ASYNC,
113 .callback = NULL, 113 .callback = NULL,
114 }; 114 };
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 3c8cebde16cc..7df2814fd4f8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -141,7 +141,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
141 struct iwl_host_cmd cmd = { 141 struct iwl_host_cmd cmd = {
142 .id = REPLY_ADD_STA, 142 .id = REPLY_ADD_STA,
143 .flags = flags, 143 .flags = flags,
144 .data = data, 144 .data = { data, },
145 }; 145 };
146 u8 sta_id __maybe_unused = sta->sta.sta_id; 146 u8 sta_id __maybe_unused = sta->sta.sta_id;
147 147
@@ -155,7 +155,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
155 might_sleep(); 155 might_sleep();
156 } 156 }
157 157
158 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); 158 cmd.len[0] = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
159 ret = iwl_send_cmd(priv, &cmd); 159 ret = iwl_send_cmd(priv, &cmd);
160 160
161 if (ret || (flags & CMD_ASYNC)) 161 if (ret || (flags & CMD_ASYNC))
@@ -401,9 +401,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
401 401
402 struct iwl_host_cmd cmd = { 402 struct iwl_host_cmd cmd = {
403 .id = REPLY_REMOVE_STA, 403 .id = REPLY_REMOVE_STA,
404 .len = sizeof(struct iwl_rem_sta_cmd), 404 .len = { sizeof(struct iwl_rem_sta_cmd), },
405 .flags = CMD_SYNC, 405 .flags = CMD_SYNC,
406 .data = &rm_sta_cmd, 406 .data = { &rm_sta_cmd, },
407 }; 407 };
408 408
409 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 409 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
@@ -760,9 +760,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
760 760
761 struct iwl_host_cmd cmd = { 761 struct iwl_host_cmd cmd = {
762 .id = REPLY_TX_LINK_QUALITY_CMD, 762 .id = REPLY_TX_LINK_QUALITY_CMD,
763 .len = sizeof(struct iwl_link_quality_cmd), 763 .len = { sizeof(struct iwl_link_quality_cmd), },
764 .flags = flags, 764 .flags = flags,
765 .data = lq, 765 .data = { lq, },
766 }; 766 };
767 767
768 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) 768 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
index 89b6696622c1..69b7e6bf2d6f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
@@ -97,6 +97,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
97 97
98 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, 98 [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
99 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, 99 [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
100
101 [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
102
103 [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
104 [IWL_TM_ATTR_TRACE_DATA] = { .type = NLA_UNSPEC, },
105
106 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
100}; 107};
101 108
102/* 109/*
@@ -167,6 +174,31 @@ nla_put_failure:
167void iwl_testmode_init(struct iwl_priv *priv) 174void iwl_testmode_init(struct iwl_priv *priv)
168{ 175{
169 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; 176 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
177 priv->testmode_trace.trace_enabled = false;
178}
179
180static void iwl_trace_cleanup(struct iwl_priv *priv)
181{
182 struct device *dev = &priv->pci_dev->dev;
183
184 if (priv->testmode_trace.trace_enabled) {
185 if (priv->testmode_trace.cpu_addr &&
186 priv->testmode_trace.dma_addr)
187 dma_free_coherent(dev,
188 TRACE_TOTAL_SIZE,
189 priv->testmode_trace.cpu_addr,
190 priv->testmode_trace.dma_addr);
191 priv->testmode_trace.trace_enabled = false;
192 priv->testmode_trace.cpu_addr = NULL;
193 priv->testmode_trace.trace_addr = NULL;
194 priv->testmode_trace.dma_addr = 0;
195 }
196}
197
198
199void iwl_testmode_cleanup(struct iwl_priv *priv)
200{
201 iwl_trace_cleanup(priv);
170} 202}
171 203
172/* 204/*
@@ -198,10 +230,11 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
198 } 230 }
199 231
200 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); 232 cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
201 cmd.data = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); 233 cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
202 cmd.len = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); 234 cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
235 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
203 IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x," 236 IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
204 " len %d\n", cmd.id, cmd.flags, cmd.len); 237 " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
205 /* ok, let's submit the command to ucode */ 238 /* ok, let's submit the command to ucode */
206 return iwl_send_cmd(priv, &cmd); 239 return iwl_send_cmd(priv, &cmd);
207} 240}
@@ -388,6 +421,38 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
388 "Error starting the device: %d\n", status); 421 "Error starting the device: %d\n", status);
389 break; 422 break;
390 423
424 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
425 if (priv->eeprom) {
426 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
427 priv->cfg->base_params->eeprom_size + 20);
428 if (!skb) {
429 IWL_DEBUG_INFO(priv,
430 "Error allocating memory\n");
431 return -ENOMEM;
432 }
433 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
434 IWL_TM_CMD_DEV2APP_EEPROM_RSP);
435 NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
436 priv->cfg->base_params->eeprom_size,
437 priv->eeprom);
438 status = cfg80211_testmode_reply(skb);
439 if (status < 0)
440 IWL_DEBUG_INFO(priv,
441 "Error sending msg : %d\n",
442 status);
443 } else
444 return -EFAULT;
445 break;
446
447 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
448 if (!tb[IWL_TM_ATTR_FIXRATE]) {
449 IWL_DEBUG_INFO(priv,
450 "Error finding fixrate setting\n");
451 return -ENOMSG;
452 }
453 priv->dbg_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
454 break;
455
391 default: 456 default:
392 IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n"); 457 IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
393 return -ENOSYS; 458 return -ENOSYS;
@@ -399,6 +464,102 @@ nla_put_failure:
399 return -EMSGSIZE; 464 return -EMSGSIZE;
400} 465}
401 466
467
468/*
469 * This function handles the user application commands for uCode trace
470 *
 471 * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and dispatches
 472 * to the corresponding handler.
473 *
 474 * If it's an unknown command ID, -ENOSYS is replied; otherwise, the returned
475 * value of the actual command execution is replied to the user application.
476 *
477 * @hw: ieee80211_hw object that represents the device
478 * @tb: gnl message fields from the user space
479 */
480static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
481{
482 struct iwl_priv *priv = hw->priv;
483 struct sk_buff *skb;
484 int status = 0;
485 struct device *dev = &priv->pci_dev->dev;
486
487 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
488 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
489 if (priv->testmode_trace.trace_enabled)
490 return -EBUSY;
491
492 priv->testmode_trace.cpu_addr =
493 dma_alloc_coherent(dev,
494 TRACE_TOTAL_SIZE,
495 &priv->testmode_trace.dma_addr,
496 GFP_KERNEL);
497 if (!priv->testmode_trace.cpu_addr)
498 return -ENOMEM;
499 priv->testmode_trace.trace_enabled = true;
500 priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
501 priv->testmode_trace.cpu_addr, 0x100);
502 memset(priv->testmode_trace.trace_addr, 0x03B,
503 TRACE_BUFF_SIZE);
504 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
505 sizeof(priv->testmode_trace.dma_addr) + 20);
506 if (!skb) {
507 IWL_DEBUG_INFO(priv,
508 "Error allocating memory\n");
509 iwl_trace_cleanup(priv);
510 return -ENOMEM;
511 }
512 NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
513 sizeof(priv->testmode_trace.dma_addr),
514 (u64 *)&priv->testmode_trace.dma_addr);
515 status = cfg80211_testmode_reply(skb);
516 if (status < 0) {
517 IWL_DEBUG_INFO(priv,
518 "Error sending msg : %d\n",
519 status);
520 }
521 break;
522
523 case IWL_TM_CMD_APP2DEV_END_TRACE:
524 iwl_trace_cleanup(priv);
525 break;
526
527 case IWL_TM_CMD_APP2DEV_READ_TRACE:
528 if (priv->testmode_trace.trace_enabled &&
529 priv->testmode_trace.trace_addr) {
530 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
531 20 + TRACE_BUFF_SIZE);
532 if (skb == NULL) {
533 IWL_DEBUG_INFO(priv,
534 "Error allocating memory\n");
535 return -ENOMEM;
536 }
537 NLA_PUT(skb, IWL_TM_ATTR_TRACE_DATA,
538 TRACE_BUFF_SIZE,
539 priv->testmode_trace.trace_addr);
540 status = cfg80211_testmode_reply(skb);
541 if (status < 0) {
542 IWL_DEBUG_INFO(priv,
543 "Error sending msg : %d\n", status);
544 }
545 } else
546 return -EFAULT;
547 break;
548
549 default:
550 IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n");
551 return -ENOSYS;
552 }
553 return status;
554
555nla_put_failure:
556 kfree_skb(skb);
557 if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
558 IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
559 iwl_trace_cleanup(priv);
560 return -EMSGSIZE;
561}
562
402/* The testmode gnl message handler that takes the gnl message from the 563/* The testmode gnl message handler that takes the gnl message from the
403 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then 564 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
404 * invoke the corresponding handlers. 565 * invoke the corresponding handlers.
@@ -455,9 +616,19 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
455 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: 616 case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
456 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: 617 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
457 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: 618 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
619 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
620 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
458 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); 621 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
459 result = iwl_testmode_driver(hw, tb); 622 result = iwl_testmode_driver(hw, tb);
460 break; 623 break;
624
625 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
626 case IWL_TM_CMD_APP2DEV_END_TRACE:
627 case IWL_TM_CMD_APP2DEV_READ_TRACE:
628 IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
629 result = iwl_testmode_trace(hw, tb);
630 break;
631
461 default: 632 default:
462 IWL_DEBUG_INFO(priv, "Unknown testmode command\n"); 633 IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
463 result = -ENOSYS; 634 result = -ENOSYS;
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index 31f8949f2801..a88085e9b361 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -88,9 +88,15 @@ enum iwl_tm_cmd_t {
88 IWL_TM_CMD_APP2DEV_LOAD_INIT_FW, 88 IWL_TM_CMD_APP2DEV_LOAD_INIT_FW,
89 IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB, 89 IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB,
90 IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW, 90 IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW,
91 IWL_TM_CMD_APP2DEV_GET_EEPROM,
92 IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
91 /* if there is other new command for the driver layer operation, 93 /* if there is other new command for the driver layer operation,
92 * append them here */ 94 * append them here */
93 95
 96 /* commands from user space for uCode trace operations */
97 IWL_TM_CMD_APP2DEV_BEGIN_TRACE,
98 IWL_TM_CMD_APP2DEV_END_TRACE,
99 IWL_TM_CMD_APP2DEV_READ_TRACE,
94 100
95 /* commands from kernel space to carry the synchronous response 101 /* commands from kernel space to carry the synchronous response
96 * to user application */ 102 * to user application */
@@ -99,6 +105,11 @@ enum iwl_tm_cmd_t {
99 /* commands from kernel space to multicast the spontaneous messages 105 /* commands from kernel space to multicast the spontaneous messages
100 * to user application */ 106 * to user application */
101 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, 107 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
108
109 /* commands from kernel space to carry the eeprom response
110 * to user application */
111 IWL_TM_CMD_DEV2APP_EEPROM_RSP,
112
102 IWL_TM_CMD_MAX, 113 IWL_TM_CMD_MAX,
103}; 114};
104 115
@@ -144,8 +155,31 @@ enum iwl_tm_attr_t {
144 * application */ 155 * application */
145 IWL_TM_ATTR_UCODE_RX_PKT, 156 IWL_TM_ATTR_UCODE_RX_PKT,
146 157
 158 /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM_RSP,
 159 * the mandatory fields are:
 160 * IWL_TM_ATTR_EEPROM for the data content responding to the user
 161 * application */
162 IWL_TM_ATTR_EEPROM,
163
 164 /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
 165 * the mandatory fields are:
 166 * IWL_TM_ATTR_TRACE_ADDR for the trace address
 167 */
168 IWL_TM_ATTR_TRACE_ADDR,
169 IWL_TM_ATTR_TRACE_DATA,
170
171 /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
 172 * the mandatory fields are:
173 * IWL_TM_ATTR_FIXRATE for the fixed rate
174 */
175 IWL_TM_ATTR_FIXRATE,
176
147 IWL_TM_ATTR_MAX, 177 IWL_TM_ATTR_MAX,
148}; 178};
149 179
180/* uCode trace buffer */
181#define TRACE_BUFF_SIZE 0x20000
182#define TRACE_BUFF_PADD 0x2000
183#define TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD)
150 184
151#endif 185#endif
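
A quick check on the trace-buffer sizing introduced above: iwl_testmode_trace() allocates TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD) with dma_alloc_coherent() and then aligns the CPU pointer up to a 0x100 boundary, so the padding must cover the worst-case alignment loss. The standalone sketch below only illustrates that arithmetic; align_up() is a hypothetical stand-in for the kernel's PTR_ALIGN().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRACE_BUFF_SIZE  0x20000
#define TRACE_BUFF_PADD  0x2000
#define TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD)

/* Hypothetical stand-in for the kernel's PTR_ALIGN(): round up to 'align'. */
static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Pretend the coherent allocation landed one byte past a boundary
	 * (the worst case for alignment loss). */
	uintptr_t cpu_addr = 0x1000 + 1;
	uintptr_t trace_addr = align_up(cpu_addr, 0x100);

	/* At most 0xFF bytes are lost to alignment, far less than
	 * TRACE_BUFF_PADD, so TRACE_BUFF_SIZE usable bytes always fit. */
	assert(trace_addr - cpu_addr < 0x100);
	assert(trace_addr + TRACE_BUFF_SIZE <= cpu_addr + TRACE_TOTAL_SIZE);

	printf("lost to alignment: %#lx bytes\n",
	       (unsigned long)(trace_addr - cpu_addr));
	return 0;
}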
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index e69597ea43e2..686e176b5ebd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34#include "iwl-eeprom.h" 34#include "iwl-eeprom.h"
35#include "iwl-agn.h"
35#include "iwl-dev.h" 36#include "iwl-dev.h"
36#include "iwl-core.h" 37#include "iwl-core.h"
37#include "iwl-sta.h" 38#include "iwl-sta.h"
@@ -85,6 +86,158 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
85 txq->need_update = 0; 86 txq->need_update = 0;
86} 87}
87 88
89static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
90{
91 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
92
93 dma_addr_t addr = get_unaligned_le32(&tb->lo);
94 if (sizeof(dma_addr_t) > sizeof(u32))
95 addr |=
96 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
97
98 return addr;
99}
100
101static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
102{
103 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
104
105 return le16_to_cpu(tb->hi_n_len) >> 4;
106}
107
108static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
109 dma_addr_t addr, u16 len)
110{
111 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
112 u16 hi_n_len = len << 4;
113
114 put_unaligned_le32(addr, &tb->lo);
115 if (sizeof(dma_addr_t) > sizeof(u32))
116 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
117
118 tb->hi_n_len = cpu_to_le16(hi_n_len);
119
120 tfd->num_tbs = idx + 1;
121}
122
123static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
124{
125 return tfd->num_tbs & 0x1f;
126}
127
128static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
129 struct iwl_tfd *tfd)
130{
131 struct pci_dev *dev = priv->pci_dev;
132 int i;
133 int num_tbs;
134
135 /* Sanity check on number of chunks */
136 num_tbs = iwl_tfd_get_num_tbs(tfd);
137
138 if (num_tbs >= IWL_NUM_OF_TBS) {
139 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
 140 /* @todo issue fatal error, it is quite a serious situation */
141 return;
142 }
143
144 /* Unmap tx_cmd */
145 if (num_tbs)
146 pci_unmap_single(dev,
147 dma_unmap_addr(meta, mapping),
148 dma_unmap_len(meta, len),
149 PCI_DMA_BIDIRECTIONAL);
150
151 /* Unmap chunks, if any. */
152 for (i = 1; i < num_tbs; i++)
153 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
154 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
155}
156
157/**
158 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
159 * @priv - driver private data
160 * @txq - tx queue
161 *
162 * Does NOT advance any TFD circular buffer read/write indexes
163 * Does NOT free the TFD itself (which is within circular buffer)
164 */
165void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
166{
167 struct iwl_tfd *tfd_tmp = txq->tfds;
168 int index = txq->q.read_ptr;
169
170 iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
171
172 /* free SKB */
173 if (txq->txb) {
174 struct sk_buff *skb;
175
176 skb = txq->txb[txq->q.read_ptr].skb;
177
178 /* can be called from irqs-disabled context */
179 if (skb) {
180 dev_kfree_skb_any(skb);
181 txq->txb[txq->q.read_ptr].skb = NULL;
182 }
183 }
184}
185
186int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
187 struct iwl_tx_queue *txq,
188 dma_addr_t addr, u16 len,
189 u8 reset)
190{
191 struct iwl_queue *q;
192 struct iwl_tfd *tfd, *tfd_tmp;
193 u32 num_tbs;
194
195 q = &txq->q;
196 tfd_tmp = txq->tfds;
197 tfd = &tfd_tmp[q->write_ptr];
198
199 if (reset)
200 memset(tfd, 0, sizeof(*tfd));
201
202 num_tbs = iwl_tfd_get_num_tbs(tfd);
203
 204 /* Each TFD can point to a maximum of 20 Tx buffers */
205 if (num_tbs >= IWL_NUM_OF_TBS) {
206 IWL_ERR(priv, "Error can not send more than %d chunks\n",
207 IWL_NUM_OF_TBS);
208 return -EINVAL;
209 }
210
211 if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
212 return -EINVAL;
213
214 if (unlikely(addr & ~IWL_TX_DMA_MASK))
215 IWL_ERR(priv, "Unaligned address = %llx\n",
216 (unsigned long long)addr);
217
218 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
219
220 return 0;
221}
222
223/*
224 * Tell nic where to find circular buffer of Tx Frame Descriptors for
225 * given Tx queue, and enable the DMA channel used for that queue.
226 *
227 * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
228 * channels supported in hardware.
229 */
230static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
231{
232 int txq_id = txq->q.id;
233
234 /* Circular buffer (TFD queue in DRAM) physical base address */
235 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
236 txq->q.dma_addr >> 8);
237
238 return 0;
239}
240
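
The TFD helpers added above pack a 36-bit DMA address and a 12-bit length into 48 bits per transfer buffer: the low 32 address bits go into tb->lo, address bits 35:32 into the low nibble of tb->hi_n_len, and the length into its upper 12 bits. Below is a minimal host-endian sketch of that round trip; the struct and helper names are illustrative, and the little-endian accessors used by the driver are omitted.

#include <assert.h>
#include <stdint.h>

/* Illustrative mirror of one TFD transfer-buffer descriptor. */
struct tb_sketch {
	uint32_t lo;        /* address bits 31:0 */
	uint16_t hi_n_len;  /* bits 3:0 = address 35:32, bits 15:4 = length */
};

static void tb_set(struct tb_sketch *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_get_addr(const struct tb_sketch *tb)
{
	return (uint64_t)tb->lo | (((uint64_t)tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_get_len(const struct tb_sketch *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_sketch tb;

	/* A 36-bit DMA address and a length that fits in 12 bits. */
	tb_set(&tb, 0xFDEADBEE0ULL, 0x7FF);
	assert(tb_get_addr(&tb) == 0xFDEADBEE0ULL);
	assert(tb_get_len(&tb) == 0x7FF);
	return 0;
}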
88/** 241/**
89 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 242 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
90 */ 243 */
@@ -97,7 +250,7 @@ void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
97 return; 250 return;
98 251
99 while (q->write_ptr != q->read_ptr) { 252 while (q->write_ptr != q->read_ptr) {
100 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 253 iwlagn_txq_free_tfd(priv, txq);
101 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 254 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
102 } 255 }
103} 256}
@@ -154,7 +307,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
154 return; 307 return;
155 308
156 while (q->read_ptr != q->write_ptr) { 309 while (q->read_ptr != q->write_ptr) {
157 i = get_cmd_index(q, q->read_ptr, 0); 310 i = get_cmd_index(q, q->read_ptr);
158 311
159 if (txq->meta[i].flags & CMD_MAPPED) { 312 if (txq->meta[i].flags & CMD_MAPPED) {
160 pci_unmap_single(priv->pci_dev, 313 pci_unmap_single(priv->pci_dev,
@@ -166,15 +319,6 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
166 319
167 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 320 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
168 } 321 }
169
170 i = q->n_window;
171 if (txq->meta[i].flags & CMD_MAPPED) {
172 pci_unmap_single(priv->pci_dev,
173 dma_unmap_addr(&txq->meta[i], mapping),
174 dma_unmap_len(&txq->meta[i], len),
175 PCI_DMA_BIDIRECTIONAL);
176 txq->meta[i].flags = 0;
177 }
178} 322}
179 323
180/** 324/**
@@ -194,7 +338,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
194 iwl_cmd_queue_unmap(priv); 338 iwl_cmd_queue_unmap(priv);
195 339
196 /* De-alloc array of command/tx buffers */ 340 /* De-alloc array of command/tx buffers */
197 for (i = 0; i <= TFD_CMD_SLOTS; i++) 341 for (i = 0; i < TFD_CMD_SLOTS; i++)
198 kfree(txq->cmd[i]); 342 kfree(txq->cmd[i]);
199 343
200 /* De-alloc circular buffer of TFDs */ 344 /* De-alloc circular buffer of TFDs */
@@ -334,33 +478,17 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
334{ 478{
335 int i, len; 479 int i, len;
336 int ret; 480 int ret;
337 int actual_slots = slots_num;
338
339 /*
340 * Alloc buffer array for commands (Tx or other types of commands).
341 * For the command queue (#4/#9), allocate command space + one big
342 * command for scan, since scan command is very huge; the system will
343 * not have two scans at the same time, so only one is needed.
344 * For normal Tx queues (all other queues), no super-size command
345 * space is needed.
346 */
347 if (txq_id == priv->cmd_queue)
348 actual_slots++;
349 481
350 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, 482 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
351 GFP_KERNEL); 483 GFP_KERNEL);
352 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, 484 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
353 GFP_KERNEL); 485 GFP_KERNEL);
354 486
355 if (!txq->meta || !txq->cmd) 487 if (!txq->meta || !txq->cmd)
356 goto out_free_arrays; 488 goto out_free_arrays;
357 489
358 len = sizeof(struct iwl_device_cmd); 490 len = sizeof(struct iwl_device_cmd);
359 for (i = 0; i < actual_slots; i++) { 491 for (i = 0; i < slots_num; i++) {
360 /* only happens for cmd queue */
361 if (i == slots_num)
362 len = IWL_MAX_CMD_SIZE;
363
364 txq->cmd[i] = kmalloc(len, GFP_KERNEL); 492 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
365 if (!txq->cmd[i]) 493 if (!txq->cmd[i])
366 goto err; 494 goto err;
@@ -391,11 +519,11 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
391 return ret; 519 return ret;
392 520
393 /* Tell device where to find queue */ 521 /* Tell device where to find queue */
394 priv->cfg->ops->lib->txq_init(priv, txq); 522 iwlagn_tx_queue_init(priv, txq);
395 523
396 return 0; 524 return 0;
397err: 525err:
398 for (i = 0; i < actual_slots; i++) 526 for (i = 0; i < slots_num; i++)
399 kfree(txq->cmd[i]); 527 kfree(txq->cmd[i]);
400out_free_arrays: 528out_free_arrays:
401 kfree(txq->meta); 529 kfree(txq->meta);
@@ -420,7 +548,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
420 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); 548 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
421 549
422 /* Tell device where to find queue */ 550 /* Tell device where to find queue */
423 priv->cfg->ops->lib->txq_init(priv, txq); 551 iwlagn_tx_queue_init(priv, txq);
424} 552}
425 553
426/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 554/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -443,23 +571,49 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
443 dma_addr_t phys_addr; 571 dma_addr_t phys_addr;
444 unsigned long flags; 572 unsigned long flags;
445 u32 idx; 573 u32 idx;
446 u16 fix_size; 574 u16 copy_size, cmd_size;
447 bool is_ct_kill = false; 575 bool is_ct_kill = false;
576 bool had_nocopy = false;
577 int i;
578 u8 *cmd_dest;
579#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
580 const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
581 int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
582 int trace_idx;
583#endif
584
585 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
586 IWL_WARN(priv, "fw recovery, no hcmd send\n");
587 return -EIO;
588 }
448 589
449 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); 590 copy_size = sizeof(out_cmd->hdr);
591 cmd_size = sizeof(out_cmd->hdr);
592
593 /* need one for the header if the first is NOCOPY */
594 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
595
596 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
597 if (!cmd->len[i])
598 continue;
599 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
600 had_nocopy = true;
601 } else {
602 /* NOCOPY must not be followed by normal! */
603 if (WARN_ON(had_nocopy))
604 return -EINVAL;
605 copy_size += cmd->len[i];
606 }
607 cmd_size += cmd->len[i];
608 }
450 609
451 /* 610 /*
452 * If any of the command structures end up being larger than 611 * If any of the command structures end up being larger than
453 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then 612 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
454 * we will need to increase the size of the TFD entries 613 * allocated into separate TFDs, then we will need to
455 * Also, check to see if command buffer should not exceed the size 614 * increase the size of the buffers.
456 * of device_cmd and max_cmd_size.
457 */ 615 */
458 if (WARN_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 616 if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
459 !(cmd->flags & CMD_SIZE_HUGE)))
460 return -EINVAL;
461
462 if (WARN_ON(fix_size > IWL_MAX_CMD_SIZE))
463 return -EINVAL; 617 return -EINVAL;
464 618
465 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { 619 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
@@ -468,14 +622,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
468 return -EIO; 622 return -EIO;
469 } 623 }
470 624
471 /*
472 * As we only have a single huge buffer, check that the command
473 * is synchronous (otherwise buffers could end up being reused).
474 */
475
476 if (WARN_ON((cmd->flags & CMD_ASYNC) && (cmd->flags & CMD_SIZE_HUGE)))
477 return -EINVAL;
478
479 spin_lock_irqsave(&priv->hcmd_lock, flags); 625 spin_lock_irqsave(&priv->hcmd_lock, flags);
480 626
481 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 627 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
@@ -490,7 +636,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
490 return -ENOSPC; 636 return -ENOSPC;
491 } 637 }
492 638
493 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 639 idx = get_cmd_index(q, q->write_ptr);
494 out_cmd = txq->cmd[idx]; 640 out_cmd = txq->cmd[idx];
495 out_meta = &txq->meta[idx]; 641 out_meta = &txq->meta[idx];
496 642
@@ -505,57 +651,84 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
505 if (cmd->flags & CMD_ASYNC) 651 if (cmd->flags & CMD_ASYNC)
506 out_meta->callback = cmd->callback; 652 out_meta->callback = cmd->callback;
507 653
508 out_cmd->hdr.cmd = cmd->id; 654 /* set up the header */
509 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
510
511 /* At this point, the out_cmd now has all of the incoming cmd
512 * information */
513 655
656 out_cmd->hdr.cmd = cmd->id;
514 out_cmd->hdr.flags = 0; 657 out_cmd->hdr.flags = 0;
515 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) | 658 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
516 INDEX_TO_SEQ(q->write_ptr)); 659 INDEX_TO_SEQ(q->write_ptr));
517 if (cmd->flags & CMD_SIZE_HUGE) 660
518 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 661 /* and copy the data that needs to be copied */
519 662
520#ifdef CONFIG_IWLWIFI_DEBUG 663 cmd_dest = &out_cmd->cmd.payload[0];
521 switch (out_cmd->hdr.cmd) { 664 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
522 case REPLY_TX_LINK_QUALITY_CMD: 665 if (!cmd->len[i])
523 case SENSITIVITY_CMD: 666 continue;
524 IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, " 667 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
525 "%d bytes at %d[%d]:%d\n", 668 break;
526 get_cmd_string(out_cmd->hdr.cmd), 669 memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
527 out_cmd->hdr.cmd, 670 cmd_dest += cmd->len[i];
528 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
529 q->write_ptr, idx, priv->cmd_queue);
530 break;
531 default:
532 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
533 "%d bytes at %d[%d]:%d\n",
534 get_cmd_string(out_cmd->hdr.cmd),
535 out_cmd->hdr.cmd,
536 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
537 q->write_ptr, idx, priv->cmd_queue);
538 } 671 }
539#endif 672
673 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
674 "%d bytes at %d[%d]:%d\n",
675 get_cmd_string(out_cmd->hdr.cmd),
676 out_cmd->hdr.cmd,
677 le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
678 q->write_ptr, idx, priv->cmd_queue);
679
540 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, 680 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
541 fix_size, PCI_DMA_BIDIRECTIONAL); 681 copy_size, PCI_DMA_BIDIRECTIONAL);
542 if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) { 682 if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
543 idx = -ENOMEM; 683 idx = -ENOMEM;
544 goto out; 684 goto out;
545 } 685 }
546 686
547 dma_unmap_addr_set(out_meta, mapping, phys_addr); 687 dma_unmap_addr_set(out_meta, mapping, phys_addr);
548 dma_unmap_len_set(out_meta, len, fix_size); 688 dma_unmap_len_set(out_meta, len, copy_size);
689
690 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
691#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
692 trace_bufs[0] = &out_cmd->hdr;
693 trace_lens[0] = copy_size;
694 trace_idx = 1;
695#endif
696
697 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
698 if (!cmd->len[i])
699 continue;
700 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
701 continue;
702 phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
703 cmd->len[i], PCI_DMA_TODEVICE);
704 if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
705 iwlagn_unmap_tfd(priv, out_meta,
706 &txq->tfds[q->write_ptr]);
707 idx = -ENOMEM;
708 goto out;
709 }
710
711 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
712 cmd->len[i], 0);
713#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
714 trace_bufs[trace_idx] = cmd->data[i];
715 trace_lens[trace_idx] = cmd->len[i];
716 trace_idx++;
717#endif
718 }
549 719
550 out_meta->flags = cmd->flags | CMD_MAPPED; 720 out_meta->flags = cmd->flags | CMD_MAPPED;
551 721
552 txq->need_update = 1; 722 txq->need_update = 1;
553 723
554 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags); 724 /* check that tracing gets all possible blocks */
555 725 BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
556 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 726#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
557 phys_addr, fix_size, 1, 727 trace_iwlwifi_dev_hcmd(priv, cmd->flags,
558 U32_PAD(cmd->len)); 728 trace_bufs[0], trace_lens[0],
729 trace_bufs[1], trace_lens[1],
730 trace_bufs[2], trace_lens[2]);
731#endif
559 732
560 /* Increment and update queue's write index */ 733 /* Increment and update queue's write index */
561 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 734 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -573,8 +746,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
573 * need to be reclaimed. As result, some free space forms. If there is 746 * need to be reclaimed. As result, some free space forms. If there is
574 * enough free space (> low mark), wake the stack that feeds us. 747 * enough free space (> low mark), wake the stack that feeds us.
575 */ 748 */
576static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, 749static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
577 int idx, int cmd_idx)
578{ 750{
579 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 751 struct iwl_tx_queue *txq = &priv->txq[txq_id];
580 struct iwl_queue *q = &txq->q; 752 struct iwl_queue *q = &txq->q;
@@ -614,7 +786,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
614 int txq_id = SEQ_TO_QUEUE(sequence); 786 int txq_id = SEQ_TO_QUEUE(sequence);
615 int index = SEQ_TO_INDEX(sequence); 787 int index = SEQ_TO_INDEX(sequence);
616 int cmd_index; 788 int cmd_index;
617 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
618 struct iwl_device_cmd *cmd; 789 struct iwl_device_cmd *cmd;
619 struct iwl_cmd_meta *meta; 790 struct iwl_cmd_meta *meta;
620 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 791 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
@@ -632,14 +803,11 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
632 return; 803 return;
633 } 804 }
634 805
635 cmd_index = get_cmd_index(&txq->q, index, huge); 806 cmd_index = get_cmd_index(&txq->q, index);
636 cmd = txq->cmd[cmd_index]; 807 cmd = txq->cmd[cmd_index];
637 meta = &txq->meta[cmd_index]; 808 meta = &txq->meta[cmd_index];
638 809
639 pci_unmap_single(priv->pci_dev, 810 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
640 dma_unmap_addr(meta, mapping),
641 dma_unmap_len(meta, len),
642 PCI_DMA_BIDIRECTIONAL);
643 811
644 /* Input error checking is done when commands are added to queue. */ 812 /* Input error checking is done when commands are added to queue. */
645 if (meta->flags & CMD_WANT_SKB) { 813 if (meta->flags & CMD_WANT_SKB) {
@@ -650,7 +818,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
650 818
651 spin_lock_irqsave(&priv->hcmd_lock, flags); 819 spin_lock_irqsave(&priv->hcmd_lock, flags);
652 820
653 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); 821 iwl_hcmd_queue_reclaim(priv, txq_id, index);
654 822
655 if (!(meta->flags & CMD_ASYNC)) { 823 if (!(meta->flags & CMD_ASYNC)) {
656 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 824 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
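
The reworked iwl_enqueue_hcmd() above keeps two sizes per host command: copy_size counts only the header plus the fragments that are copied into the command buffer, while cmd_size counts everything, and a NOCOPY fragment may not be followed by a copied one. The sketch below models only that sizing rule, with illustrative constants and a plain flag standing in for IWL_HCMD_DFL_NOCOPY; the DMA mapping itself is not modelled.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_CMD_TFDS 2      /* illustrative, mirrors IWL_MAX_CMD_TFDS */
#define HDR_SIZE     4      /* stand-in for sizeof(out_cmd->hdr) */

struct frag {
	size_t len;
	bool nocopy;        /* stand-in for IWL_HCMD_DFL_NOCOPY */
};

/* Returns 0 on success; -1 if a NOCOPY fragment is followed by a copied
 * one, which the real driver rejects with WARN_ON + -EINVAL. */
static int size_host_cmd(const struct frag *frags, int n,
			 size_t *copy_size, size_t *cmd_size)
{
	bool had_nocopy = false;
	int i;

	*copy_size = HDR_SIZE;  /* header is always copied into the first TB */
	*cmd_size = HDR_SIZE;

	for (i = 0; i < n; i++) {
		if (!frags[i].len)
			continue;
		if (frags[i].nocopy) {
			had_nocopy = true;
		} else {
			if (had_nocopy)
				return -1;      /* NOCOPY must come last */
			*copy_size += frags[i].len;
		}
		*cmd_size += frags[i].len;
	}
	return 0;
}

int main(void)
{
	struct frag frags[MAX_CMD_TFDS] = {
		{ .len = 16,  .nocopy = false }, /* copied into the command buffer */
		{ .len = 512, .nocopy = true },  /* mapped in place as its own TB */
	};
	size_t copy_size, cmd_size;

	if (!size_host_cmd(frags, MAX_CMD_TFDS, &copy_size, &cmd_size))
		printf("copy_size=%zu cmd_size=%zu\n", copy_size, cmd_size);
	return 0;
}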
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 5665a1a9b99e..a414768f40f1 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -565,7 +565,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
565 if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status) 565 if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
566 && iwm->conf.mode == UMAC_MODE_BSS) { 566 && iwm->conf.mode == UMAC_MODE_BSS) {
567 cancel_delayed_work(&iwm->disconnect); 567 cancel_delayed_work(&iwm->disconnect);
568 cfg80211_roamed(iwm_to_ndev(iwm), 568 cfg80211_roamed(iwm_to_ndev(iwm), NULL,
569 complete->bssid, 569 complete->bssid,
570 iwm->req_ie, iwm->req_ie_len, 570 iwm->req_ie, iwm->req_ie_len,
571 iwm->resp_ie, iwm->resp_ie_len, 571 iwm->resp_ie, iwm->resp_ie_len,
@@ -586,7 +586,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
586 WLAN_STATUS_SUCCESS, 586 WLAN_STATUS_SUCCESS,
587 GFP_KERNEL); 587 GFP_KERNEL);
588 else 588 else
589 cfg80211_roamed(iwm_to_ndev(iwm), 589 cfg80211_roamed(iwm_to_ndev(iwm), NULL,
590 complete->bssid, 590 complete->bssid,
591 iwm->req_ie, iwm->req_ie_len, 591 iwm->req_ie, iwm->req_ie_len,
592 iwm->resp_ie, iwm->resp_ie_len, 592 iwm->resp_ie, iwm->resp_ie_len,
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index d3d5e0853c45..f807447e4d99 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -196,6 +196,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
196 if (skb_src) 196 if (skb_src)
197 pra_list->total_pkts_size -= skb_src->len; 197 pra_list->total_pkts_size -= skb_src->len;
198 198
199 atomic_dec(&priv->wmm.tx_pkts_queued);
200
199 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 201 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
200 ra_list_flags); 202 ra_list_flags);
201 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad); 203 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
@@ -257,6 +259,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
257 259
258 pra_list->total_pkts_size += skb_aggr->len; 260 pra_list->total_pkts_size += skb_aggr->len;
259 261
262 atomic_inc(&priv->wmm.tx_pkts_queued);
263
260 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; 264 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
261 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 265 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
262 ra_list_flags); 266 ra_list_flags);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 672701dc2721..8316b3cd92cd 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -69,7 +69,8 @@ struct mwifiex_drv_mode {
69#define MWIFIEX_TIMER_10S 10000 69#define MWIFIEX_TIMER_10S 10000
70#define MWIFIEX_TIMER_1S 1000 70#define MWIFIEX_TIMER_1S 1000
71 71
72#define MAX_TX_PENDING 60 72#define MAX_TX_PENDING 100
73#define LOW_TX_PENDING 80
73 74
74#define MWIFIEX_UPLD_SIZE (2312) 75#define MWIFIEX_UPLD_SIZE (2312)
75 76
@@ -202,6 +203,7 @@ struct mwifiex_tid_tbl {
202#define WMM_HIGHEST_PRIORITY 7 203#define WMM_HIGHEST_PRIORITY 7
203#define HIGH_PRIO_TID 7 204#define HIGH_PRIO_TID 7
204#define LOW_PRIO_TID 0 205#define LOW_PRIO_TID 0
206#define NO_PKT_PRIO_TID (-1)
205 207
206struct mwifiex_wmm_desc { 208struct mwifiex_wmm_desc {
207 struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; 209 struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
@@ -213,7 +215,10 @@ struct mwifiex_wmm_desc {
213 u32 drv_pkt_delay_max; 215 u32 drv_pkt_delay_max;
214 u8 queue_priority[IEEE80211_MAX_QUEUES]; 216 u8 queue_priority[IEEE80211_MAX_QUEUES];
215 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */ 217 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */
216 218 /* Number of transmit packets queued */
219 atomic_t tx_pkts_queued;
220 /* Tracks highest priority with a packet queued */
221 atomic_t highest_queued_prio;
217}; 222};
218 223
219struct mwifiex_802_11_security { 224struct mwifiex_802_11_security {
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 210120889dfe..aaa50c074196 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -140,7 +140,9 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
140 } else { 140 } else {
141 priv->stats.tx_errors++; 141 priv->stats.tx_errors++;
142 } 142 }
143 atomic_dec(&adapter->tx_pending); 143
144 if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
145 goto done;
144 146
145 for (i = 0; i < adapter->priv_num; i++) { 147 for (i = 0; i < adapter->priv_num; i++) {
146 148
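
The MAX_TX_PENDING/LOW_TX_PENDING pair above, together with the early return added to mwifiex_write_data_complete(), forms a simple hysteresis: submission stops once the pending count reaches the high watermark and only resumes after it drains below the low one. Below is a minimal sketch of that pattern; stop_tx() and wake_tx() are hypothetical stand-ins for the queue control the driver actually performs.

#include <stdbool.h>
#include <stdio.h>

#define MAX_TX_PENDING 100   /* stop handing packets to the bus here */
#define LOW_TX_PENDING 80    /* resume only after draining below this */

static int tx_pending;
static bool stopped;

static void stop_tx(void) { stopped = true;  puts("queues stopped"); }
static void wake_tx(void) { stopped = false; puts("queues woken"); }

/* Called when a packet is handed to the hardware. */
static void on_tx_submit(void)
{
	if (++tx_pending >= MAX_TX_PENDING && !stopped)
		stop_tx();
}

/* Called from the write-complete path, mirroring the >= LOW_TX_PENDING
 * early return added to mwifiex_write_data_complete() above. */
static void on_tx_complete(void)
{
	if (--tx_pending >= LOW_TX_PENDING)
		return;                 /* still too many in flight */
	if (stopped)
		wake_tx();
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_TX_PENDING; i++)
		on_tx_submit();         /* hits the high watermark once */
	for (i = 0; i < MAX_TX_PENDING; i++)
		on_tx_complete();       /* wakes once pending < LOW_TX_PENDING */
	return 0;
}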
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index faa09e32902e..91634daec306 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -177,14 +177,20 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
177 * This function map ACs to TIDs. 177 * This function map ACs to TIDs.
178 */ 178 */
179static void 179static void
180mwifiex_wmm_queue_priorities_tid(u8 queue_priority[]) 180mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
181{ 181{
182 u8 *queue_priority = wmm->queue_priority;
182 int i; 183 int i;
183 184
184 for (i = 0; i < 4; ++i) { 185 for (i = 0; i < 4; ++i) {
185 tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1]; 186 tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
186 tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0]; 187 tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
187 } 188 }
189
190 for (i = 0; i < MAX_NUM_TID; ++i)
191 tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
192
193 atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
188} 194}
189 195
190/* 196/*
@@ -246,7 +252,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
246 } 252 }
247 } 253 }
248 254
249 mwifiex_wmm_queue_priorities_tid(priv->wmm.queue_priority); 255 mwifiex_wmm_queue_priorities_tid(&priv->wmm);
250} 256}
251 257
252/* 258/*
@@ -399,6 +405,9 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
399 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT; 405 priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
400 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE; 406 priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
401 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE; 407 priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
408
409 atomic_set(&priv->wmm.tx_pkts_queued, 0);
410 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
402 } 411 }
403} 412}
404 413
@@ -408,17 +417,13 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
408int 417int
409mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) 418mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
410{ 419{
411 int i, j; 420 int i;
412 struct mwifiex_private *priv; 421 struct mwifiex_private *priv;
413 422
414 for (j = 0; j < adapter->priv_num; ++j) { 423 for (i = 0; i < adapter->priv_num; ++i) {
415 priv = adapter->priv[j]; 424 priv = adapter->priv[i];
416 if (priv) { 425 if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
417 for (i = 0; i < MAX_NUM_TID; i++) 426 return false;
418 if (!mwifiex_wmm_is_ra_list_empty(
419 &priv->wmm.tid_tbl_ptr[i].ra_list))
420 return false;
421 }
422 } 427 }
423 428
424 return true; 429 return true;
@@ -468,6 +473,9 @@ static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
468 for (i = 0; i < MAX_NUM_TID; i++) 473 for (i = 0; i < MAX_NUM_TID; i++)
469 mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i]. 474 mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
470 ra_list); 475 ra_list);
476
477 atomic_set(&priv->wmm.tx_pkts_queued, 0);
478 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
471} 479}
472 480
473/* 481/*
@@ -638,6 +646,13 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
638 646
639 ra_list->total_pkts_size += skb->len; 647 ra_list->total_pkts_size += skb->len;
640 648
649 atomic_inc(&priv->wmm.tx_pkts_queued);
650
651 if (atomic_read(&priv->wmm.highest_queued_prio) <
652 tos_to_tid_inv[tid_down])
653 atomic_set(&priv->wmm.highest_queued_prio,
654 tos_to_tid_inv[tid_down]);
655
641 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 656 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
642} 657}
643 658
@@ -863,9 +878,14 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
863 } 878 }
864 879
865 do { 880 do {
881 atomic_t *hqp;
882 spinlock_t *lock;
883
866 priv_tmp = bssprio_node->priv; 884 priv_tmp = bssprio_node->priv;
885 hqp = &priv_tmp->wmm.highest_queued_prio;
886 lock = &priv_tmp->wmm.ra_list_spinlock;
867 887
868 for (i = HIGH_PRIO_TID; i >= LOW_PRIO_TID; --i) { 888 for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
869 889
870 tid_ptr = &(priv_tmp)->wmm. 890 tid_ptr = &(priv_tmp)->wmm.
871 tid_tbl_ptr[tos_to_tid[i]]; 891 tid_tbl_ptr[tos_to_tid[i]];
@@ -903,6 +923,11 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
903 is_list_empty = 923 is_list_empty =
904 skb_queue_empty(&ptr->skb_head); 924 skb_queue_empty(&ptr->skb_head);
905 if (!is_list_empty) { 925 if (!is_list_empty) {
926 spin_lock_irqsave(lock, flags);
927 if (atomic_read(hqp) > i)
928 atomic_set(hqp, i);
929 spin_unlock_irqrestore(lock,
930 flags);
906 *priv = priv_tmp; 931 *priv = priv_tmp;
907 *tid = tos_to_tid[i]; 932 *tid = tos_to_tid[i];
908 return ptr; 933 return ptr;
@@ -921,6 +946,12 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
921 } while (ptr != head); 946 } while (ptr != head);
922 } 947 }
923 948
949 /* No packet at any TID for this priv. Mark as such
950 * to skip checking TIDs for this priv (until pkt is
951 * added).
952 */
953 atomic_set(hqp, NO_PKT_PRIO_TID);
954
924 /* Get next bss priority node */ 955 /* Get next bss priority node */
925 bssprio_node = list_first_entry(&bssprio_node->list, 956 bssprio_node = list_first_entry(&bssprio_node->list,
926 struct mwifiex_bss_prio_node, 957 struct mwifiex_bss_prio_node,
@@ -1028,6 +1059,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
1028 .bss_prio_cur->list, 1059 .bss_prio_cur->list,
1029 struct mwifiex_bss_prio_node, 1060 struct mwifiex_bss_prio_node,
1030 list); 1061 list);
1062 atomic_dec(&priv->wmm.tx_pkts_queued);
1031 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1063 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1032 ra_list_flags); 1064 ra_list_flags);
1033 } 1065 }
@@ -1134,6 +1166,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1134 .bss_prio_cur->list, 1166 .bss_prio_cur->list,
1135 struct mwifiex_bss_prio_node, 1167 struct mwifiex_bss_prio_node,
1136 list); 1168 list);
1169 atomic_dec(&priv->wmm.tx_pkts_queued);
1137 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1170 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1138 ra_list_flags); 1171 ra_list_flags);
1139 } 1172 }
@@ -1227,5 +1260,5 @@ mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1227 1260
1228 if (mwifiex_dequeue_tx_packet(adapter)) 1261 if (mwifiex_dequeue_tx_packet(adapter))
1229 break; 1262 break;
1230 } while (true); 1263 } while (!mwifiex_wmm_lists_empty(adapter));
1231} 1264}
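
The wmm.c changes above replace a walk over every RA list with two counters: tx_pkts_queued makes the lists-empty check O(1), and highest_queued_prio lets the dequeue path start its TID scan at the highest priority known to hold traffic, falling back to NO_PKT_PRIO_TID once everything has drained. Below is a minimal single-interface sketch of that bookkeeping, using C11 atomics in place of the kernel's atomic_t and simple per-TID counters standing in for the RA lists.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define HIGH_PRIO_TID   7
#define LOW_PRIO_TID    0
#define NO_PKT_PRIO_TID (-1)

static atomic_int tx_pkts_queued;
static atomic_int highest_queued_prio = HIGH_PRIO_TID;

/* Illustrative per-TID packet counts standing in for the RA lists. */
static int tid_pkts[HIGH_PRIO_TID + 1];

static void enqueue(int prio)
{
	tid_pkts[prio]++;
	atomic_fetch_add(&tx_pkts_queued, 1);
	/* Raise the hint if this packet outranks everything queued so far. */
	if (atomic_load(&highest_queued_prio) < prio)
		atomic_store(&highest_queued_prio, prio);
}

/* Returns the TID dequeued from, or NO_PKT_PRIO_TID if nothing is queued. */
static int dequeue_highest(void)
{
	int i;

	for (i = atomic_load(&highest_queued_prio); i >= LOW_PRIO_TID; i--) {
		if (tid_pkts[i]) {
			tid_pkts[i]--;
			atomic_fetch_sub(&tx_pkts_queued, 1);
			atomic_store(&highest_queued_prio, i);
			return i;
		}
	}
	/* Nothing at any TID: remember that so later scans can skip us. */
	atomic_store(&highest_queued_prio, NO_PKT_PRIO_TID);
	return NO_PKT_PRIO_TID;
}

static bool lists_empty(void)
{
	return atomic_load(&tx_pkts_queued) == 0;   /* O(1), no list walk */
}

int main(void)
{
	enqueue(2);
	enqueue(6);
	printf("dequeued TID %d\n", dequeue_highest());   /* 6 */
	printf("dequeued TID %d\n", dequeue_highest());   /* 2 */
	printf("empty: %d\n", lists_empty());             /* 1 */
	return 0;
}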
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index e18358725b69..a8f3bc740dfa 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -82,6 +82,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
82 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 82 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
83 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 83 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
84 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 84 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
85 {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
85 {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ 86 {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
86 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ 87 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
87 {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ 88 {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 518542b4bf9e..29f938930667 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2830,7 +2830,8 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2830 req_ie_len, resp_ie, 2830 req_ie_len, resp_ie,
2831 resp_ie_len, 0, GFP_KERNEL); 2831 resp_ie_len, 0, GFP_KERNEL);
2832 else 2832 else
2833 cfg80211_roamed(usbdev->net, bssid, req_ie, req_ie_len, 2833 cfg80211_roamed(usbdev->net, NULL, bssid,
2834 req_ie, req_ie_len,
2834 resp_ie, resp_ie_len, GFP_KERNEL); 2835 resp_ie, resp_ie_len, GFP_KERNEL);
2835 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) 2836 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
2836 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); 2837 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 2bb71195e976..39b0297ce925 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -190,7 +190,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
190 190
191 ppsc->swrf_processing = true; 191 ppsc->swrf_processing = true;
192 192
193 if (ppsc->inactive_pwrstate == ERFOFF && 193 if (ppsc->inactive_pwrstate == ERFON &&
194 rtlhal->interface == INTF_PCI) { 194 rtlhal->interface == INTF_PCI) {
195 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && 195 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
196 RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) && 196 RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) &&
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index c5424cad43cb..d2cc81586a6a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -728,7 +728,7 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
728 return; 728 return;
729 rtlphy->set_bwmode_inprogress = true; 729 rtlphy->set_bwmode_inprogress = true;
730 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { 730 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
731 rtlphy->set_bwmode_inprogress = false; 731 rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
732 } else { 732 } else {
733 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 733 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
734 ("FALSE driver sleep or unload\n")); 734 ("FALSE driver sleep or unload\n"));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 73ae8a431848..abe0fcc75368 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -366,6 +366,75 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
366 return true; 366 return true;
367} 367}
368 368
369void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
370{
371 struct rtl_priv *rtlpriv = rtl_priv(hw);
372 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
373 struct rtl_phy *rtlphy = &(rtlpriv->phy);
374 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
375 u8 reg_bw_opmode;
376 u8 reg_prsr_rsc;
377
378 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
379 ("Switch to %s bandwidth\n",
380 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
381 "20MHz" : "40MHz"))
382
383 if (is_hal_stop(rtlhal)) {
384 rtlphy->set_bwmode_inprogress = false;
385 return;
386 }
387
388 reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
389 reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
390
391 switch (rtlphy->current_chan_bw) {
392 case HT_CHANNEL_WIDTH_20:
393 reg_bw_opmode |= BW_OPMODE_20MHZ;
394 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
395 break;
396 case HT_CHANNEL_WIDTH_20_40:
397 reg_bw_opmode &= ~BW_OPMODE_20MHZ;
398 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
399 reg_prsr_rsc =
400 (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
401 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
402 break;
403 default:
404 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
405 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
406 break;
407 }
408
409 switch (rtlphy->current_chan_bw) {
410 case HT_CHANNEL_WIDTH_20:
411 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
412 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
413 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
414 break;
415 case HT_CHANNEL_WIDTH_20_40:
416 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
417 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
418
419 rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
420 (mac->cur_40_prime_sc >> 1));
421 rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
422 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
423
424 rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
425 (mac->cur_40_prime_sc ==
426 HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
427 break;
428 default:
429 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
430 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
431 break;
432 }
433 rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
434 rtlphy->set_bwmode_inprogress = false;
435 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
436}
437
369void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) 438void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
370{ 439{
371 u8 tmpreg; 440 u8 tmpreg;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index ad580852cc76..be2c92adef33 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -257,5 +257,6 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
257 u8 configtype); 257 u8 configtype);
258bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, 258bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
259 u8 configtype); 259 u8 configtype);
260void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
260 261
261#endif 262#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 390bbb5ee11d..373dc78af1dc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -232,6 +232,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
232 .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile, 232 .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile,
233 .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile, 233 .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile,
234 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate, 234 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
235 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
235 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower, 236 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
236}; 237};
237 238
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index db9a763aaa7f..d29365a232a1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1581,7 +1581,9 @@ static int xennet_connect(struct net_device *dev)
1581 if (err) 1581 if (err)
1582 return err; 1582 return err;
1583 1583
1584 rtnl_lock();
1584 netdev_update_features(dev); 1585 netdev_update_features(dev);
1586 rtnl_unlock();
1585 1587
1586 spin_lock_bh(&np->rx_lock); 1588 spin_lock_bh(&np->rx_lock);
1587 spin_lock_irq(&np->tx_lock); 1589 spin_lock_irq(&np->tx_lock);
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index d3d7809af8bf..0dc34f12f92e 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -2203,7 +2203,6 @@ static __exit void parport_ip32_unregister_port(struct parport *p)
2203static int __init parport_ip32_init(void) 2203static int __init parport_ip32_init(void)
2204{ 2204{
2205 pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n"); 2205 pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
2206 pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__);
2207 this_port = parport_ip32_probe_port(); 2206 this_port = parport_ip32_probe_port();
2208 return IS_ERR(this_port) ? PTR_ERR(this_port) : 0; 2207 return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
2209} 2208}
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 94a114aa8e28..b1396e5b2953 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -81,6 +81,19 @@ static void __iomem *rtl_cmd_addr;
81static u8 rtl_cmd_type; 81static u8 rtl_cmd_type;
82static u8 rtl_cmd_width; 82static u8 rtl_cmd_width;
83 83
84#ifndef readq
85static inline __u64 readq(const volatile void __iomem *addr)
86{
87 const volatile u32 __iomem *p = addr;
88 u32 low, high;
89
90 low = readl(p);
91 high = readl(p + 1);
92
93 return low + ((u64)high << 32);
94}
95#endif
96
84static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len) 97static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len)
85{ 98{
86 if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO) 99 if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
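
ibm_rtl and intel_ips gain the same readq() fallback above for builds without a native 64-bit MMIO read: the low word is read first, then the high word, and the two are combined. The read is not atomic, so a register that may change between the two accesses needs retry or locking at the call site. A small user-space sketch of the same composition follows; the volatile pointer stands in for an __iomem mapping.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Compose a 64-bit value from two 32-bit reads, low word first,
 * mirroring the readq() fallback added above. Not atomic: the two
 * halves may come from different moments in time. */
static uint64_t readq_split(const volatile uint32_t *p)
{
	uint32_t low = p[0];
	uint32_t high = p[1];

	return (uint64_t)low + ((uint64_t)high << 32);
}

int main(void)
{
	/* 0x1122334455667788 split into its low and high 32-bit words. */
	volatile uint32_t regs[2] = { 0x55667788u, 0x11223344u };

	printf("0x%016" PRIx64 "\n", readq_split(regs));
	return 0;
}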
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 85c8ad43c0c5..5ffe7c398148 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -344,6 +344,19 @@ struct ips_driver {
344static bool 344static bool
345ips_gpu_turbo_enabled(struct ips_driver *ips); 345ips_gpu_turbo_enabled(struct ips_driver *ips);
346 346
347#ifndef readq
348static inline __u64 readq(const volatile void __iomem *addr)
349{
350 const volatile u32 __iomem *p = addr;
351 u32 low, high;
352
353 low = readl(p);
354 high = readl(p + 1);
355
356 return low + ((u64)high << 32);
357}
358#endif
359
347/** 360/**
348 * ips_cpu_busy - is CPU busy? 361 * ips_cpu_busy - is CPU busy?
349 * @ips: IPS driver struct 362 * @ips: IPS driver struct
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 52a462fc6b84..e57b50b38565 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -68,6 +68,13 @@ config BATTERY_DS2760
68 help 68 help
69 Say Y here to enable support for batteries with ds2760 chip. 69 Say Y here to enable support for batteries with ds2760 chip.
70 70
71config BATTERY_DS2780
72 tristate "DS2780 battery driver"
73 select W1
74 select W1_SLAVE_DS2780
75 help
76 Say Y here to enable support for batteries with ds2780 chip.
77
71config BATTERY_DS2782 78config BATTERY_DS2782
72 tristate "DS2782/DS2786 standalone gas-gauge" 79 tristate "DS2782/DS2786 standalone gas-gauge"
73 depends on I2C 80 depends on I2C
@@ -203,6 +210,15 @@ config CHARGER_ISP1704
203 Say Y to enable support for USB Charger Detection with 210 Say Y to enable support for USB Charger Detection with
204 ISP1707/ISP1704 USB transceivers. 211 ISP1707/ISP1704 USB transceivers.
205 212
213config CHARGER_MAX8903
214 tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
215 depends on GENERIC_HARDIRQS
216 help
217 Say Y to enable support for the MAX8903 DC-DC charger and sysfs.
218 The driver supports controlling charger-enable and current-limit
219 pins based on the status of charger connections with interrupt
220 handlers.
221
206config CHARGER_TWL4030 222config CHARGER_TWL4030
207 tristate "OMAP TWL4030 BCI charger driver" 223 tristate "OMAP TWL4030 BCI charger driver"
208 depends on TWL4030_CORE 224 depends on TWL4030_CORE
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 8385bfae8728..009a90fa8ac9 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
15obj-$(CONFIG_TEST_POWER) += test_power.o 15obj-$(CONFIG_TEST_POWER) += test_power.o
16 16
17obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 17obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
18obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
18obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o 19obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
19obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 20obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
20obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 21obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
@@ -32,5 +33,6 @@ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
32obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o 33obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
33obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o 34obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
34obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o 35obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
36obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
35obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o 37obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
36obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o 38obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 59e68dbd028b..bb16f5b7e167 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> 4 * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
5 * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> 5 * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
6 * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de> 6 * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
7 * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
7 * 8 *
8 * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. 9 * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
9 * 10 *
@@ -76,7 +77,7 @@ struct bq27x00_reg_cache {
76 int time_to_empty_avg; 77 int time_to_empty_avg;
77 int time_to_full; 78 int time_to_full;
78 int charge_full; 79 int charge_full;
79 int charge_counter; 80 int cycle_count;
80 int capacity; 81 int capacity;
81 int flags; 82 int flags;
82 83
@@ -115,7 +116,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
115 POWER_SUPPLY_PROP_CHARGE_FULL, 116 POWER_SUPPLY_PROP_CHARGE_FULL,
116 POWER_SUPPLY_PROP_CHARGE_NOW, 117 POWER_SUPPLY_PROP_CHARGE_NOW,
117 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 118 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
118 POWER_SUPPLY_PROP_CHARGE_COUNTER, 119 POWER_SUPPLY_PROP_CYCLE_COUNT,
119 POWER_SUPPLY_PROP_ENERGY_NOW, 120 POWER_SUPPLY_PROP_ENERGY_NOW,
120}; 121};
121 122
@@ -267,7 +268,7 @@ static void bq27x00_update(struct bq27x00_device_info *di)
267 cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); 268 cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
268 cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); 269 cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
269 cache.charge_full = bq27x00_battery_read_lmd(di); 270 cache.charge_full = bq27x00_battery_read_lmd(di);
270 cache.charge_counter = bq27x00_battery_read_cyct(di); 271 cache.cycle_count = bq27x00_battery_read_cyct(di);
271 272
272 if (!is_bq27500) 273 if (!is_bq27500)
273 cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false); 274 cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
@@ -496,8 +497,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
496 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: 497 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
497 ret = bq27x00_simple_value(di->charge_design_full, val); 498 ret = bq27x00_simple_value(di->charge_design_full, val);
498 break; 499 break;
499 case POWER_SUPPLY_PROP_CHARGE_COUNTER: 500 case POWER_SUPPLY_PROP_CYCLE_COUNT:
500 ret = bq27x00_simple_value(di->cache.charge_counter, val); 501 ret = bq27x00_simple_value(di->cache.cycle_count, val);
501 break; 502 break;
502 case POWER_SUPPLY_PROP_ENERGY_NOW: 503 case POWER_SUPPLY_PROP_ENERGY_NOW:
503 ret = bq27x00_battery_energy(di, val); 504 ret = bq27x00_battery_energy(di, val);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index e534290f3256..f2c9cc33c0f9 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -86,7 +86,11 @@ static int rated_capacities[] = {
86 920, /* NEC */ 86 920, /* NEC */
87 1440, /* Samsung */ 87 1440, /* Samsung */
88 1440, /* BYD */ 88 1440, /* BYD */
89#ifdef CONFIG_MACH_H4700
90 1800, /* HP iPAQ hx4700 3.7V 1800mAh (359113-001) */
91#else
89 1440, /* Lishen */ 92 1440, /* Lishen */
93#endif
90 1440, /* NEC */ 94 1440, /* NEC */
91 2880, /* Samsung */ 95 2880, /* Samsung */
92 2880, /* BYD */ 96 2880, /* BYD */
@@ -186,7 +190,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
186 190
187 scale[0] = di->full_active_uAh; 191 scale[0] = di->full_active_uAh;
188 for (i = 1; i < 5; i++) 192 for (i = 1; i < 5; i++)
189 scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i]; 193 scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 1 + i];
190 194
191 di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10); 195 di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10);
192 di->full_active_uAh *= 1000; /* convert to µAh */ 196 di->full_active_uAh *= 1000; /* convert to µAh */
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
new file mode 100644
index 000000000000..1fefe82e12e3
--- /dev/null
+++ b/drivers/power/ds2780_battery.c
@@ -0,0 +1,853 @@
1/*
2 * 1-wire client/driver for the Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC
3 *
4 * Copyright (C) 2010 Indesign, LLC
5 *
6 * Author: Clifton Barnes <cabarnes@indesign-llc.com>
7 *
8 * Based on ds2760_battery and ds2782_battery drivers
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/param.h>
19#include <linux/pm.h>
20#include <linux/platform_device.h>
21#include <linux/power_supply.h>
22#include <linux/idr.h>
23
24#include "../w1/w1.h"
25#include "../w1/slaves/w1_ds2780.h"
26
27/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
28#define DS2780_CURRENT_UNITS 1563
29/* Charge unit measurement in uAh for a 1 milli-ohm sense resistor */
30#define DS2780_CHARGE_UNITS 6250
31/* Number of bytes in user EEPROM space */
32#define DS2780_USER_EEPROM_SIZE (DS2780_EEPROM_BLOCK0_END - \
33 DS2780_EEPROM_BLOCK0_START + 1)
34/* Number of bytes in parameter EEPROM space */
35#define DS2780_PARAM_EEPROM_SIZE (DS2780_EEPROM_BLOCK1_END - \
36 DS2780_EEPROM_BLOCK1_START + 1)
37
38struct ds2780_device_info {
39 struct device *dev;
40 struct power_supply bat;
41 struct device *w1_dev;
42};
43
44enum current_types {
45 CURRENT_NOW,
46 CURRENT_AVG,
47};
48
49static const char model[] = "DS2780";
50static const char manufacturer[] = "Maxim/Dallas";
51
52static inline struct ds2780_device_info *to_ds2780_device_info(
53 struct power_supply *psy)
54{
55 return container_of(psy, struct ds2780_device_info, bat);
56}
57
58static inline struct power_supply *to_power_supply(struct device *dev)
59{
60 return dev_get_drvdata(dev);
61}
62
63static inline int ds2780_read8(struct device *dev, u8 *val, int addr)
64{
65 return w1_ds2780_io(dev, val, addr, sizeof(u8), 0);
66}
67
68static int ds2780_read16(struct device *dev, s16 *val, int addr)
69{
70 int ret;
71 u8 raw[2];
72
73 ret = w1_ds2780_io(dev, raw, addr, sizeof(u8) * 2, 0);
74 if (ret < 0)
75 return ret;
76
77 *val = (raw[0] << 8) | raw[1];
78
79 return 0;
80}
81
82static inline int ds2780_read_block(struct device *dev, u8 *val, int addr,
83 size_t count)
84{
85 return w1_ds2780_io(dev, val, addr, count, 0);
86}
87
88static inline int ds2780_write(struct device *dev, u8 *val, int addr,
89 size_t count)
90{
91 return w1_ds2780_io(dev, val, addr, count, 1);
92}
93
94static inline int ds2780_store_eeprom(struct device *dev, int addr)
95{
96 return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_COPY_DATA);
97}
98
99static inline int ds2780_recall_eeprom(struct device *dev, int addr)
100{
101 return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_RECALL_DATA);
102}
103
104static int ds2780_save_eeprom(struct ds2780_device_info *dev_info, int reg)
105{
106 int ret;
107
108 ret = ds2780_store_eeprom(dev_info->w1_dev, reg);
109 if (ret < 0)
110 return ret;
111
112 ret = ds2780_recall_eeprom(dev_info->w1_dev, reg);
113 if (ret < 0)
114 return ret;
115
116 return 0;
117}
118
119/* Set sense resistor value in mhos */
120static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
121 u8 conductance)
122{
123 int ret;
124
125 ret = ds2780_write(dev_info->w1_dev, &conductance,
126 DS2780_RSNSP_REG, sizeof(u8));
127 if (ret < 0)
128 return ret;
129
130 return ds2780_save_eeprom(dev_info, DS2780_RSNSP_REG);
131}
132
133/* Get RSGAIN value from 0 to 1.999 in steps of 0.001 */
134static int ds2780_get_rsgain_register(struct ds2780_device_info *dev_info,
135 u16 *rsgain)
136{
137 return ds2780_read16(dev_info->w1_dev, rsgain, DS2780_RSGAIN_MSB_REG);
138}
139
140/* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */
141static int ds2780_set_rsgain_register(struct ds2780_device_info *dev_info,
142 u16 rsgain)
143{
144 int ret;
145 u8 raw[] = {rsgain >> 8, rsgain & 0xFF};
146
147 ret = ds2780_write(dev_info->w1_dev, raw,
148 DS2780_RSGAIN_MSB_REG, sizeof(u8) * 2);
149 if (ret < 0)
150 return ret;
151
152 return ds2780_save_eeprom(dev_info, DS2780_RSGAIN_MSB_REG);
153}
154
155static int ds2780_get_voltage(struct ds2780_device_info *dev_info,
156 int *voltage_uV)
157{
158 int ret;
159 s16 voltage_raw;
160
161 /*
162 * The voltage value is located in 10 bits across the voltage MSB
163 * and LSB registers in two's complement form
164 * Sign bit of the voltage value is in bit 7 of the voltage MSB register
165 * Bits 9 - 3 of the voltage value are in bits 6 - 0 of the
166 * voltage MSB register
167 * Bits 2 - 0 of the voltage value are in bits 7 - 5 of the
168 * voltage LSB register
169 */
170 ret = ds2780_read16(dev_info->w1_dev, &voltage_raw,
171 DS2780_VOLT_MSB_REG);
172 if (ret < 0)
173 return ret;
174
175 /*
176 * DS2780 reports voltage in units of 4.88mV, but the battery class
177 * reports in units of uV, so convert by multiplying by 4880.
178 */
179 *voltage_uV = (voltage_raw / 32) * 4880;
180 return 0;
181}
182
183static int ds2780_get_temperature(struct ds2780_device_info *dev_info,
184 int *temperature)
185{
186 int ret;
187 s16 temperature_raw;
188
189 /*
190 * The temperature value is located in 10 bits across the temperature
191 * MSB and LSB registers in two's complement form
192 * Sign bit of the temperature value is in bit 7 of the temperature
193 * MSB register
194 * Bits 9 - 3 of the temperature value are in bits 6 - 0 of the
195 * temperature MSB register
196 * Bits 2 - 0 of the temperature value are in bits 7 - 5 of the
197 * temperature LSB register
198 */
199 ret = ds2780_read16(dev_info->w1_dev, &temperature_raw,
200 DS2780_TEMP_MSB_REG);
201 if (ret < 0)
202 return ret;
203
204 /*
205 * Temperature is measured in units of 0.125 degrees celsius, the
206 * power_supply class measures temperature in tenths of degrees
207 * celsius. The temperature value is stored as a 10 bit number, plus
208 * sign in the upper bits of a 16 bit register.
209 */
210 *temperature = ((temperature_raw / 32) * 125) / 100;
211 return 0;
212}
213
214static int ds2780_get_current(struct ds2780_device_info *dev_info,
215 enum current_types type, int *current_uA)
216{
217 int ret, sense_res;
218 s16 current_raw;
219 u8 sense_res_raw, reg_msb;
220
221 /*
222 * The units of measurement for current are dependent on the value of
223 * the sense resistor.
224 */
225 ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
226 if (ret < 0)
227 return ret;
228
229 if (sense_res_raw == 0) {
230 dev_err(dev_info->dev, "sense resistor value is 0\n");
231 return -ENXIO;
232 }
233 sense_res = 1000 / sense_res_raw;
234
235 if (type == CURRENT_NOW)
236 reg_msb = DS2780_CURRENT_MSB_REG;
237 else if (type == CURRENT_AVG)
238 reg_msb = DS2780_IAVG_MSB_REG;
239 else
240 return -EINVAL;
241
242 /*
243 * The current value is located in 16 bits across the current MSB
244 * and LSB registers in two's complement form
245 * Sign bit of the current value is in bit 7 of the current MSB register
246 * Bits 14 - 8 of the current value are in bits 6 - 0 of the current
247 * MSB register
248 * Bits 7 - 0 of the current value are in bits 7 - 0 of the current
249 * LSB register
250 */
251 ret = ds2780_read16(dev_info->w1_dev, &current_raw, reg_msb);
252 if (ret < 0)
253 return ret;
254
255 *current_uA = current_raw * (DS2780_CURRENT_UNITS / sense_res);
256 return 0;
257}
258
259static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
260 int *accumulated_current)
261{
262 int ret, sense_res;
263 s16 current_raw;
264 u8 sense_res_raw;
265
266 /*
267 * The units of measurement for accumulated current are dependent on
268 * the value of the sense resistor.
269 */
270 ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
271 if (ret < 0)
272 return ret;
273
274 if (sense_res_raw == 0) {
275 dev_err(dev_info->dev, "sense resistor value is 0\n");
276 return -ENXIO;
277 }
278 sense_res = 1000 / sense_res_raw;
279
280 /*
281 * The ACR value is located in 16 bits across the ACR MSB and
282 * LSB registers
283 * Bits 15 - 8 of the ACR value are in bits 7 - 0 of the ACR
284 * MSB register
285 * Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR
286 * LSB register
287 */
288 ret = ds2780_read16(dev_info->w1_dev, &current_raw, DS2780_ACR_MSB_REG);
289 if (ret < 0)
290 return ret;
291
292 *accumulated_current = current_raw * (DS2780_CHARGE_UNITS / sense_res);
293 return 0;
294}
295
296static int ds2780_get_capacity(struct ds2780_device_info *dev_info,
297 int *capacity)
298{
299 int ret;
300 u8 raw;
301
302 ret = ds2780_read8(dev_info->w1_dev, &raw, DS2780_RARC_REG);
303 if (ret < 0)
304 return ret;
305
306 *capacity = raw;
307 return raw;
308}
309
310static int ds2780_get_status(struct ds2780_device_info *dev_info, int *status)
311{
312 int ret, current_uA, capacity;
313
314 ret = ds2780_get_current(dev_info, CURRENT_NOW, &current_uA);
315 if (ret < 0)
316 return ret;
317
318 ret = ds2780_get_capacity(dev_info, &capacity);
319 if (ret < 0)
320 return ret;
321
322 if (capacity == 100)
323 *status = POWER_SUPPLY_STATUS_FULL;
324 else if (current_uA == 0)
325 *status = POWER_SUPPLY_STATUS_NOT_CHARGING;
326 else if (current_uA < 0)
327 *status = POWER_SUPPLY_STATUS_DISCHARGING;
328 else
329 *status = POWER_SUPPLY_STATUS_CHARGING;
330
331 return 0;
332}
333
334static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
335 int *charge_now)
336{
337 int ret;
338 u16 charge_raw;
339
340 /*
341 * The RAAC value is located in 16 bits across the RAAC MSB and
342 * LSB registers
343 * Bits 15 - 8 of the RAAC value are in bits 7 - 0 of the RAAC
344 * MSB register
345 * Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC
346 * LSB register
347 */
348 ret = ds2780_read16(dev_info->w1_dev, &charge_raw, DS2780_RAAC_MSB_REG);
349 if (ret < 0)
350 return ret;
351
352 *charge_now = charge_raw * 1600;
353 return 0;
354}
355
356static int ds2780_get_control_register(struct ds2780_device_info *dev_info,
357 u8 *control_reg)
358{
359 return ds2780_read8(dev_info->w1_dev, control_reg, DS2780_CONTROL_REG);
360}
361
362static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
363 u8 control_reg)
364{
365 int ret;
366
367 ret = ds2780_write(dev_info->w1_dev, &control_reg,
368 DS2780_CONTROL_REG, sizeof(u8));
369 if (ret < 0)
370 return ret;
371
372 return ds2780_save_eeprom(dev_info, DS2780_CONTROL_REG);
373}
374
375static int ds2780_battery_get_property(struct power_supply *psy,
376 enum power_supply_property psp,
377 union power_supply_propval *val)
378{
379 int ret = 0;
380 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
381
382 switch (psp) {
383 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
384 ret = ds2780_get_voltage(dev_info, &val->intval);
385 break;
386
387 case POWER_SUPPLY_PROP_TEMP:
388 ret = ds2780_get_temperature(dev_info, &val->intval);
389 break;
390
391 case POWER_SUPPLY_PROP_MODEL_NAME:
392 val->strval = model;
393 break;
394
395 case POWER_SUPPLY_PROP_MANUFACTURER:
396 val->strval = manufacturer;
397 break;
398
399 case POWER_SUPPLY_PROP_CURRENT_NOW:
400 ret = ds2780_get_current(dev_info, CURRENT_NOW, &val->intval);
401 break;
402
403 case POWER_SUPPLY_PROP_CURRENT_AVG:
404 ret = ds2780_get_current(dev_info, CURRENT_AVG, &val->intval);
405 break;
406
407 case POWER_SUPPLY_PROP_STATUS:
408 ret = ds2780_get_status(dev_info, &val->intval);
409 break;
410
411 case POWER_SUPPLY_PROP_CAPACITY:
412 ret = ds2780_get_capacity(dev_info, &val->intval);
413 break;
414
415 case POWER_SUPPLY_PROP_CHARGE_COUNTER:
416 ret = ds2780_get_accumulated_current(dev_info, &val->intval);
417 break;
418
419 case POWER_SUPPLY_PROP_CHARGE_NOW:
420 ret = ds2780_get_charge_now(dev_info, &val->intval);
421 break;
422
423 default:
424 ret = -EINVAL;
425 }
426
427 return ret;
428}
429
430static enum power_supply_property ds2780_battery_props[] = {
431 POWER_SUPPLY_PROP_STATUS,
432 POWER_SUPPLY_PROP_VOLTAGE_NOW,
433 POWER_SUPPLY_PROP_TEMP,
434 POWER_SUPPLY_PROP_MODEL_NAME,
435 POWER_SUPPLY_PROP_MANUFACTURER,
436 POWER_SUPPLY_PROP_CURRENT_NOW,
437 POWER_SUPPLY_PROP_CURRENT_AVG,
438 POWER_SUPPLY_PROP_CAPACITY,
439 POWER_SUPPLY_PROP_CHARGE_COUNTER,
440 POWER_SUPPLY_PROP_CHARGE_NOW,
441};
442
443static ssize_t ds2780_get_pmod_enabled(struct device *dev,
444 struct device_attribute *attr,
445 char *buf)
446{
447 int ret;
448 u8 control_reg;
449 struct power_supply *psy = to_power_supply(dev);
450 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
451
452 /* Get power mode */
453 ret = ds2780_get_control_register(dev_info, &control_reg);
454 if (ret < 0)
455 return ret;
456
457 return sprintf(buf, "%d\n",
458 !!(control_reg & DS2780_CONTROL_REG_PMOD));
459}
460
461static ssize_t ds2780_set_pmod_enabled(struct device *dev,
462 struct device_attribute *attr,
463 const char *buf,
464 size_t count)
465{
466 int ret;
467 u8 control_reg, new_setting;
468 struct power_supply *psy = to_power_supply(dev);
469 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
470
471 /* Set power mode */
472 ret = ds2780_get_control_register(dev_info, &control_reg);
473 if (ret < 0)
474 return ret;
475
476 ret = kstrtou8(buf, 0, &new_setting);
477 if (ret < 0)
478 return ret;
479
480 if ((new_setting != 0) && (new_setting != 1)) {
481 dev_err(dev_info->dev, "Invalid pmod setting (0 or 1)\n");
482 return -EINVAL;
483 }
484
485 if (new_setting)
486 control_reg |= DS2780_CONTROL_REG_PMOD;
487 else
488 control_reg &= ~DS2780_CONTROL_REG_PMOD;
489
490 ret = ds2780_set_control_register(dev_info, control_reg);
491 if (ret < 0)
492 return ret;
493
494 return count;
495}
496
497static ssize_t ds2780_get_sense_resistor_value(struct device *dev,
498 struct device_attribute *attr,
499 char *buf)
500{
501 int ret;
502 u8 sense_resistor;
503 struct power_supply *psy = to_power_supply(dev);
504 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
505
506 ret = ds2780_read8(dev_info->w1_dev, &sense_resistor, DS2780_RSNSP_REG);
507 if (ret < 0)
508 return ret;
509
510 ret = sprintf(buf, "%d\n", sense_resistor);
511 return ret;
512}
513
514static ssize_t ds2780_set_sense_resistor_value(struct device *dev,
515 struct device_attribute *attr,
516 const char *buf,
517 size_t count)
518{
519 int ret;
520 u8 new_setting;
521 struct power_supply *psy = to_power_supply(dev);
522 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
523
524 ret = kstrtou8(buf, 0, &new_setting);
525 if (ret < 0)
526 return ret;
527
528 ret = ds2780_set_sense_register(dev_info, new_setting);
529 if (ret < 0)
530 return ret;
531
532 return count;
533}
534
535static ssize_t ds2780_get_rsgain_setting(struct device *dev,
536 struct device_attribute *attr,
537 char *buf)
538{
539 int ret;
540 u16 rsgain;
541 struct power_supply *psy = to_power_supply(dev);
542 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
543
544 ret = ds2780_get_rsgain_register(dev_info, &rsgain);
545 if (ret < 0)
546 return ret;
547
548 return sprintf(buf, "%d\n", rsgain);
549}
550
551static ssize_t ds2780_set_rsgain_setting(struct device *dev,
552 struct device_attribute *attr,
553 const char *buf,
554 size_t count)
555{
556 int ret;
557 u16 new_setting;
558 struct power_supply *psy = to_power_supply(dev);
559 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
560
561 ret = kstrtou16(buf, 0, &new_setting);
562 if (ret < 0)
563 return ret;
564
565 /* Gain can only be from 0 to 1.999 in steps of .001 */
566 if (new_setting > 1999) {
567 dev_err(dev_info->dev, "Invalid rsgain setting (0 - 1999)\n");
568 return -EINVAL;
569 }
570
571 ret = ds2780_set_rsgain_register(dev_info, new_setting);
572 if (ret < 0)
573 return ret;
574
575 return count;
576}
577
578static ssize_t ds2780_get_pio_pin(struct device *dev,
579 struct device_attribute *attr,
580 char *buf)
581{
582 int ret;
583 u8 sfr;
584 struct power_supply *psy = to_power_supply(dev);
585 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
586
587 ret = ds2780_read8(dev_info->w1_dev, &sfr, DS2780_SFR_REG);
588 if (ret < 0)
589 return ret;
590
591 ret = sprintf(buf, "%d\n", sfr & DS2780_SFR_REG_PIOSC);
592 return ret;
593}
594
595static ssize_t ds2780_set_pio_pin(struct device *dev,
596 struct device_attribute *attr,
597 const char *buf,
598 size_t count)
599{
600 int ret;
601 u8 new_setting;
602 struct power_supply *psy = to_power_supply(dev);
603 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
604
605 ret = kstrtou8(buf, 0, &new_setting);
606 if (ret < 0)
607 return ret;
608
609 if ((new_setting != 0) && (new_setting != 1)) {
610 dev_err(dev_info->dev, "Invalid pio_pin setting (0 or 1)\n");
611 return -EINVAL;
612 }
613
614 ret = ds2780_write(dev_info->w1_dev, &new_setting,
615 DS2780_SFR_REG, sizeof(u8));
616 if (ret < 0)
617 return ret;
618
619 return count;
620}
621
622static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
623 struct kobject *kobj,
624 struct bin_attribute *bin_attr,
625 char *buf, loff_t off, size_t count)
626{
627 struct device *dev = container_of(kobj, struct device, kobj);
628 struct power_supply *psy = to_power_supply(dev);
629 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
630
631 count = min_t(loff_t, count,
632 DS2780_EEPROM_BLOCK1_END -
633 DS2780_EEPROM_BLOCK1_START + 1 - off);
634
635 return ds2780_read_block(dev_info->w1_dev, buf,
636 DS2780_EEPROM_BLOCK1_START + off, count);
637}
638
639static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
640 struct kobject *kobj,
641 struct bin_attribute *bin_attr,
642 char *buf, loff_t off, size_t count)
643{
644 struct device *dev = container_of(kobj, struct device, kobj);
645 struct power_supply *psy = to_power_supply(dev);
646 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
647 int ret;
648
649 count = min_t(loff_t, count,
650 DS2780_EEPROM_BLOCK1_END -
651 DS2780_EEPROM_BLOCK1_START + 1 - off);
652
653 ret = ds2780_write(dev_info->w1_dev, buf,
654 DS2780_EEPROM_BLOCK1_START + off, count);
655 if (ret < 0)
656 return ret;
657
658 ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK1_START);
659 if (ret < 0)
660 return ret;
661
662 return count;
663}
664
665static struct bin_attribute ds2780_param_eeprom_bin_attr = {
666 .attr = {
667 .name = "param_eeprom",
668 .mode = S_IRUGO | S_IWUSR,
669 },
670 .size = DS2780_EEPROM_BLOCK1_END - DS2780_EEPROM_BLOCK1_START + 1,
671 .read = ds2780_read_param_eeprom_bin,
672 .write = ds2780_write_param_eeprom_bin,
673};
674
675static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
676 struct kobject *kobj,
677 struct bin_attribute *bin_attr,
678 char *buf, loff_t off, size_t count)
679{
680 struct device *dev = container_of(kobj, struct device, kobj);
681 struct power_supply *psy = to_power_supply(dev);
682 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
683
684 count = min_t(loff_t, count,
685 DS2780_EEPROM_BLOCK0_END -
686 DS2780_EEPROM_BLOCK0_START + 1 - off);
687
688 return ds2780_read_block(dev_info->w1_dev, buf,
689 DS2780_EEPROM_BLOCK0_START + off, count);
690
691}
692
693static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
694 struct kobject *kobj,
695 struct bin_attribute *bin_attr,
696 char *buf, loff_t off, size_t count)
697{
698 struct device *dev = container_of(kobj, struct device, kobj);
699 struct power_supply *psy = to_power_supply(dev);
700 struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
701 int ret;
702
703 count = min_t(loff_t, count,
704 DS2780_EEPROM_BLOCK0_END -
705 DS2780_EEPROM_BLOCK0_START + 1 - off);
706
707 ret = ds2780_write(dev_info->w1_dev, buf,
708 DS2780_EEPROM_BLOCK0_START + off, count);
709 if (ret < 0)
710 return ret;
711
712 ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK0_START);
713 if (ret < 0)
714 return ret;
715
716 return count;
717}
718
719static struct bin_attribute ds2780_user_eeprom_bin_attr = {
720 .attr = {
721 .name = "user_eeprom",
722 .mode = S_IRUGO | S_IWUSR,
723 },
724 .size = DS2780_EEPROM_BLOCK0_END - DS2780_EEPROM_BLOCK0_START + 1,
725 .read = ds2780_read_user_eeprom_bin,
726 .write = ds2780_write_user_eeprom_bin,
727};
728
729static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2780_get_pmod_enabled,
730 ds2780_set_pmod_enabled);
731static DEVICE_ATTR(sense_resistor_value, S_IRUGO | S_IWUSR,
732 ds2780_get_sense_resistor_value, ds2780_set_sense_resistor_value);
733static DEVICE_ATTR(rsgain_setting, S_IRUGO | S_IWUSR, ds2780_get_rsgain_setting,
734 ds2780_set_rsgain_setting);
735static DEVICE_ATTR(pio_pin, S_IRUGO | S_IWUSR, ds2780_get_pio_pin,
736 ds2780_set_pio_pin);
737
738
739static struct attribute *ds2780_attributes[] = {
740 &dev_attr_pmod_enabled.attr,
741 &dev_attr_sense_resistor_value.attr,
742 &dev_attr_rsgain_setting.attr,
743 &dev_attr_pio_pin.attr,
744 NULL
745};
746
747static const struct attribute_group ds2780_attr_group = {
748 .attrs = ds2780_attributes,
749};
750
751static int __devinit ds2780_battery_probe(struct platform_device *pdev)
752{
753 int ret = 0;
754 struct ds2780_device_info *dev_info;
755
756 dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
757 if (!dev_info) {
758 ret = -ENOMEM;
759 goto fail;
760 }
761
762 platform_set_drvdata(pdev, dev_info);
763
764 dev_info->dev = &pdev->dev;
765 dev_info->w1_dev = pdev->dev.parent;
766 dev_info->bat.name = dev_name(&pdev->dev);
767 dev_info->bat.type = POWER_SUPPLY_TYPE_BATTERY;
768 dev_info->bat.properties = ds2780_battery_props;
769 dev_info->bat.num_properties = ARRAY_SIZE(ds2780_battery_props);
770 dev_info->bat.get_property = ds2780_battery_get_property;
771
772 ret = power_supply_register(&pdev->dev, &dev_info->bat);
773 if (ret) {
774 dev_err(dev_info->dev, "failed to register battery\n");
775 goto fail_free_info;
776 }
777
778 ret = sysfs_create_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
779 if (ret) {
780 dev_err(dev_info->dev, "failed to create sysfs group\n");
781 goto fail_unregister;
782 }
783
784 ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj,
785 &ds2780_param_eeprom_bin_attr);
786 if (ret) {
787 dev_err(dev_info->dev,
788 "failed to create param eeprom bin file");
789 goto fail_remove_group;
790 }
791
792 ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj,
793 &ds2780_user_eeprom_bin_attr);
794 if (ret) {
795 dev_err(dev_info->dev,
796 "failed to create user eeprom bin file");
797 goto fail_remove_bin_file;
798 }
799
800 return 0;
801
802fail_remove_bin_file:
803 sysfs_remove_bin_file(&dev_info->bat.dev->kobj,
804 &ds2780_param_eeprom_bin_attr);
805fail_remove_group:
806 sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
807fail_unregister:
808 power_supply_unregister(&dev_info->bat);
809fail_free_info:
810 kfree(dev_info);
811fail:
812 return ret;
813}
814
815static int __devexit ds2780_battery_remove(struct platform_device *pdev)
816{
817 struct ds2780_device_info *dev_info = platform_get_drvdata(pdev);
818
819 /* remove attributes */
820 sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
821
822 power_supply_unregister(&dev_info->bat);
823
824 kfree(dev_info);
825 return 0;
826}
827
828MODULE_ALIAS("platform:ds2780-battery");
829
830static struct platform_driver ds2780_battery_driver = {
831 .driver = {
832 .name = "ds2780-battery",
833 },
834 .probe = ds2780_battery_probe,
835 .remove = ds2780_battery_remove,
836};
837
838static int __init ds2780_battery_init(void)
839{
840 return platform_driver_register(&ds2780_battery_driver);
841}
842
843static void __exit ds2780_battery_exit(void)
844{
845 platform_driver_unregister(&ds2780_battery_driver);
846}
847
848module_init(ds2780_battery_init);
849module_exit(ds2780_battery_exit);
850
851MODULE_LICENSE("GPL");
852MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
853MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC driver");
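
The comments in ds2780_get_voltage() and ds2780_get_temperature() above describe a 10-bit two's-complement value left-justified in a 16-bit MSB/LSB pair, which is why the driver divides the signed 16-bit read by 32 before scaling by the per-LSB unit (4.88 mV, 0.125 degrees C). A minimal stand-alone sketch of that arithmetic, with made-up raw values, is shown below; it is illustrative only and not part of the driver.

/*
 * Illustrative only: the 10-bit value sits in the upper bits of the
 * 16-bit MSB/LSB pair, so dividing the signed 16-bit read by 32 drops
 * the unused low bits, and the result is scaled by the per-LSB unit
 * quoted in the driver comments (4.88 mV, 0.125 degrees C).
 */
#include <stdint.h>
#include <stdio.h>

static int ds2780_voltage_uV(int16_t raw16)
{
	return (raw16 / 32) * 4880;		/* 4.88 mV per LSB -> uV */
}

static int ds2780_temp_tenths_C(int16_t raw16)
{
	return ((raw16 / 32) * 125) / 100;	/* 0.125 C per LSB -> 0.1 C */
}

int main(void)
{
	int16_t volt_raw = 0x5EC0;	/* example MSB 0x5E, LSB 0xC0 -> ~3.70 V */
	int16_t temp_raw = 0x1900;	/* example MSB 0x19, LSB 0x00 -> 25.0 C  */

	printf("voltage: %d uV\n", ds2780_voltage_uV(volt_raw));
	printf("temperature: %d tenths of a degree C\n",
	       ds2780_temp_tenths_C(temp_raw));
	return 0;
}
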
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 25b88ac1d44c..718f2c537827 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -161,12 +161,27 @@ static int __devexit gpio_charger_remove(struct platform_device *pdev)
161 return 0; 161 return 0;
162} 162}
163 163
164#ifdef CONFIG_PM_SLEEP
165static int gpio_charger_resume(struct device *dev)
166{
167 struct platform_device *pdev = to_platform_device(dev);
168 struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
169
170 power_supply_changed(&gpio_charger->charger);
171
172 return 0;
173}
174#endif
175
176static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume);
177
164static struct platform_driver gpio_charger_driver = { 178static struct platform_driver gpio_charger_driver = {
165 .probe = gpio_charger_probe, 179 .probe = gpio_charger_probe,
166 .remove = __devexit_p(gpio_charger_remove), 180 .remove = __devexit_p(gpio_charger_remove),
167 .driver = { 181 .driver = {
168 .name = "gpio-charger", 182 .name = "gpio-charger",
169 .owner = THIS_MODULE, 183 .owner = THIS_MODULE,
184 .pm = &gpio_charger_pm_ops,
170 }, 185 },
171}; 186};
172 187
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 2ad9b14a5ce3..f6d72b402a8e 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -33,6 +33,7 @@
33#include <linux/usb/ulpi.h> 33#include <linux/usb/ulpi.h>
34#include <linux/usb/ch9.h> 34#include <linux/usb/ch9.h>
35#include <linux/usb/gadget.h> 35#include <linux/usb/gadget.h>
36#include <linux/power/isp1704_charger.h>
36 37
37/* Vendor specific Power Control register */ 38/* Vendor specific Power Control register */
38#define ISP1704_PWR_CTRL 0x3d 39#define ISP1704_PWR_CTRL 0x3d
@@ -71,6 +72,18 @@ struct isp1704_charger {
71}; 72};
72 73
73/* 74/*
75 * Disable/enable the power from the isp1704 if a function for it
76 * has been provided with platform data.
77 */
78static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
79{
80 struct isp1704_charger_data *board = isp->dev->platform_data;
81
82 if (board->set_power)
83 board->set_power(on);
84}
85
86/*
74 * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB 87 * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB
75 * chargers). 88 * chargers).
76 * 89 *
@@ -222,6 +235,9 @@ static void isp1704_charger_work(struct work_struct *data)
222 235
223 mutex_lock(&lock); 236 mutex_lock(&lock);
224 237
238 if (event != USB_EVENT_NONE)
239 isp1704_charger_set_power(isp, 1);
240
225 switch (event) { 241 switch (event) {
226 case USB_EVENT_VBUS: 242 case USB_EVENT_VBUS:
227 isp->online = true; 243 isp->online = true;
@@ -269,6 +285,8 @@ static void isp1704_charger_work(struct work_struct *data)
269 */ 285 */
270 if (isp->otg->gadget) 286 if (isp->otg->gadget)
271 usb_gadget_disconnect(isp->otg->gadget); 287 usb_gadget_disconnect(isp->otg->gadget);
288
289 isp1704_charger_set_power(isp, 0);
272 break; 290 break;
273 case USB_EVENT_ENUMERATED: 291 case USB_EVENT_ENUMERATED:
274 if (isp->present) 292 if (isp->present)
@@ -394,6 +412,8 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
394 isp->dev = &pdev->dev; 412 isp->dev = &pdev->dev;
395 platform_set_drvdata(pdev, isp); 413 platform_set_drvdata(pdev, isp);
396 414
415 isp1704_charger_set_power(isp, 1);
416
397 ret = isp1704_test_ulpi(isp); 417 ret = isp1704_test_ulpi(isp);
398 if (ret < 0) 418 if (ret < 0)
399 goto fail1; 419 goto fail1;
@@ -434,6 +454,7 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
434 454
435 /* Detect charger if VBUS is valid (the cable was already plugged). */ 455 /* Detect charger if VBUS is valid (the cable was already plugged). */
436 ret = otg_io_read(isp->otg, ULPI_USB_INT_STS); 456 ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
457 isp1704_charger_set_power(isp, 0);
437 if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) { 458 if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
438 isp->event = USB_EVENT_VBUS; 459 isp->event = USB_EVENT_VBUS;
439 schedule_work(&isp->work); 460 schedule_work(&isp->work);
@@ -459,6 +480,7 @@ static int __devexit isp1704_charger_remove(struct platform_device *pdev)
459 otg_unregister_notifier(isp->otg, &isp->nb); 480 otg_unregister_notifier(isp->otg, &isp->nb);
460 power_supply_unregister(&isp->psy); 481 power_supply_unregister(&isp->psy);
461 otg_put_transceiver(isp->otg); 482 otg_put_transceiver(isp->otg);
483 isp1704_charger_set_power(isp, 0);
462 kfree(isp); 484 kfree(isp);
463 485
464 return 0; 486 return 0;
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
new file mode 100644
index 000000000000..33ff0e37809e
--- /dev/null
+++ b/drivers/power/max8903_charger.c
@@ -0,0 +1,391 @@
1/*
2 * max8903_charger.c - Maxim 8903 USB/Adapter Charger Driver
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/gpio.h>
24#include <linux/interrupt.h>
25#include <linux/slab.h>
26#include <linux/power_supply.h>
27#include <linux/platform_device.h>
28#include <linux/power/max8903_charger.h>
29
30struct max8903_data {
31 struct max8903_pdata *pdata;
32 struct device *dev;
33 struct power_supply psy;
34 bool fault;
35 bool usb_in;
36 bool ta_in;
37};
38
39static enum power_supply_property max8903_charger_props[] = {
40 POWER_SUPPLY_PROP_STATUS, /* Charger status output */
41 POWER_SUPPLY_PROP_ONLINE, /* External power source */
42 POWER_SUPPLY_PROP_HEALTH, /* Fault or OK */
43};
44
45static int max8903_get_property(struct power_supply *psy,
46 enum power_supply_property psp,
47 union power_supply_propval *val)
48{
49 struct max8903_data *data = container_of(psy,
50 struct max8903_data, psy);
51
52 switch (psp) {
53 case POWER_SUPPLY_PROP_STATUS:
54 val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
55 if (data->pdata->chg) {
56 if (gpio_get_value(data->pdata->chg) == 0)
57 val->intval = POWER_SUPPLY_STATUS_CHARGING;
58 else if (data->usb_in || data->ta_in)
59 val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
60 else
61 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
62 }
63 break;
64 case POWER_SUPPLY_PROP_ONLINE:
65 val->intval = 0;
66 if (data->usb_in || data->ta_in)
67 val->intval = 1;
68 break;
69 case POWER_SUPPLY_PROP_HEALTH:
70 val->intval = POWER_SUPPLY_HEALTH_GOOD;
71 if (data->fault)
72 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
73 break;
74 default:
75 return -EINVAL;
76 }
77 return 0;
78}
79
80static irqreturn_t max8903_dcin(int irq, void *_data)
81{
82 struct max8903_data *data = _data;
83 struct max8903_pdata *pdata = data->pdata;
84 bool ta_in;
85 enum power_supply_type old_type;
86
87 ta_in = gpio_get_value(pdata->dok) ? false : true;
88
89 if (ta_in == data->ta_in)
90 return IRQ_HANDLED;
91
92 data->ta_in = ta_in;
93
94 /* Set Current-Limit-Mode 1:DC 0:USB */
95 if (pdata->dcm)
96 gpio_set_value(pdata->dcm, ta_in ? 1 : 0);
97
98 /* Charger Enable / Disable (cen is negated) */
99 if (pdata->cen)
100 gpio_set_value(pdata->cen, ta_in ? 0 :
101 (data->usb_in ? 0 : 1));
102
103 dev_dbg(data->dev, "TA(DC-IN) Charger %s.\n", ta_in ?
104 "Connected" : "Disconnected");
105
106 old_type = data->psy.type;
107
108 if (data->ta_in)
109 data->psy.type = POWER_SUPPLY_TYPE_MAINS;
110 else if (data->usb_in)
111 data->psy.type = POWER_SUPPLY_TYPE_USB;
112 else
113 data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
114
115 if (old_type != data->psy.type)
116 power_supply_changed(&data->psy);
117
118 return IRQ_HANDLED;
119}
120
121static irqreturn_t max8903_usbin(int irq, void *_data)
122{
123 struct max8903_data *data = _data;
124 struct max8903_pdata *pdata = data->pdata;
125 bool usb_in;
126 enum power_supply_type old_type;
127
128 usb_in = gpio_get_value(pdata->uok) ? false : true;
129
130 if (usb_in == data->usb_in)
131 return IRQ_HANDLED;
132
133 data->usb_in = usb_in;
134
135 /* Do not touch Current-Limit-Mode */
136
137 /* Charger Enable / Disable (cen is negated) */
138 if (pdata->cen)
139 gpio_set_value(pdata->cen, usb_in ? 0 :
140 (data->ta_in ? 0 : 1));
141
142 dev_dbg(data->dev, "USB Charger %s.\n", usb_in ?
143 "Connected" : "Disconnected");
144
145 old_type = data->psy.type;
146
147 if (data->ta_in)
148 data->psy.type = POWER_SUPPLY_TYPE_MAINS;
149 else if (data->usb_in)
150 data->psy.type = POWER_SUPPLY_TYPE_USB;
151 else
152 data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
153
154 if (old_type != data->psy.type)
155 power_supply_changed(&data->psy);
156
157 return IRQ_HANDLED;
158}
159
160static irqreturn_t max8903_fault(int irq, void *_data)
161{
162 struct max8903_data *data = _data;
163 struct max8903_pdata *pdata = data->pdata;
164 bool fault;
165
166 fault = gpio_get_value(pdata->flt) ? false : true;
167
168 if (fault == data->fault)
169 return IRQ_HANDLED;
170
171 data->fault = fault;
172
173 if (fault)
174 dev_err(data->dev, "Charger suffers a fault and stops.\n");
175 else
176 dev_err(data->dev, "Charger recovered from a fault.\n");
177
178 return IRQ_HANDLED;
179}
180
181static __devinit int max8903_probe(struct platform_device *pdev)
182{
183 struct max8903_data *data;
184 struct device *dev = &pdev->dev;
185 struct max8903_pdata *pdata = pdev->dev.platform_data;
186 int ret = 0;
187 int gpio;
188 int ta_in = 0;
189 int usb_in = 0;
190
191 data = kzalloc(sizeof(struct max8903_data), GFP_KERNEL);
192 if (data == NULL) {
193 dev_err(dev, "Cannot allocate memory.\n");
194 return -ENOMEM;
195 }
196 data->pdata = pdata;
197 data->dev = dev;
198 platform_set_drvdata(pdev, data);
199
200 if (pdata->dc_valid == false && pdata->usb_valid == false) {
201 dev_err(dev, "No valid power sources.\n");
202 ret = -EINVAL;
203 goto err;
204 }
205
206 if (pdata->dc_valid) {
207 if (pdata->dok && gpio_is_valid(pdata->dok) &&
208 pdata->dcm && gpio_is_valid(pdata->dcm)) {
209 gpio = pdata->dok; /* PULL_UPed Interrupt */
210 ta_in = gpio_get_value(gpio) ? 0 : 1;
211
212 gpio = pdata->dcm; /* Output */
213 gpio_set_value(gpio, ta_in);
214 } else {
215 dev_err(dev, "When DC is wired, DOK and DCM should"
216 " be wired as well.\n");
217 ret = -EINVAL;
218 goto err;
219 }
220 } else {
221 if (pdata->dcm) {
222 if (gpio_is_valid(pdata->dcm))
223 gpio_set_value(pdata->dcm, 0);
224 else {
225 dev_err(dev, "Invalid pin: dcm.\n");
226 ret = -EINVAL;
227 goto err;
228 }
229 }
230 }
231
232 if (pdata->usb_valid) {
233 if (pdata->uok && gpio_is_valid(pdata->uok)) {
234 gpio = pdata->uok;
235 usb_in = gpio_get_value(gpio) ? 0 : 1;
236 } else {
237 dev_err(dev, "When USB is wired, UOK should be wired "
238 "as well.\n");
239 ret = -EINVAL;
240 goto err;
241 }
242 }
243
244 if (pdata->cen) {
245 if (gpio_is_valid(pdata->cen)) {
246 gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
247 } else {
248 dev_err(dev, "Invalid pin: cen.\n");
249 ret = -EINVAL;
250 goto err;
251 }
252 }
253
254 if (pdata->chg) {
255 if (!gpio_is_valid(pdata->chg)) {
256 dev_err(dev, "Invalid pin: chg.\n");
257 ret = -EINVAL;
258 goto err;
259 }
260 }
261
262 if (pdata->flt) {
263 if (!gpio_is_valid(pdata->flt)) {
264 dev_err(dev, "Invalid pin: flt.\n");
265 ret = -EINVAL;
266 goto err;
267 }
268 }
269
270 if (pdata->usus) {
271 if (!gpio_is_valid(pdata->usus)) {
272 dev_err(dev, "Invalid pin: usus.\n");
273 ret = -EINVAL;
274 goto err;
275 }
276 }
277
278 data->fault = false;
279 data->ta_in = ta_in;
280 data->usb_in = usb_in;
281
282 data->psy.name = "max8903_charger";
283 data->psy.type = (ta_in) ? POWER_SUPPLY_TYPE_MAINS :
284 ((usb_in) ? POWER_SUPPLY_TYPE_USB :
285 POWER_SUPPLY_TYPE_BATTERY);
286 data->psy.get_property = max8903_get_property;
287 data->psy.properties = max8903_charger_props;
288 data->psy.num_properties = ARRAY_SIZE(max8903_charger_props);
289
290 ret = power_supply_register(dev, &data->psy);
291 if (ret) {
292 dev_err(dev, "failed: power supply register.\n");
293 goto err;
294 }
295
296 if (pdata->dc_valid) {
297 ret = request_threaded_irq(gpio_to_irq(pdata->dok),
298 NULL, max8903_dcin,
299 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
300 "MAX8903 DC IN", data);
301 if (ret) {
302 dev_err(dev, "Cannot request irq %d for DC (%d)\n",
303 gpio_to_irq(pdata->dok), ret);
304 goto err_psy;
305 }
306 }
307
308 if (pdata->usb_valid) {
309 ret = request_threaded_irq(gpio_to_irq(pdata->uok),
310 NULL, max8903_usbin,
311 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
312 "MAX8903 USB IN", data);
313 if (ret) {
314 dev_err(dev, "Cannot request irq %d for USB (%d)\n",
315 gpio_to_irq(pdata->uok), ret);
316 goto err_dc_irq;
317 }
318 }
319
320 if (pdata->flt) {
321 ret = request_threaded_irq(gpio_to_irq(pdata->flt),
322 NULL, max8903_fault,
323 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
324 "MAX8903 Fault", data);
325 if (ret) {
326 dev_err(dev, "Cannot request irq %d for Fault (%d)\n",
327 gpio_to_irq(pdata->flt), ret);
328 goto err_usb_irq;
329 }
330 }
331
332 return 0;
333
334err_usb_irq:
335 if (pdata->usb_valid)
336 free_irq(gpio_to_irq(pdata->uok), data);
337err_dc_irq:
338 if (pdata->dc_valid)
339 free_irq(gpio_to_irq(pdata->dok), data);
340err_psy:
341 power_supply_unregister(&data->psy);
342err:
343 kfree(data);
344 return ret;
345}
346
347static __devexit int max8903_remove(struct platform_device *pdev)
348{
349 struct max8903_data *data = platform_get_drvdata(pdev);
350
351 if (data) {
352 struct max8903_pdata *pdata = data->pdata;
353
354 if (pdata->flt)
355 free_irq(gpio_to_irq(pdata->flt), data);
356 if (pdata->usb_valid)
357 free_irq(gpio_to_irq(pdata->uok), data);
358 if (pdata->dc_valid)
359 free_irq(gpio_to_irq(pdata->dok), data);
360 power_supply_unregister(&data->psy);
361 kfree(data);
362 }
363
364 return 0;
365}
366
367static struct platform_driver max8903_driver = {
368 .probe = max8903_probe,
369 .remove = __devexit_p(max8903_remove),
370 .driver = {
371 .name = "max8903-charger",
372 .owner = THIS_MODULE,
373 },
374};
375
376static int __init max8903_init(void)
377{
378 return platform_driver_register(&max8903_driver);
379}
380module_init(max8903_init);
381
382static void __exit max8903_exit(void)
383{
384 platform_driver_unregister(&max8903_driver);
385}
386module_exit(max8903_exit);
387
388MODULE_LICENSE("GPL");
389MODULE_DESCRIPTION("MAX8903 Charger Driver");
390MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
391MODULE_ALIAS("max8903-charger");
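
A board that wires up this charger would hand the driver a max8903_pdata describing the GPIO lines checked in max8903_probe() above. The sketch below is a hypothetical platform-data fragment: the field names mirror the pdata members the probe reads (dok, dcm, uok, cen, chg, flt, dc_valid, usb_valid), while the GPIO numbers are placeholders and the exact struct layout comes from <linux/power/max8903_charger.h>, which is not shown in this diff.

/*
 * Hypothetical platform data for the driver above. Field names follow
 * the pdata members read in max8903_probe(); the GPIO numbers are
 * placeholders and the full definition of struct max8903_pdata lives in
 * <linux/power/max8903_charger.h>, which is not part of this hunk.
 */
#include <linux/platform_device.h>
#include <linux/power/max8903_charger.h>

static struct max8903_pdata example_max8903_pdata = {
	.dok		= 42,	/* DC (adapter) detect, pulled-up interrupt */
	.dcm		= 43,	/* current-limit mode output: 1 = DC, 0 = USB */
	.uok		= 44,	/* USB detect, pulled-up interrupt */
	.cen		= 45,	/* charger enable, active low */
	.chg		= 46,	/* charging status input */
	.flt		= 47,	/* fault status input */
	.dc_valid	= true,
	.usb_valid	= true,
};

static struct platform_device example_max8903_device = {
	.name	= "max8903-charger",
	.id	= -1,
	.dev	= {
		.platform_data = &example_max8903_pdata,
	},
};
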
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 8e5aec260866..a70e16d3a3dc 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -425,16 +425,11 @@ static __devexit int max8925_deinit_charger(struct max8925_power_info *info)
425static __devinit int max8925_power_probe(struct platform_device *pdev) 425static __devinit int max8925_power_probe(struct platform_device *pdev)
426{ 426{
427 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); 427 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
428 struct max8925_platform_data *max8925_pdata;
429 struct max8925_power_pdata *pdata = NULL; 428 struct max8925_power_pdata *pdata = NULL;
430 struct max8925_power_info *info; 429 struct max8925_power_info *info;
431 int ret; 430 int ret;
432 431
433 if (pdev->dev.parent->platform_data) { 432 pdata = pdev->dev.platform_data;
434 max8925_pdata = pdev->dev.parent->platform_data;
435 pdata = max8925_pdata->power;
436 }
437
438 if (!pdata) { 433 if (!pdata) {
439 dev_err(&pdev->dev, "platform data isn't assigned to " 434 dev_err(&pdev->dev, "platform data isn't assigned to "
440 "power supply\n"); 435 "power supply\n");
@@ -447,6 +442,7 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
447 info->chip = chip; 442 info->chip = chip;
448 info->gpm = chip->i2c; 443 info->gpm = chip->i2c;
449 info->adc = chip->adc; 444 info->adc = chip->adc;
445 platform_set_drvdata(pdev, info);
450 446
451 info->ac.name = "max8925-ac"; 447 info->ac.name = "max8925-ac";
452 info->ac.type = POWER_SUPPLY_TYPE_MAINS; 448 info->ac.type = POWER_SUPPLY_TYPE_MAINS;
@@ -482,8 +478,6 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
482 info->topoff_threshold = pdata->topoff_threshold; 478 info->topoff_threshold = pdata->topoff_threshold;
483 info->fast_charge = pdata->fast_charge; 479 info->fast_charge = pdata->fast_charge;
484 info->set_charger = pdata->set_charger; 480 info->set_charger = pdata->set_charger;
485 dev_set_drvdata(&pdev->dev, info);
486 platform_set_drvdata(pdev, info);
487 481
488 max8925_init_charger(chip, info); 482 max8925_init_charger(chip, info);
489 return 0; 483 return 0;
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
index 0cd9f67d33e5..b527c93bf2f3 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/test_power.c
@@ -3,6 +3,12 @@
3 * 3 *
4 * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com> 4 * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com>
5 * 5 *
6 * Dynamic module parameter code from the Virtual Battery Driver
7 * Copyright (C) 2008 Pylone, Inc.
8 * By: Masashi YOKOTA <yokota@pylone.jp>
9 * Originally found here:
10 * http://downloads.pylone.jp/src/virtual_battery/virtual_battery-0.0.1.tar.bz2
11 *
6 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
@@ -15,8 +21,12 @@
15#include <linux/delay.h> 21#include <linux/delay.h>
16#include <linux/vermagic.h> 22#include <linux/vermagic.h>
17 23
18static int test_power_ac_online = 1; 24static int ac_online = 1;
19static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING; 25static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
26static int battery_health = POWER_SUPPLY_HEALTH_GOOD;
27static int battery_present = 1; /* true */
28static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
29static int battery_capacity = 50;
20 30
21static int test_power_get_ac_property(struct power_supply *psy, 31static int test_power_get_ac_property(struct power_supply *psy,
22 enum power_supply_property psp, 32 enum power_supply_property psp,
@@ -24,7 +34,7 @@ static int test_power_get_ac_property(struct power_supply *psy,
24{ 34{
25 switch (psp) { 35 switch (psp) {
26 case POWER_SUPPLY_PROP_ONLINE: 36 case POWER_SUPPLY_PROP_ONLINE:
27 val->intval = test_power_ac_online; 37 val->intval = ac_online;
28 break; 38 break;
29 default: 39 default:
30 return -EINVAL; 40 return -EINVAL;
@@ -47,22 +57,30 @@ static int test_power_get_battery_property(struct power_supply *psy,
47 val->strval = UTS_RELEASE; 57 val->strval = UTS_RELEASE;
48 break; 58 break;
49 case POWER_SUPPLY_PROP_STATUS: 59 case POWER_SUPPLY_PROP_STATUS:
50 val->intval = test_power_battery_status; 60 val->intval = battery_status;
51 break; 61 break;
52 case POWER_SUPPLY_PROP_CHARGE_TYPE: 62 case POWER_SUPPLY_PROP_CHARGE_TYPE:
53 val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; 63 val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
54 break; 64 break;
55 case POWER_SUPPLY_PROP_HEALTH: 65 case POWER_SUPPLY_PROP_HEALTH:
56 val->intval = POWER_SUPPLY_HEALTH_GOOD; 66 val->intval = battery_health;
67 break;
68 case POWER_SUPPLY_PROP_PRESENT:
69 val->intval = battery_present;
57 break; 70 break;
58 case POWER_SUPPLY_PROP_TECHNOLOGY: 71 case POWER_SUPPLY_PROP_TECHNOLOGY:
59 val->intval = POWER_SUPPLY_TECHNOLOGY_LION; 72 val->intval = battery_technology;
60 break; 73 break;
61 case POWER_SUPPLY_PROP_CAPACITY_LEVEL: 74 case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
62 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; 75 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
63 break; 76 break;
64 case POWER_SUPPLY_PROP_CAPACITY: 77 case POWER_SUPPLY_PROP_CAPACITY:
65 val->intval = 50; 78 case POWER_SUPPLY_PROP_CHARGE_NOW:
79 val->intval = battery_capacity;
80 break;
81 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
82 case POWER_SUPPLY_PROP_CHARGE_FULL:
83 val->intval = 100;
66 break; 84 break;
67 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: 85 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
68 case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: 86 case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
@@ -84,9 +102,11 @@ static enum power_supply_property test_power_battery_props[] = {
84 POWER_SUPPLY_PROP_STATUS, 102 POWER_SUPPLY_PROP_STATUS,
85 POWER_SUPPLY_PROP_CHARGE_TYPE, 103 POWER_SUPPLY_PROP_CHARGE_TYPE,
86 POWER_SUPPLY_PROP_HEALTH, 104 POWER_SUPPLY_PROP_HEALTH,
105 POWER_SUPPLY_PROP_PRESENT,
87 POWER_SUPPLY_PROP_TECHNOLOGY, 106 POWER_SUPPLY_PROP_TECHNOLOGY,
107 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
88 POWER_SUPPLY_PROP_CHARGE_FULL, 108 POWER_SUPPLY_PROP_CHARGE_FULL,
89 POWER_SUPPLY_PROP_CHARGE_EMPTY, 109 POWER_SUPPLY_PROP_CHARGE_NOW,
90 POWER_SUPPLY_PROP_CAPACITY, 110 POWER_SUPPLY_PROP_CAPACITY,
91 POWER_SUPPLY_PROP_CAPACITY_LEVEL, 111 POWER_SUPPLY_PROP_CAPACITY_LEVEL,
92 POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 112 POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -118,6 +138,7 @@ static struct power_supply test_power_supplies[] = {
118 }, 138 },
119}; 139};
120 140
141
121static int __init test_power_init(void) 142static int __init test_power_init(void)
122{ 143{
123 int i; 144 int i;
@@ -145,8 +166,8 @@ static void __exit test_power_exit(void)
145 int i; 166 int i;
146 167
147 /* Let's see how we handle changes... */ 168 /* Let's see how we handle changes... */
148 test_power_ac_online = 0; 169 ac_online = 0;
149 test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING; 170 battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
150 for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) 171 for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
151 power_supply_changed(&test_power_supplies[i]); 172 power_supply_changed(&test_power_supplies[i]);
152 pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n", 173 pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n",
@@ -158,6 +179,241 @@ static void __exit test_power_exit(void)
158} 179}
159module_exit(test_power_exit); 180module_exit(test_power_exit);
160 181
182
183
184#define MAX_KEYLENGTH 256
185struct battery_property_map {
186 int value;
187 char const *key;
188};
189
190static struct battery_property_map map_ac_online[] = {
191 { 0, "off" },
192 { 1, "on" },
193 { -1, NULL },
194};
195
196static struct battery_property_map map_status[] = {
197 { POWER_SUPPLY_STATUS_CHARGING, "charging" },
198 { POWER_SUPPLY_STATUS_DISCHARGING, "discharging" },
199 { POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" },
200 { POWER_SUPPLY_STATUS_FULL, "full" },
201 { -1, NULL },
202};
203
204static struct battery_property_map map_health[] = {
205 { POWER_SUPPLY_HEALTH_GOOD, "good" },
206 { POWER_SUPPLY_HEALTH_OVERHEAT, "overheat" },
207 { POWER_SUPPLY_HEALTH_DEAD, "dead" },
208 { POWER_SUPPLY_HEALTH_OVERVOLTAGE, "overvoltage" },
209 { POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, "failure" },
210 { -1, NULL },
211};
212
213static struct battery_property_map map_present[] = {
214 { 0, "false" },
215 { 1, "true" },
216 { -1, NULL },
217};
218
219static struct battery_property_map map_technology[] = {
220 { POWER_SUPPLY_TECHNOLOGY_NiMH, "NiMH" },
221 { POWER_SUPPLY_TECHNOLOGY_LION, "LION" },
222 { POWER_SUPPLY_TECHNOLOGY_LIPO, "LIPO" },
223 { POWER_SUPPLY_TECHNOLOGY_LiFe, "LiFe" },
224 { POWER_SUPPLY_TECHNOLOGY_NiCd, "NiCd" },
225 { POWER_SUPPLY_TECHNOLOGY_LiMn, "LiMn" },
226 { -1, NULL },
227};
228
229
230static int map_get_value(struct battery_property_map *map, const char *key,
231 int def_val)
232{
233 char buf[MAX_KEYLENGTH];
234 int cr;
235
236 strncpy(buf, key, MAX_KEYLENGTH);
237 buf[MAX_KEYLENGTH-1] = '\0';
238
239 cr = strnlen(buf, MAX_KEYLENGTH) - 1;
240 if (buf[cr] == '\n')
241 buf[cr] = '\0';
242
243 while (map->key) {
244 if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0)
245 return map->value;
246 map++;
247 }
248
249 return def_val;
250}
251
252
253static const char *map_get_key(struct battery_property_map *map, int value,
254 const char *def_key)
255{
256 while (map->key) {
257 if (map->value == value)
258 return map->key;
259 map++;
260 }
261
262 return def_key;
263}
264
265static int param_set_ac_online(const char *key, const struct kernel_param *kp)
266{
267 ac_online = map_get_value(map_ac_online, key, ac_online);
268 power_supply_changed(&test_power_supplies[0]);
269 return 0;
270}
271
272static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
273{
274 strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
275 return strlen(buffer);
276}
277
278static int param_set_battery_status(const char *key,
279 const struct kernel_param *kp)
280{
281 battery_status = map_get_value(map_status, key, battery_status);
282 power_supply_changed(&test_power_supplies[1]);
283 return 0;
284}
285
286static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
287{
288 strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
289 return strlen(buffer);
290}
291
292static int param_set_battery_health(const char *key,
293 const struct kernel_param *kp)
294{
295 battery_health = map_get_value(map_health, key, battery_health);
296 power_supply_changed(&test_power_supplies[1]);
297 return 0;
298}
299
300static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
301{
302 strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
303 return strlen(buffer);
304}
305
306static int param_set_battery_present(const char *key,
307 const struct kernel_param *kp)
308{
309 battery_present = map_get_value(map_present, key, battery_present);
310 power_supply_changed(&test_power_supplies[0]);
311 return 0;
312}
313
314static int param_get_battery_present(char *buffer,
315 const struct kernel_param *kp)
316{
317 strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
318 return strlen(buffer);
319}
320
321static int param_set_battery_technology(const char *key,
322 const struct kernel_param *kp)
323{
324 battery_technology = map_get_value(map_technology, key,
325 battery_technology);
326 power_supply_changed(&test_power_supplies[1]);
327 return 0;
328}
329
330static int param_get_battery_technology(char *buffer,
331 const struct kernel_param *kp)
332{
333 strcpy(buffer,
334 map_get_key(map_technology, battery_technology, "unknown"));
335 return strlen(buffer);
336}
337
338static int param_set_battery_capacity(const char *key,
339 const struct kernel_param *kp)
340{
341 int tmp;
342
343 if (1 != sscanf(key, "%d", &tmp))
344 return -EINVAL;
345
346 battery_capacity = tmp;
347 power_supply_changed(&test_power_supplies[1]);
348 return 0;
349}
350
351#define param_get_battery_capacity param_get_int
352
353
354
355static struct kernel_param_ops param_ops_ac_online = {
356 .set = param_set_ac_online,
357 .get = param_get_ac_online,
358};
359
360static struct kernel_param_ops param_ops_battery_status = {
361 .set = param_set_battery_status,
362 .get = param_get_battery_status,
363};
364
365static struct kernel_param_ops param_ops_battery_present = {
366 .set = param_set_battery_present,
367 .get = param_get_battery_present,
368};
369
370static struct kernel_param_ops param_ops_battery_technology = {
371 .set = param_set_battery_technology,
372 .get = param_get_battery_technology,
373};
374
375static struct kernel_param_ops param_ops_battery_health = {
376 .set = param_set_battery_health,
377 .get = param_get_battery_health,
378};
379
380static struct kernel_param_ops param_ops_battery_capacity = {
381 .set = param_set_battery_capacity,
382 .get = param_get_battery_capacity,
383};
384
385
386#define param_check_ac_online(name, p) __param_check(name, p, void);
387#define param_check_battery_status(name, p) __param_check(name, p, void);
388#define param_check_battery_present(name, p) __param_check(name, p, void);
389#define param_check_battery_technology(name, p) __param_check(name, p, void);
390#define param_check_battery_health(name, p) __param_check(name, p, void);
391#define param_check_battery_capacity(name, p) __param_check(name, p, void);
392
393
394module_param(ac_online, ac_online, 0644);
395MODULE_PARM_DESC(ac_online, "AC charging state <on|off>");
396
397module_param(battery_status, battery_status, 0644);
398MODULE_PARM_DESC(battery_status,
399 "battery status <charging|discharging|not-charging|full>");
400
401module_param(battery_present, battery_present, 0644);
402MODULE_PARM_DESC(battery_present,
403 "battery presence state <true|false>");
404
405module_param(battery_technology, battery_technology, 0644);
406MODULE_PARM_DESC(battery_technology,
407 "battery technology <NiMH|LION|LIPO|LiFe|NiCd|LiMn>");
408
409module_param(battery_health, battery_health, 0644);
410MODULE_PARM_DESC(battery_health,
411 "battery health state <good|overheat|dead|overvoltage|failure>");
412
413module_param(battery_capacity, battery_capacity, 0644);
414MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
415
416
161MODULE_DESCRIPTION("Power supply driver for testing"); 417MODULE_DESCRIPTION("Power supply driver for testing");
162MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); 418MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
163MODULE_LICENSE("GPL"); 419MODULE_LICENSE("GPL");
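
Since the new parameters are registered with mode 0644, they show up as writable files under /sys/module/test_power/parameters/ once the module is loaded, and each write runs the matching param_set_*() handler, which calls power_supply_changed(). The user-space sketch below flips the simulated battery to "discharging"; the sysfs path follows the usual module-parameter convention and is an assumption, not something added by this patch.

/*
 * Hypothetical user-space helper: writes the writable module parameter
 * exposed by the patch above, which invokes param_set_battery_status()
 * and, in turn, power_supply_changed(). The path follows the standard
 * /sys/module/<module>/parameters/<param> layout.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/module/test_power/parameters/battery_status";
	const char *value = "discharging\n";
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, value, strlen(value)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
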
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index e5ced3a4c1ed..d119c38b3ff6 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -271,24 +271,33 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
271} 271}
272 272
273#ifdef CONFIG_PM 273#ifdef CONFIG_PM
274static int z2_batt_suspend(struct i2c_client *client, pm_message_t state) 274static int z2_batt_suspend(struct device *dev)
275{ 275{
276 struct i2c_client *client = to_i2c_client(dev);
276 struct z2_charger *charger = i2c_get_clientdata(client); 277 struct z2_charger *charger = i2c_get_clientdata(client);
277 278
278 flush_work_sync(&charger->bat_work); 279 flush_work_sync(&charger->bat_work);
279 return 0; 280 return 0;
280} 281}
281 282
282static int z2_batt_resume(struct i2c_client *client) 283static int z2_batt_resume(struct device *dev)
283{ 284{
285 struct i2c_client *client = to_i2c_client(dev);
284 struct z2_charger *charger = i2c_get_clientdata(client); 286 struct z2_charger *charger = i2c_get_clientdata(client);
285 287
286 schedule_work(&charger->bat_work); 288 schedule_work(&charger->bat_work);
287 return 0; 289 return 0;
288} 290}
291
292static const struct dev_pm_ops z2_battery_pm_ops = {
293 .suspend = z2_batt_suspend,
294 .resume = z2_batt_resume,
295};
296
297#define Z2_BATTERY_PM_OPS (&z2_battery_pm_ops)
298
289#else 299#else
290#define z2_batt_suspend NULL 300#define Z2_BATTERY_PM_OPS (NULL)
291#define z2_batt_resume NULL
292#endif 301#endif
293 302
294static const struct i2c_device_id z2_batt_id[] = { 303static const struct i2c_device_id z2_batt_id[] = {
@@ -301,11 +310,10 @@ static struct i2c_driver z2_batt_driver = {
301 .driver = { 310 .driver = {
302 .name = "z2-battery", 311 .name = "z2-battery",
303 .owner = THIS_MODULE, 312 .owner = THIS_MODULE,
313 .pm = Z2_BATTERY_PM_OPS
304 }, 314 },
305 .probe = z2_batt_probe, 315 .probe = z2_batt_probe,
306 .remove = z2_batt_remove, 316 .remove = z2_batt_remove,
307 .suspend = z2_batt_suspend,
308 .resume = z2_batt_resume,
309 .id_table = z2_batt_id, 317 .id_table = z2_batt_id,
310}; 318};
311 319
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
new file mode 100644
index 000000000000..68d720102296
--- /dev/null
+++ b/drivers/ptp/Kconfig
@@ -0,0 +1,75 @@
1#
2# PTP clock support configuration
3#
4
5menu "PTP clock support"
6
7comment "Enable Device Drivers -> PPS to see the PTP clock options."
8 depends on PPS=n
9
10config PTP_1588_CLOCK
11 tristate "PTP clock support"
12 depends on EXPERIMENTAL
13 depends on PPS
14 help
15 The IEEE 1588 standard defines a method to precisely
16 synchronize distributed clocks over Ethernet networks. The
17 standard defines a Precision Time Protocol (PTP), which can
18 be used to achieve synchronization within a few dozen
19 microseconds. In addition, with the help of special hardware
20 time stamping units, it can be possible to achieve
21 synchronization to within a few hundred nanoseconds.
22
23 This driver adds support for PTP clocks as character
24 devices. If you want to use a PTP clock, then you should
25 also enable at least one clock driver as well.
26
27 To compile this driver as a module, choose M here: the module
28 will be called ptp.
29
30config PTP_1588_CLOCK_GIANFAR
31 tristate "Freescale eTSEC as PTP clock"
32 depends on PTP_1588_CLOCK
33 depends on GIANFAR
34 help
35 This driver adds support for using the eTSEC as a PTP
36 clock. This clock is only useful if your PTP programs are
37 getting hardware time stamps on the PTP Ethernet packets
38 using the SO_TIMESTAMPING API.
39
40 To compile this driver as a module, choose M here: the module
41 will be called gianfar_ptp.
42
43config PTP_1588_CLOCK_IXP46X
44 tristate "Intel IXP46x as PTP clock"
45 depends on PTP_1588_CLOCK
46 depends on IXP4XX_ETH
47 help
48 This driver adds support for using the IXP46X as a PTP
49 clock. This clock is only useful if your PTP programs are
50 getting hardware time stamps on the PTP Ethernet packets
51 using the SO_TIMESTAMPING API.
52
53 To compile this driver as a module, choose M here: the module
54 will be called ptp_ixp46x.
55
56comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
57 depends on PTP_1588_CLOCK && (PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n)
58
59config DP83640_PHY
60 tristate "Driver for the National Semiconductor DP83640 PHYTER"
61 depends on PTP_1588_CLOCK
62 depends on NETWORK_PHY_TIMESTAMPING
63 depends on PHYLIB
64 ---help---
65 Supports the DP83640 PHYTER with IEEE 1588 features.
66
67 This driver adds support for using the DP83640 as a PTP
68 clock. This clock is only useful if your PTP programs are
69 getting hardware time stamps on the PTP Ethernet packets
70 using the SO_TIMESTAMPING API.
71
72 In order for this to work, your MAC driver must also
 73	  implement the skb_tx_timestamp() function.
74
75endmenu
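
The help text above describes the clocks appearing as character devices. A minimal userspace sketch, not part of this patch, that opens such a device and queries its capabilities through the PTP_CLOCK_GETCAPS ioctl introduced by this series; the /dev/ptp0 device path is an assumption.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/ptp_clock.h>    /* PTP_CLOCK_GETCAPS, struct ptp_clock_caps */

    int main(void)
    {
            struct ptp_clock_caps caps;
            int fd = open("/dev/ptp0", O_RDWR);     /* assumed device node */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
                    perror("PTP_CLOCK_GETCAPS");
                    close(fd);
                    return 1;
            }
            printf("max_adj %d ppb, %d alarms, %d ext ts, %d per out, pps %d\n",
                   caps.max_adj, caps.n_alarm, caps.n_ext_ts,
                   caps.n_per_out, caps.pps);
            close(fd);
            return 0;
    }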
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
new file mode 100644
index 000000000000..f6933e83de72
--- /dev/null
+++ b/drivers/ptp/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for PTP 1588 clock support.
3#
4
5ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
6obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
7obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
new file mode 100644
index 000000000000..a8d03aeb4051
--- /dev/null
+++ b/drivers/ptp/ptp_chardev.c
@@ -0,0 +1,159 @@
1/*
2 * PTP 1588 clock support - character device implementation.
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/module.h>
21#include <linux/posix-clock.h>
22#include <linux/poll.h>
23#include <linux/sched.h>
24
25#include "ptp_private.h"
26
27int ptp_open(struct posix_clock *pc, fmode_t fmode)
28{
29 return 0;
30}
31
32long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
33{
34 struct ptp_clock_caps caps;
35 struct ptp_clock_request req;
36 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
37 struct ptp_clock_info *ops = ptp->info;
38 int enable, err = 0;
39
40 switch (cmd) {
41
42 case PTP_CLOCK_GETCAPS:
43 memset(&caps, 0, sizeof(caps));
44 caps.max_adj = ptp->info->max_adj;
45 caps.n_alarm = ptp->info->n_alarm;
46 caps.n_ext_ts = ptp->info->n_ext_ts;
47 caps.n_per_out = ptp->info->n_per_out;
48 caps.pps = ptp->info->pps;
 49		err = copy_to_user((void __user *)arg, &caps, sizeof(caps)) ? -EFAULT : 0;
50 break;
51
52 case PTP_EXTTS_REQUEST:
53 if (copy_from_user(&req.extts, (void __user *)arg,
54 sizeof(req.extts))) {
55 err = -EFAULT;
56 break;
57 }
58 if (req.extts.index >= ops->n_ext_ts) {
59 err = -EINVAL;
60 break;
61 }
62 req.type = PTP_CLK_REQ_EXTTS;
63 enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
64 err = ops->enable(ops, &req, enable);
65 break;
66
67 case PTP_PEROUT_REQUEST:
68 if (copy_from_user(&req.perout, (void __user *)arg,
69 sizeof(req.perout))) {
70 err = -EFAULT;
71 break;
72 }
73 if (req.perout.index >= ops->n_per_out) {
74 err = -EINVAL;
75 break;
76 }
77 req.type = PTP_CLK_REQ_PEROUT;
78 enable = req.perout.period.sec || req.perout.period.nsec;
79 err = ops->enable(ops, &req, enable);
80 break;
81
82 case PTP_ENABLE_PPS:
83 if (!capable(CAP_SYS_TIME))
84 return -EPERM;
85 req.type = PTP_CLK_REQ_PPS;
86 enable = arg ? 1 : 0;
87 err = ops->enable(ops, &req, enable);
88 break;
89
90 default:
91 err = -ENOTTY;
92 break;
93 }
94 return err;
95}
96
97unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
98{
99 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
100
101 poll_wait(fp, &ptp->tsev_wq, wait);
102
103 return queue_cnt(&ptp->tsevq) ? POLLIN : 0;
104}
105
106ssize_t ptp_read(struct posix_clock *pc,
107 uint rdflags, char __user *buf, size_t cnt)
108{
109 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
110 struct timestamp_event_queue *queue = &ptp->tsevq;
111 struct ptp_extts_event event[PTP_BUF_TIMESTAMPS];
112 unsigned long flags;
113 size_t qcnt, i;
114
115 if (cnt % sizeof(struct ptp_extts_event) != 0)
116 return -EINVAL;
117
118 if (cnt > sizeof(event))
119 cnt = sizeof(event);
120
121 cnt = cnt / sizeof(struct ptp_extts_event);
122
123 if (mutex_lock_interruptible(&ptp->tsevq_mux))
124 return -ERESTARTSYS;
125
126 if (wait_event_interruptible(ptp->tsev_wq,
127 ptp->defunct || queue_cnt(queue))) {
128 mutex_unlock(&ptp->tsevq_mux);
129 return -ERESTARTSYS;
130 }
131	if (ptp->defunct) {
132		mutex_unlock(&ptp->tsevq_mux);
133		return -ENODEV;
134	}
135 spin_lock_irqsave(&queue->lock, flags);
136
137 qcnt = queue_cnt(queue);
138
139 if (cnt > qcnt)
140 cnt = qcnt;
141
142 for (i = 0; i < cnt; i++) {
143 event[i] = queue->buf[queue->head];
144 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
145 }
146
147 spin_unlock_irqrestore(&queue->lock, flags);
148
149	cnt = cnt * sizeof(struct ptp_extts_event);
150
151	if (copy_to_user(buf, event, cnt)) {
152		mutex_unlock(&ptp->tsevq_mux);
153		return -EFAULT;
154	}
155
156	mutex_unlock(&ptp->tsevq_mux);
157
158 return cnt;
159}
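
Since ptp_read() above only hands out whole struct ptp_extts_event records, userspace must request a multiple of that size. A sketch of draining external timestamps from the character device, assuming /dev/ptp0 exists and an EXTTS channel has already been enabled (for example via the PTP_EXTTS_REQUEST ioctl handled above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/ptp_clock.h>    /* struct ptp_extts_event */

    int main(void)
    {
            struct ptp_extts_event events[8];
            int fd = open("/dev/ptp0", O_RDONLY);   /* assumed device node */
            ssize_t n, i;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Blocks until the driver has queued at least one timestamp. */
            n = read(fd, events, sizeof(events));
            if (n < 0) {
                    perror("read");
                    close(fd);
                    return 1;
            }
            for (i = 0; i < n / (ssize_t)sizeof(events[0]); i++)
                    printf("channel %u: %lld.%09u\n", events[i].index,
                           (long long)events[i].t.sec, events[i].t.nsec);
            close(fd);
            return 0;
    }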
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
new file mode 100644
index 000000000000..cf3f9997546d
--- /dev/null
+++ b/drivers/ptp/ptp_clock.c
@@ -0,0 +1,343 @@
1/*
2 * PTP 1588 clock support
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/bitops.h>
21#include <linux/device.h>
22#include <linux/err.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/posix-clock.h>
27#include <linux/pps_kernel.h>
28#include <linux/slab.h>
29#include <linux/syscalls.h>
30#include <linux/uaccess.h>
31
32#include "ptp_private.h"
33
34#define PTP_MAX_ALARMS 4
35#define PTP_MAX_CLOCKS 8
36#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
37#define PTP_PPS_EVENT PPS_CAPTUREASSERT
38#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
39
40/* private globals */
41
42static dev_t ptp_devt;
43static struct class *ptp_class;
44
45static DECLARE_BITMAP(ptp_clocks_map, PTP_MAX_CLOCKS);
46static DEFINE_MUTEX(ptp_clocks_mutex); /* protects 'ptp_clocks_map' */
47
48/* time stamp event queue operations */
49
50static inline int queue_free(struct timestamp_event_queue *q)
51{
52 return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
53}
54
55static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
56 struct ptp_clock_event *src)
57{
58 struct ptp_extts_event *dst;
59 unsigned long flags;
60 s64 seconds;
61 u32 remainder;
62
63 seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
64
65 spin_lock_irqsave(&queue->lock, flags);
66
67 dst = &queue->buf[queue->tail];
68 dst->index = src->index;
69 dst->t.sec = seconds;
70 dst->t.nsec = remainder;
71
72 if (!queue_free(queue))
73 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
74
75 queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;
76
77 spin_unlock_irqrestore(&queue->lock, flags);
78}
79
80static s32 scaled_ppm_to_ppb(long ppm)
81{
82 /*
83 * The 'freq' field in the 'struct timex' is in parts per
84 * million, but with a 16 bit binary fractional field.
85 *
86 * We want to calculate
87 *
88 * ppb = scaled_ppm * 1000 / 2^16
89 *
90 * which simplifies to
91 *
92 * ppb = scaled_ppm * 125 / 2^13
93 */
94 s64 ppb = 1 + ppm;
95 ppb *= 125;
96 ppb >>= 13;
97 return (s32) ppb;
98}
99
100/* posix clock implementation */
101
102static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
103{
104 return 1; /* always round timer functions to one nanosecond */
105}
106
107static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
108{
109 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
110 return ptp->info->settime(ptp->info, tp);
111}
112
113static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp)
114{
115 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
116 return ptp->info->gettime(ptp->info, tp);
117}
118
119static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
120{
121 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
122 struct ptp_clock_info *ops;
123 int err = -EOPNOTSUPP;
124
125 ops = ptp->info;
126
127 if (tx->modes & ADJ_SETOFFSET) {
128 struct timespec ts;
129 ktime_t kt;
130 s64 delta;
131
132 ts.tv_sec = tx->time.tv_sec;
133 ts.tv_nsec = tx->time.tv_usec;
134
135 if (!(tx->modes & ADJ_NANO))
136 ts.tv_nsec *= 1000;
137
138 if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
139 return -EINVAL;
140
141 kt = timespec_to_ktime(ts);
142 delta = ktime_to_ns(kt);
143 err = ops->adjtime(ops, delta);
144
145 } else if (tx->modes & ADJ_FREQUENCY) {
146
147 err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
148 }
149
150 return err;
151}
152
153static struct posix_clock_operations ptp_clock_ops = {
154 .owner = THIS_MODULE,
155 .clock_adjtime = ptp_clock_adjtime,
156 .clock_gettime = ptp_clock_gettime,
157 .clock_getres = ptp_clock_getres,
158 .clock_settime = ptp_clock_settime,
159 .ioctl = ptp_ioctl,
160 .open = ptp_open,
161 .poll = ptp_poll,
162 .read = ptp_read,
163};
164
165static void delete_ptp_clock(struct posix_clock *pc)
166{
167 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
168
169 mutex_destroy(&ptp->tsevq_mux);
170
171 /* Remove the clock from the bit map. */
172 mutex_lock(&ptp_clocks_mutex);
173 clear_bit(ptp->index, ptp_clocks_map);
174 mutex_unlock(&ptp_clocks_mutex);
175
176 kfree(ptp);
177}
178
179/* public interface */
180
181struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
182{
183 struct ptp_clock *ptp;
184 int err = 0, index, major = MAJOR(ptp_devt);
185
186 if (info->n_alarm > PTP_MAX_ALARMS)
187 return ERR_PTR(-EINVAL);
188
189 /* Find a free clock slot and reserve it. */
190 err = -EBUSY;
191 mutex_lock(&ptp_clocks_mutex);
192 index = find_first_zero_bit(ptp_clocks_map, PTP_MAX_CLOCKS);
193 if (index < PTP_MAX_CLOCKS)
194 set_bit(index, ptp_clocks_map);
195 else
196 goto no_slot;
197
198 /* Initialize a clock structure. */
199 err = -ENOMEM;
200 ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
201 if (ptp == NULL)
202 goto no_memory;
203
204 ptp->clock.ops = ptp_clock_ops;
205 ptp->clock.release = delete_ptp_clock;
206 ptp->info = info;
207 ptp->devid = MKDEV(major, index);
208 ptp->index = index;
209 spin_lock_init(&ptp->tsevq.lock);
210 mutex_init(&ptp->tsevq_mux);
211 init_waitqueue_head(&ptp->tsev_wq);
212
213 /* Create a new device in our class. */
214 ptp->dev = device_create(ptp_class, NULL, ptp->devid, ptp,
215 "ptp%d", ptp->index);
216 if (IS_ERR(ptp->dev))
217 goto no_device;
218
219 dev_set_drvdata(ptp->dev, ptp);
220
221 err = ptp_populate_sysfs(ptp);
222 if (err)
223 goto no_sysfs;
224
225 /* Register a new PPS source. */
226 if (info->pps) {
227 struct pps_source_info pps;
228 memset(&pps, 0, sizeof(pps));
229 snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
230 pps.mode = PTP_PPS_MODE;
231 pps.owner = info->owner;
232 ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
233 if (!ptp->pps_source) {
234 pr_err("failed to register pps source\n");
235 goto no_pps;
236 }
237 }
238
239 /* Create a posix clock. */
240 err = posix_clock_register(&ptp->clock, ptp->devid);
241 if (err) {
242 pr_err("failed to create posix clock\n");
243 goto no_clock;
244 }
245
246 mutex_unlock(&ptp_clocks_mutex);
247 return ptp;
248
249no_clock:
250 if (ptp->pps_source)
251 pps_unregister_source(ptp->pps_source);
252no_pps:
253 ptp_cleanup_sysfs(ptp);
254no_sysfs:
255 device_destroy(ptp_class, ptp->devid);
256no_device:
257 mutex_destroy(&ptp->tsevq_mux);
258 kfree(ptp);
259no_memory:
260 clear_bit(index, ptp_clocks_map);
261no_slot:
262 mutex_unlock(&ptp_clocks_mutex);
263 return ERR_PTR(err);
264}
265EXPORT_SYMBOL(ptp_clock_register);
266
267int ptp_clock_unregister(struct ptp_clock *ptp)
268{
269 ptp->defunct = 1;
270 wake_up_interruptible(&ptp->tsev_wq);
271
272 /* Release the clock's resources. */
273 if (ptp->pps_source)
274 pps_unregister_source(ptp->pps_source);
275 ptp_cleanup_sysfs(ptp);
276 device_destroy(ptp_class, ptp->devid);
277
278 posix_clock_unregister(&ptp->clock);
279 return 0;
280}
281EXPORT_SYMBOL(ptp_clock_unregister);
282
283void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
284{
285 struct pps_event_time evt;
286
287 switch (event->type) {
288
289 case PTP_CLOCK_ALARM:
290 break;
291
292 case PTP_CLOCK_EXTTS:
293 enqueue_external_timestamp(&ptp->tsevq, event);
294 wake_up_interruptible(&ptp->tsev_wq);
295 break;
296
297 case PTP_CLOCK_PPS:
298 pps_get_ts(&evt);
299 pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
300 break;
301 }
302}
303EXPORT_SYMBOL(ptp_clock_event);
304
305/* module operations */
306
307static void __exit ptp_exit(void)
308{
309 class_destroy(ptp_class);
310 unregister_chrdev_region(ptp_devt, PTP_MAX_CLOCKS);
311}
312
313static int __init ptp_init(void)
314{
315 int err;
316
317 ptp_class = class_create(THIS_MODULE, "ptp");
318 if (IS_ERR(ptp_class)) {
319 pr_err("ptp: failed to allocate class\n");
320 return PTR_ERR(ptp_class);
321 }
322
323 err = alloc_chrdev_region(&ptp_devt, 0, PTP_MAX_CLOCKS, "ptp");
324 if (err < 0) {
325 pr_err("ptp: failed to allocate device region\n");
326 goto no_region;
327 }
328
329 ptp_class->dev_attrs = ptp_dev_attrs;
330 pr_info("PTP clock support registered\n");
331 return 0;
332
333no_region:
334 class_destroy(ptp_class);
335 return err;
336}
337
338subsys_initcall(ptp_init);
339module_exit(ptp_exit);
340
341MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
342MODULE_DESCRIPTION("PTP clock support");
343MODULE_LICENSE("GPL");
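
scaled_ppm_to_ppb() above converts the frequency value that clock_adjtime() supplies in struct timex (parts per million with a 16-bit binary fraction) into the plain parts-per-billion number handed to the driver's adjfreq() callback. A small standalone check of that arithmetic; it simply mirrors the kernel helper in userspace C:

    #include <stdio.h>

    /* Mirrors scaled_ppm_to_ppb() from ptp_clock.c above. */
    static long scaled_ppm_to_ppb(long ppm)
    {
            long long ppb = 1 + ppm;        /* same +1 as the kernel helper */
            ppb *= 125;                     /* 1000 / 2^16 == 125 / 2^13 */
            ppb >>= 13;
            return (long)ppb;
    }

    int main(void)
    {
            /* 1 ppm with a 16-bit fractional part is 65536 scaled ppm. */
            printf("%ld\n", scaled_ppm_to_ppb(65536));      /* prints 1000 */
            /* Half a ppm (32768 scaled) maps to 500 ppb. */
            printf("%ld\n", scaled_ppm_to_ppb(32768));      /* prints 500 */
            return 0;
    }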
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
new file mode 100644
index 000000000000..803d665b15ef
--- /dev/null
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -0,0 +1,332 @@
1/*
2 * PTP 1588 clock using the IXP46X
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/device.h>
21#include <linux/err.h>
22#include <linux/gpio.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/irq.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29
30#include <linux/ptp_clock_kernel.h>
31#include <mach/ixp46x_ts.h>
32
33#define DRIVER "ptp_ixp46x"
34#define N_EXT_TS 2
35#define MASTER_GPIO 8
36#define MASTER_IRQ 25
37#define SLAVE_GPIO 7
38#define SLAVE_IRQ 24
39
40struct ixp_clock {
41 struct ixp46x_ts_regs *regs;
42 struct ptp_clock *ptp_clock;
43 struct ptp_clock_info caps;
44 int exts0_enabled;
45 int exts1_enabled;
46};
47
48DEFINE_SPINLOCK(register_lock);
49
50/*
51 * Register access functions
52 */
53
54static u64 ixp_systime_read(struct ixp46x_ts_regs *regs)
55{
56 u64 ns;
57 u32 lo, hi;
58
59 lo = __raw_readl(&regs->systime_lo);
60 hi = __raw_readl(&regs->systime_hi);
61
62 ns = ((u64) hi) << 32;
63 ns |= lo;
64 ns <<= TICKS_NS_SHIFT;
65
66 return ns;
67}
68
69static void ixp_systime_write(struct ixp46x_ts_regs *regs, u64 ns)
70{
71 u32 hi, lo;
72
73 ns >>= TICKS_NS_SHIFT;
74 hi = ns >> 32;
75 lo = ns & 0xffffffff;
76
77 __raw_writel(lo, &regs->systime_lo);
78 __raw_writel(hi, &regs->systime_hi);
79}
80
81/*
82 * Interrupt service routine
83 */
84
85static irqreturn_t isr(int irq, void *priv)
86{
87 struct ixp_clock *ixp_clock = priv;
88 struct ixp46x_ts_regs *regs = ixp_clock->regs;
89 struct ptp_clock_event event;
90 u32 ack = 0, lo, hi, val;
91
92 val = __raw_readl(&regs->event);
93
94 if (val & TSER_SNS) {
95 ack |= TSER_SNS;
96 if (ixp_clock->exts0_enabled) {
97 hi = __raw_readl(&regs->asms_hi);
98 lo = __raw_readl(&regs->asms_lo);
99 event.type = PTP_CLOCK_EXTTS;
100 event.index = 0;
101 event.timestamp = ((u64) hi) << 32;
102 event.timestamp |= lo;
103 event.timestamp <<= TICKS_NS_SHIFT;
104 ptp_clock_event(ixp_clock->ptp_clock, &event);
105 }
106 }
107
108 if (val & TSER_SNM) {
109 ack |= TSER_SNM;
110 if (ixp_clock->exts1_enabled) {
111 hi = __raw_readl(&regs->amms_hi);
112 lo = __raw_readl(&regs->amms_lo);
113 event.type = PTP_CLOCK_EXTTS;
114 event.index = 1;
115 event.timestamp = ((u64) hi) << 32;
116 event.timestamp |= lo;
117 event.timestamp <<= TICKS_NS_SHIFT;
118 ptp_clock_event(ixp_clock->ptp_clock, &event);
119 }
120 }
121
122 if (val & TTIPEND)
123 ack |= TTIPEND; /* this bit seems to be always set */
124
125 if (ack) {
126 __raw_writel(ack, &regs->event);
127 return IRQ_HANDLED;
128 } else
129 return IRQ_NONE;
130}
131
132/*
133 * PTP clock operations
134 */
135
136static int ptp_ixp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
137{
138 u64 adj;
139 u32 diff, addend;
140 int neg_adj = 0;
141 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
142 struct ixp46x_ts_regs *regs = ixp_clock->regs;
143
144 if (ppb < 0) {
145 neg_adj = 1;
146 ppb = -ppb;
147 }
148 addend = DEFAULT_ADDEND;
149 adj = addend;
150 adj *= ppb;
151 diff = div_u64(adj, 1000000000ULL);
152
153 addend = neg_adj ? addend - diff : addend + diff;
154
155 __raw_writel(addend, &regs->addend);
156
157 return 0;
158}
159
160static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
161{
162 s64 now;
163 unsigned long flags;
164 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
165 struct ixp46x_ts_regs *regs = ixp_clock->regs;
166
167 spin_lock_irqsave(&register_lock, flags);
168
169 now = ixp_systime_read(regs);
170 now += delta;
171 ixp_systime_write(regs, now);
172
173 spin_unlock_irqrestore(&register_lock, flags);
174
175 return 0;
176}
177
178static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
179{
180 u64 ns;
181 u32 remainder;
182 unsigned long flags;
183 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
184 struct ixp46x_ts_regs *regs = ixp_clock->regs;
185
186 spin_lock_irqsave(&register_lock, flags);
187
188 ns = ixp_systime_read(regs);
189
190 spin_unlock_irqrestore(&register_lock, flags);
191
192 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
193 ts->tv_nsec = remainder;
194 return 0;
195}
196
197static int ptp_ixp_settime(struct ptp_clock_info *ptp,
198 const struct timespec *ts)
199{
200 u64 ns;
201 unsigned long flags;
202 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
203 struct ixp46x_ts_regs *regs = ixp_clock->regs;
204
205 ns = ts->tv_sec * 1000000000ULL;
206 ns += ts->tv_nsec;
207
208 spin_lock_irqsave(&register_lock, flags);
209
210 ixp_systime_write(regs, ns);
211
212 spin_unlock_irqrestore(&register_lock, flags);
213
214 return 0;
215}
216
217static int ptp_ixp_enable(struct ptp_clock_info *ptp,
218 struct ptp_clock_request *rq, int on)
219{
220 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
221
222 switch (rq->type) {
223 case PTP_CLK_REQ_EXTTS:
224 switch (rq->extts.index) {
225 case 0:
226 ixp_clock->exts0_enabled = on ? 1 : 0;
227 break;
228 case 1:
229 ixp_clock->exts1_enabled = on ? 1 : 0;
230 break;
231 default:
232 return -EINVAL;
233 }
234 return 0;
235 default:
236 break;
237 }
238
239 return -EOPNOTSUPP;
240}
241
242static struct ptp_clock_info ptp_ixp_caps = {
243 .owner = THIS_MODULE,
244 .name = "IXP46X timer",
245 .max_adj = 66666655,
246 .n_ext_ts = N_EXT_TS,
247 .pps = 0,
248 .adjfreq = ptp_ixp_adjfreq,
249 .adjtime = ptp_ixp_adjtime,
250 .gettime = ptp_ixp_gettime,
251 .settime = ptp_ixp_settime,
252 .enable = ptp_ixp_enable,
253};
254
255/* module operations */
256
257static struct ixp_clock ixp_clock;
258
259static int setup_interrupt(int gpio)
260{
261 int irq;
262
263 gpio_line_config(gpio, IXP4XX_GPIO_IN);
264
265 irq = gpio_to_irq(gpio);
266
267 if (NO_IRQ == irq)
268 return NO_IRQ;
269
270 if (irq_set_irq_type(irq, IRQF_TRIGGER_FALLING)) {
271 pr_err("cannot set trigger type for irq %d\n", irq);
272 return NO_IRQ;
273 }
274
275 if (request_irq(irq, isr, 0, DRIVER, &ixp_clock)) {
276 pr_err("request_irq failed for irq %d\n", irq);
277 return NO_IRQ;
278 }
279
280 return irq;
281}
282
283static void __exit ptp_ixp_exit(void)
284{
285 free_irq(MASTER_IRQ, &ixp_clock);
286 free_irq(SLAVE_IRQ, &ixp_clock);
287 ptp_clock_unregister(ixp_clock.ptp_clock);
288}
289
290static int __init ptp_ixp_init(void)
291{
292 if (!cpu_is_ixp46x())
293 return -ENODEV;
294
295 ixp_clock.regs =
296 (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
297
298 ixp_clock.caps = ptp_ixp_caps;
299
300 ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps);
301
302 if (IS_ERR(ixp_clock.ptp_clock))
303 return PTR_ERR(ixp_clock.ptp_clock);
304
305 __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend);
306 __raw_writel(1, &ixp_clock.regs->trgt_lo);
307 __raw_writel(0, &ixp_clock.regs->trgt_hi);
308 __raw_writel(TTIPEND, &ixp_clock.regs->event);
309
310 if (MASTER_IRQ != setup_interrupt(MASTER_GPIO)) {
311 pr_err("failed to setup gpio %d as irq\n", MASTER_GPIO);
312 goto no_master;
313 }
314 if (SLAVE_IRQ != setup_interrupt(SLAVE_GPIO)) {
315 pr_err("failed to setup gpio %d as irq\n", SLAVE_GPIO);
316 goto no_slave;
317 }
318
319 return 0;
320no_slave:
321 free_irq(MASTER_IRQ, &ixp_clock);
322no_master:
323 ptp_clock_unregister(ixp_clock.ptp_clock);
324 return -ENODEV;
325}
326
327module_init(ptp_ixp_init);
328module_exit(ptp_ixp_exit);
329
330MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
331MODULE_DESCRIPTION("PTP clock using the IXP46X timer");
332MODULE_LICENSE("GPL");
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
new file mode 100644
index 000000000000..4d5b5082c3b1
--- /dev/null
+++ b/drivers/ptp/ptp_private.h
@@ -0,0 +1,92 @@
1/*
2 * PTP 1588 clock support - private declarations for the core module.
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#ifndef _PTP_PRIVATE_H_
21#define _PTP_PRIVATE_H_
22
23#include <linux/cdev.h>
24#include <linux/device.h>
25#include <linux/mutex.h>
26#include <linux/posix-clock.h>
27#include <linux/ptp_clock.h>
28#include <linux/ptp_clock_kernel.h>
29#include <linux/time.h>
30
31#define PTP_MAX_TIMESTAMPS 128
32#define PTP_BUF_TIMESTAMPS 30
33
34struct timestamp_event_queue {
35 struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
36 int head;
37 int tail;
38 spinlock_t lock;
39};
40
41struct ptp_clock {
42 struct posix_clock clock;
43 struct device *dev;
44 struct ptp_clock_info *info;
45 dev_t devid;
46 int index; /* index into clocks.map */
47 struct pps_device *pps_source;
48 struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
49 struct mutex tsevq_mux; /* one process at a time reading the fifo */
50 wait_queue_head_t tsev_wq;
51 int defunct; /* tells readers to go away when clock is being removed */
52};
53
54/*
55 * The function queue_cnt() is safe for readers to call without
56 * holding q->lock. Readers use this function to verify that the queue
57 * is nonempty before proceeding with a dequeue operation. The fact
58 * that a writer might concurrently increment the tail does not
59 * matter, since the queue remains nonempty nonetheless.
60 */
61static inline int queue_cnt(struct timestamp_event_queue *q)
62{
63 int cnt = q->tail - q->head;
64 return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
65}
66
67/*
68 * see ptp_chardev.c
69 */
70
71long ptp_ioctl(struct posix_clock *pc,
72 unsigned int cmd, unsigned long arg);
73
74int ptp_open(struct posix_clock *pc, fmode_t fmode);
75
76ssize_t ptp_read(struct posix_clock *pc,
77 uint flags, char __user *buf, size_t cnt);
78
79uint ptp_poll(struct posix_clock *pc,
80 struct file *fp, poll_table *wait);
81
82/*
83 * see ptp_sysfs.c
84 */
85
86extern struct device_attribute ptp_dev_attrs[];
87
88int ptp_cleanup_sysfs(struct ptp_clock *ptp);
89
90int ptp_populate_sysfs(struct ptp_clock *ptp);
91
92#endif
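
The comment above queue_cnt() works because the ring keeps one of its PTP_MAX_TIMESTAMPS slots free, so tail - head taken modulo the queue size is always a valid count for the single reader even while the writer advances tail. A standalone illustration of the index arithmetic, with the queue size shrunk for readability:

    #include <assert.h>
    #include <stdio.h>

    #define QSIZE 8         /* stand-in for PTP_MAX_TIMESTAMPS */

    /* Same computation as queue_cnt() in ptp_private.h above. */
    static int queue_cnt(int head, int tail)
    {
            int cnt = tail - head;
            return cnt < 0 ? QSIZE + cnt : cnt;
    }

    int main(void)
    {
            /* No wrap: tail ran three slots ahead of head. */
            assert(queue_cnt(2, 5) == 3);
            /* Wrapped: tail restarted at index 1 while head is still at 6. */
            assert(queue_cnt(6, 1) == 3);
            /* A full queue keeps one slot free, so the count never reaches QSIZE. */
            assert(queue_cnt(3, 2) == QSIZE - 1);
            printf("queue_cnt examples ok\n");
            return 0;
    }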
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
new file mode 100644
index 000000000000..2f93926ac976
--- /dev/null
+++ b/drivers/ptp/ptp_sysfs.c
@@ -0,0 +1,230 @@
1/*
2 * PTP 1588 clock support - sysfs interface.
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/capability.h>
21
22#include "ptp_private.h"
23
24static ssize_t clock_name_show(struct device *dev,
25 struct device_attribute *attr, char *page)
26{
27 struct ptp_clock *ptp = dev_get_drvdata(dev);
28 return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
29}
30
31#define PTP_SHOW_INT(name) \
32static ssize_t name##_show(struct device *dev, \
33 struct device_attribute *attr, char *page) \
34{ \
35 struct ptp_clock *ptp = dev_get_drvdata(dev); \
36 return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->name); \
37}
38
39PTP_SHOW_INT(max_adj);
40PTP_SHOW_INT(n_alarm);
41PTP_SHOW_INT(n_ext_ts);
42PTP_SHOW_INT(n_per_out);
43PTP_SHOW_INT(pps);
44
45#define PTP_RO_ATTR(_var, _name) { \
46 .attr = { .name = __stringify(_name), .mode = 0444 }, \
47 .show = _var##_show, \
48}
49
50struct device_attribute ptp_dev_attrs[] = {
51 PTP_RO_ATTR(clock_name, clock_name),
52 PTP_RO_ATTR(max_adj, max_adjustment),
53 PTP_RO_ATTR(n_alarm, n_alarms),
54 PTP_RO_ATTR(n_ext_ts, n_external_timestamps),
55 PTP_RO_ATTR(n_per_out, n_periodic_outputs),
56 PTP_RO_ATTR(pps, pps_available),
57 __ATTR_NULL,
58};
59
60static ssize_t extts_enable_store(struct device *dev,
61 struct device_attribute *attr,
62 const char *buf, size_t count)
63{
64 struct ptp_clock *ptp = dev_get_drvdata(dev);
65 struct ptp_clock_info *ops = ptp->info;
66 struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
67 int cnt, enable;
68 int err = -EINVAL;
69
70 cnt = sscanf(buf, "%u %d", &req.extts.index, &enable);
71 if (cnt != 2)
72 goto out;
73 if (req.extts.index >= ops->n_ext_ts)
74 goto out;
75
76 err = ops->enable(ops, &req, enable ? 1 : 0);
77 if (err)
78 goto out;
79
80 return count;
81out:
82 return err;
83}
84
85static ssize_t extts_fifo_show(struct device *dev,
86 struct device_attribute *attr, char *page)
87{
88 struct ptp_clock *ptp = dev_get_drvdata(dev);
89 struct timestamp_event_queue *queue = &ptp->tsevq;
90 struct ptp_extts_event event;
91 unsigned long flags;
92 size_t qcnt;
93 int cnt = 0;
94
95 memset(&event, 0, sizeof(event));
96
97 if (mutex_lock_interruptible(&ptp->tsevq_mux))
98 return -ERESTARTSYS;
99
100 spin_lock_irqsave(&queue->lock, flags);
101 qcnt = queue_cnt(queue);
102 if (qcnt) {
103 event = queue->buf[queue->head];
104 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
105 }
106 spin_unlock_irqrestore(&queue->lock, flags);
107
108 if (!qcnt)
109 goto out;
110
111 cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
112 event.index, event.t.sec, event.t.nsec);
113out:
114 mutex_unlock(&ptp->tsevq_mux);
115 return cnt;
116}
117
118static ssize_t period_store(struct device *dev,
119 struct device_attribute *attr,
120 const char *buf, size_t count)
121{
122 struct ptp_clock *ptp = dev_get_drvdata(dev);
123 struct ptp_clock_info *ops = ptp->info;
124 struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
125 int cnt, enable, err = -EINVAL;
126
127 cnt = sscanf(buf, "%u %lld %u %lld %u", &req.perout.index,
128 &req.perout.start.sec, &req.perout.start.nsec,
129 &req.perout.period.sec, &req.perout.period.nsec);
130 if (cnt != 5)
131 goto out;
132 if (req.perout.index >= ops->n_per_out)
133 goto out;
134
135 enable = req.perout.period.sec || req.perout.period.nsec;
136 err = ops->enable(ops, &req, enable);
137 if (err)
138 goto out;
139
140 return count;
141out:
142 return err;
143}
144
145static ssize_t pps_enable_store(struct device *dev,
146 struct device_attribute *attr,
147 const char *buf, size_t count)
148{
149 struct ptp_clock *ptp = dev_get_drvdata(dev);
150 struct ptp_clock_info *ops = ptp->info;
151 struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
152 int cnt, enable;
153 int err = -EINVAL;
154
155 if (!capable(CAP_SYS_TIME))
156 return -EPERM;
157
158 cnt = sscanf(buf, "%d", &enable);
159 if (cnt != 1)
160 goto out;
161
162 err = ops->enable(ops, &req, enable ? 1 : 0);
163 if (err)
164 goto out;
165
166 return count;
167out:
168 return err;
169}
170
171static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
172static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
173static DEVICE_ATTR(period, 0220, NULL, period_store);
174static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
175
176int ptp_cleanup_sysfs(struct ptp_clock *ptp)
177{
178 struct device *dev = ptp->dev;
179 struct ptp_clock_info *info = ptp->info;
180
181 if (info->n_ext_ts) {
182 device_remove_file(dev, &dev_attr_extts_enable);
183 device_remove_file(dev, &dev_attr_fifo);
184 }
185 if (info->n_per_out)
186 device_remove_file(dev, &dev_attr_period);
187
188 if (info->pps)
189 device_remove_file(dev, &dev_attr_pps_enable);
190
191 return 0;
192}
193
194int ptp_populate_sysfs(struct ptp_clock *ptp)
195{
196 struct device *dev = ptp->dev;
197 struct ptp_clock_info *info = ptp->info;
198 int err;
199
200 if (info->n_ext_ts) {
201 err = device_create_file(dev, &dev_attr_extts_enable);
202 if (err)
203 goto out1;
204 err = device_create_file(dev, &dev_attr_fifo);
205 if (err)
206 goto out2;
207 }
208 if (info->n_per_out) {
209 err = device_create_file(dev, &dev_attr_period);
210 if (err)
211 goto out3;
212 }
213 if (info->pps) {
214 err = device_create_file(dev, &dev_attr_pps_enable);
215 if (err)
216 goto out4;
217 }
218 return 0;
219out4:
220 if (info->n_per_out)
221 device_remove_file(dev, &dev_attr_period);
222out3:
223 if (info->n_ext_ts)
224 device_remove_file(dev, &dev_attr_fifo);
225out2:
226 if (info->n_ext_ts)
227 device_remove_file(dev, &dev_attr_extts_enable);
228out1:
229 return err;
230}
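
The attributes registered above live in the clock's device directory, which ptp_clock_register() creates in class "ptp" with the name "ptp%d", so they should appear as files such as /sys/class/ptp/ptp0/extts_enable and /sys/class/ptp/ptp0/fifo. A sketch of exercising that interface from userspace; the sysfs paths are assumed rather than spelled out by the patch:

    #include <stdio.h>

    int main(void)
    {
            const char *en   = "/sys/class/ptp/ptp0/extts_enable"; /* assumed path */
            const char *fifo = "/sys/class/ptp/ptp0/fifo";
            char line[128];
            FILE *f;

            f = fopen(en, "w");
            if (!f) {
                    perror(en);
                    return 1;
            }
            fprintf(f, "0 1\n");    /* "index enable": turn on EXTTS channel 0 */
            fclose(f);

            f = fopen(fifo, "r");
            if (!f) {
                    perror(fifo);
                    return 1;
            }
            /* Prints "index seconds nanoseconds", or nothing if the FIFO is empty. */
            if (fgets(line, sizeof(line), f))
                    printf("event: %s", line);
            fclose(f);
            return 0;
    }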
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 859251250b55..d63fddb0fbb0 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -15,7 +15,6 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/regulator/driver.h> 16#include <linux/regulator/driver.h>
17#include <linux/regulator/machine.h> 17#include <linux/regulator/machine.h>
18#include <linux/mfd/core.h>
19#include <linux/mfd/88pm860x.h> 18#include <linux/mfd/88pm860x.h>
20 19
21struct pm8607_regulator_info { 20struct pm8607_regulator_info {
@@ -399,36 +398,33 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
399{ 398{
400 struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); 399 struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
401 struct pm8607_regulator_info *info = NULL; 400 struct pm8607_regulator_info *info = NULL;
402 struct regulator_init_data *pdata; 401 struct regulator_init_data *pdata = pdev->dev.platform_data;
403 struct mfd_cell *cell; 402 struct resource *res;
404 int i; 403 int i;
405 404
406 cell = pdev->dev.platform_data; 405 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
407 if (cell == NULL) 406 if (res == NULL) {
408 return -ENODEV; 407 dev_err(&pdev->dev, "No I/O resource!\n");
409 pdata = cell->mfd_data;
410 if (pdata == NULL)
411 return -EINVAL; 408 return -EINVAL;
412 409 }
413 for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) { 410 for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
414 info = &pm8607_regulator_info[i]; 411 info = &pm8607_regulator_info[i];
415 if (!strcmp(info->desc.name, pdata->constraints.name)) 412 if (info->desc.id == res->start)
416 break; 413 break;
417 } 414 }
418 if (i > ARRAY_SIZE(pm8607_regulator_info)) { 415 if ((i < 0) || (i > PM8607_ID_RG_MAX)) {
419 dev_err(&pdev->dev, "Failed to find regulator %s\n", 416 dev_err(&pdev->dev, "Failed to find regulator %llu\n",
420 pdata->constraints.name); 417 (unsigned long long)res->start);
421 return -EINVAL; 418 return -EINVAL;
422 } 419 }
423
424 info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; 420 info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
425 info->chip = chip; 421 info->chip = chip;
426 422
427 /* check DVC ramp slope double */ 423 /* check DVC ramp slope double */
428 if (!strcmp(info->desc.name, "BUCK3")) 424 if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double)
429 if (info->chip->buck3_double) 425 info->slope_double = 1;
430 info->slope_double = 1;
431 426
427 /* replace driver_data with info */
432 info->regulator = regulator_register(&info->desc, &pdev->dev, 428 info->regulator = regulator_register(&info->desc, &pdev->dev,
433 pdata, info); 429 pdata, info);
434 if (IS_ERR(info->regulator)) { 430 if (IS_ERR(info->regulator)) {
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index b9f29e0d4295..d7ed20f293d7 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -274,6 +274,13 @@ config REGULATOR_AB8500
274 This driver supports the regulators found on the ST-Ericsson mixed 274 This driver supports the regulators found on the ST-Ericsson mixed
275 signal AB8500 PMIC 275 signal AB8500 PMIC
276 276
277config REGULATOR_DB8500_PRCMU
278 bool "ST-Ericsson DB8500 Voltage Domain Regulators"
279 depends on MFD_DB8500_PRCMU
280 help
281 This driver supports the voltage domain regulators controlled by the
282 DB8500 PRCMU
283
277config REGULATOR_TPS6586X 284config REGULATOR_TPS6586X
278 tristate "TI TPS6586X Power regulators" 285 tristate "TI TPS6586X Power regulators"
279 depends on MFD_TPS6586X 286 depends on MFD_TPS6586X
@@ -290,5 +297,11 @@ config REGULATOR_TPS6524X
290 serial interface currently supported on the sequencer serial 297 serial interface currently supported on the sequencer serial
291 port controller. 298 port controller.
292 299
300config REGULATOR_TPS65910
301 tristate "TI TPS65910 Power Regulator"
302 depends on MFD_TPS65910
303 help
304 This driver supports TPS65910 voltage regulator chips.
305
293endif 306endif
294 307
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index d72a42756778..3932d2ec38f3 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -41,5 +41,7 @@ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
41obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o 41obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
42obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o 42obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
43obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o 43obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
44obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
45obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
44 46
45ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG 47ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b1d77946e9c6..585e4946fe0a 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -17,7 +17,6 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/regulator/driver.h> 18#include <linux/regulator/driver.h>
19#include <linux/mfd/abx500.h> 19#include <linux/mfd/abx500.h>
20#include <linux/mfd/core.h>
21 20
22/* LDO registers and some handy masking definitions for AB3100 */ 21/* LDO registers and some handy masking definitions for AB3100 */
23#define AB3100_LDO_A 0x40 22#define AB3100_LDO_A 0x40
@@ -582,7 +581,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
582 581
583static int __devinit ab3100_regulators_probe(struct platform_device *pdev) 582static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
584{ 583{
585 struct ab3100_platform_data *plfdata = mfd_get_data(pdev); 584 struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
586 int err = 0; 585 int err = 0;
587 u8 data; 586 u8 data;
588 int i; 587 int i;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 0fae51c4845a..d3e38790906e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -158,6 +158,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
158 struct regulator *regulator; 158 struct regulator *regulator;
159 159
160 list_for_each_entry(regulator, &rdev->consumer_list, list) { 160 list_for_each_entry(regulator, &rdev->consumer_list, list) {
161 /*
162 * Assume consumers that didn't say anything are OK
163 * with anything in the constraint range.
164 */
165 if (!regulator->min_uV && !regulator->max_uV)
166 continue;
167
161 if (*max_uV > regulator->max_uV) 168 if (*max_uV > regulator->max_uV)
162 *max_uV = regulator->max_uV; 169 *max_uV = regulator->max_uV;
163 if (*min_uV < regulator->min_uV) 170 if (*min_uV < regulator->min_uV)
@@ -197,9 +204,9 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
197} 204}
198 205
199/* operating mode constraint check */ 206/* operating mode constraint check */
200static int regulator_check_mode(struct regulator_dev *rdev, int mode) 207static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
201{ 208{
202 switch (mode) { 209 switch (*mode) {
203 case REGULATOR_MODE_FAST: 210 case REGULATOR_MODE_FAST:
204 case REGULATOR_MODE_NORMAL: 211 case REGULATOR_MODE_NORMAL:
205 case REGULATOR_MODE_IDLE: 212 case REGULATOR_MODE_IDLE:
@@ -217,11 +224,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
217 rdev_err(rdev, "operation not allowed\n"); 224 rdev_err(rdev, "operation not allowed\n");
218 return -EPERM; 225 return -EPERM;
219 } 226 }
220 if (!(rdev->constraints->valid_modes_mask & mode)) { 227
221 rdev_err(rdev, "invalid mode %x\n", mode); 228 /* The modes are bitmasks, the most power hungry modes having
222 return -EINVAL; 229 * the lowest values. If the requested mode isn't supported
230 * try higher modes. */
231 while (*mode) {
232 if (rdev->constraints->valid_modes_mask & *mode)
233 return 0;
234 *mode /= 2;
223 } 235 }
224 return 0; 236
237 return -EINVAL;
225} 238}
226 239
227/* dynamic regulator mode switching constraint check */ 240/* dynamic regulator mode switching constraint check */
@@ -612,7 +625,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
612 output_uV, current_uA); 625 output_uV, current_uA);
613 626
614 /* check the new mode is allowed */ 627 /* check the new mode is allowed */
615 err = regulator_check_mode(rdev, mode); 628 err = regulator_mode_constrain(rdev, &mode);
616 if (err == 0) 629 if (err == 0)
617 rdev->desc->ops->set_mode(rdev, mode); 630 rdev->desc->ops->set_mode(rdev, mode);
618} 631}
@@ -718,6 +731,10 @@ static void print_constraints(struct regulator_dev *rdev)
718 count += sprintf(buf + count, "at %d mV ", ret / 1000); 731 count += sprintf(buf + count, "at %d mV ", ret / 1000);
719 } 732 }
720 733
734 if (constraints->uV_offset)
735 count += sprintf(buf, "%dmV offset ",
736 constraints->uV_offset / 1000);
737
721 if (constraints->min_uA && constraints->max_uA) { 738 if (constraints->min_uA && constraints->max_uA) {
722 if (constraints->min_uA == constraints->max_uA) 739 if (constraints->min_uA == constraints->max_uA)
723 count += sprintf(buf + count, "%d mA ", 740 count += sprintf(buf + count, "%d mA ",
@@ -1498,13 +1515,14 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
1498 */ 1515 */
1499int regulator_force_disable(struct regulator *regulator) 1516int regulator_force_disable(struct regulator *regulator)
1500{ 1517{
1518 struct regulator_dev *rdev = regulator->rdev;
1501 struct regulator_dev *supply_rdev = NULL; 1519 struct regulator_dev *supply_rdev = NULL;
1502 int ret; 1520 int ret;
1503 1521
1504 mutex_lock(&regulator->rdev->mutex); 1522 mutex_lock(&rdev->mutex);
1505 regulator->uA_load = 0; 1523 regulator->uA_load = 0;
1506 ret = _regulator_force_disable(regulator->rdev, &supply_rdev); 1524 ret = _regulator_force_disable(rdev, &supply_rdev);
1507 mutex_unlock(&regulator->rdev->mutex); 1525 mutex_unlock(&rdev->mutex);
1508 1526
1509 if (supply_rdev) 1527 if (supply_rdev)
1510 regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); 1528 regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
@@ -1634,6 +1652,9 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
1634 1652
1635 trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); 1653 trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
1636 1654
1655 min_uV += rdev->constraints->uV_offset;
1656 max_uV += rdev->constraints->uV_offset;
1657
1637 if (rdev->desc->ops->set_voltage) { 1658 if (rdev->desc->ops->set_voltage) {
1638 ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, 1659 ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
1639 &selector); 1660 &selector);
@@ -1858,18 +1879,22 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage);
1858 1879
1859static int _regulator_get_voltage(struct regulator_dev *rdev) 1880static int _regulator_get_voltage(struct regulator_dev *rdev)
1860{ 1881{
1861 int sel; 1882 int sel, ret;
1862 1883
1863 if (rdev->desc->ops->get_voltage_sel) { 1884 if (rdev->desc->ops->get_voltage_sel) {
1864 sel = rdev->desc->ops->get_voltage_sel(rdev); 1885 sel = rdev->desc->ops->get_voltage_sel(rdev);
1865 if (sel < 0) 1886 if (sel < 0)
1866 return sel; 1887 return sel;
1867 return rdev->desc->ops->list_voltage(rdev, sel); 1888 ret = rdev->desc->ops->list_voltage(rdev, sel);
1868 } 1889 } else if (rdev->desc->ops->get_voltage) {
1869 if (rdev->desc->ops->get_voltage) 1890 ret = rdev->desc->ops->get_voltage(rdev);
1870 return rdev->desc->ops->get_voltage(rdev); 1891 } else {
1871 else
1872 return -EINVAL; 1892 return -EINVAL;
1893 }
1894
1895 if (ret < 0)
1896 return ret;
1897 return ret - rdev->constraints->uV_offset;
1873} 1898}
1874 1899
1875/** 1900/**
@@ -2005,7 +2030,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
2005 } 2030 }
2006 2031
2007 /* constraints check */ 2032 /* constraints check */
2008 ret = regulator_check_mode(rdev, mode); 2033 ret = regulator_mode_constrain(rdev, &mode);
2009 if (ret < 0) 2034 if (ret < 0)
2010 goto out; 2035 goto out;
2011 2036
@@ -2081,16 +2106,26 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
2081 2106
2082 mutex_lock(&rdev->mutex); 2107 mutex_lock(&rdev->mutex);
2083 2108
2109 /*
2110 * first check to see if we can set modes at all, otherwise just
2111 * tell the consumer everything is OK.
2112 */
2084 regulator->uA_load = uA_load; 2113 regulator->uA_load = uA_load;
2085 ret = regulator_check_drms(rdev); 2114 ret = regulator_check_drms(rdev);
2086 if (ret < 0) 2115 if (ret < 0) {
2116 ret = 0;
2087 goto out; 2117 goto out;
2088 ret = -EINVAL; 2118 }
2089 2119
2090 /* sanity check */
2091 if (!rdev->desc->ops->get_optimum_mode) 2120 if (!rdev->desc->ops->get_optimum_mode)
2092 goto out; 2121 goto out;
2093 2122
2123 /*
2124	 * The driver implements get_optimum_mode(), so from here on
2125	 * any error indicates a potential real failure.
2126 */
2127 ret = -EINVAL;
2128
2094 /* get output voltage */ 2129 /* get output voltage */
2095 output_uV = _regulator_get_voltage(rdev); 2130 output_uV = _regulator_get_voltage(rdev);
2096 if (output_uV <= 0) { 2131 if (output_uV <= 0) {
@@ -2116,7 +2151,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
2116 mode = rdev->desc->ops->get_optimum_mode(rdev, 2151 mode = rdev->desc->ops->get_optimum_mode(rdev,
2117 input_uV, output_uV, 2152 input_uV, output_uV,
2118 total_uA_load); 2153 total_uA_load);
2119 ret = regulator_check_mode(rdev, mode); 2154 ret = regulator_mode_constrain(rdev, &mode);
2120 if (ret < 0) { 2155 if (ret < 0) {
2121 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", 2156 rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
2122 total_uA_load, input_uV, output_uV); 2157 total_uA_load, input_uV, output_uV);
@@ -2589,14 +2624,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2589 if (ret < 0) 2624 if (ret < 0)
2590 goto scrub; 2625 goto scrub;
2591 2626
2592 /* set supply regulator if it exists */
2593 if (init_data->supply_regulator && init_data->supply_regulator_dev) {
2594 dev_err(dev,
2595 "Supply regulator specified by both name and dev\n");
2596 ret = -EINVAL;
2597 goto scrub;
2598 }
2599
2600 if (init_data->supply_regulator) { 2627 if (init_data->supply_regulator) {
2601 struct regulator_dev *r; 2628 struct regulator_dev *r;
2602 int found = 0; 2629 int found = 0;
@@ -2621,14 +2648,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2621 goto scrub; 2648 goto scrub;
2622 } 2649 }
2623 2650
2624 if (init_data->supply_regulator_dev) {
2625 dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n");
2626 ret = set_supply(rdev,
2627 dev_get_drvdata(init_data->supply_regulator_dev));
2628 if (ret < 0)
2629 goto scrub;
2630 }
2631
2632 /* add consumers devices */ 2651 /* add consumers devices */
2633 for (i = 0; i < init_data->num_consumer_supplies; i++) { 2652 for (i = 0; i < init_data->num_consumer_supplies; i++) {
2634 ret = set_consumer_device_supply(rdev, 2653 ret = set_consumer_device_supply(rdev,
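
regulator_mode_constrain() above treats the REGULATOR_MODE_* values as a bitmask in which lower values mean higher power, and keeps halving the requested mode until it finds one permitted by valid_modes_mask. A standalone illustration of that fallback; the numeric values below are assumed to follow the framework's usual encoding (FAST 0x1, NORMAL 0x2, IDLE 0x4, STANDBY 0x8) and are not taken from this patch:

    #include <stdio.h>

    #define MODE_FAST       0x1     /* most power hungry */
    #define MODE_NORMAL     0x2
    #define MODE_IDLE       0x4
    #define MODE_STANDBY    0x8     /* least power hungry */

    /* Same fallback loop as regulator_mode_constrain() in the hunk above. */
    static int constrain_mode(unsigned int *mode, unsigned int valid_mask)
    {
            while (*mode) {
                    if (valid_mask & *mode)
                            return 0;
                    *mode /= 2;     /* fall back to a more power hungry mode */
            }
            return -1;
    }

    int main(void)
    {
            unsigned int mode = MODE_STANDBY;

            /* The regulator only supports NORMAL and IDLE, so STANDBY
             * degrades to IDLE rather than being rejected outright. */
            if (constrain_mode(&mode, MODE_NORMAL | MODE_IDLE) == 0)
                    printf("granted mode 0x%x\n", mode);    /* prints 0x4 */
            return 0;
    }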
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
new file mode 100644
index 000000000000..e5f7b8fe51f4
--- /dev/null
+++ b/drivers/regulator/db8500-prcmu.c
@@ -0,0 +1,558 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
6 * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
7 *
8 * Power domain regulators on DB8500
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/err.h>
14#include <linux/spinlock.h>
15#include <linux/platform_device.h>
16#include <linux/mfd/db8500-prcmu.h>
17#include <linux/regulator/driver.h>
18#include <linux/regulator/machine.h>
19#include <linux/regulator/db8500-prcmu.h>
20
21/*
22 * power state reference count
23 */
24static int power_state_active_cnt; /* will initialize to zero */
25static DEFINE_SPINLOCK(power_state_active_lock);
26
27static void power_state_active_enable(void)
28{
29 unsigned long flags;
30
31 spin_lock_irqsave(&power_state_active_lock, flags);
32 power_state_active_cnt++;
33 spin_unlock_irqrestore(&power_state_active_lock, flags);
34}
35
36static int power_state_active_disable(void)
37{
38 int ret = 0;
39 unsigned long flags;
40
41 spin_lock_irqsave(&power_state_active_lock, flags);
42 if (power_state_active_cnt <= 0) {
43 pr_err("power state: unbalanced enable/disable calls\n");
44 ret = -EINVAL;
45 goto out;
46 }
47
48 power_state_active_cnt--;
49out:
50 spin_unlock_irqrestore(&power_state_active_lock, flags);
51 return ret;
52}
53
54/*
55 * Exported interface for CPUIdle only. This function is called when interrupts
56 * are turned off. Hence, no locking.
57 */
58int power_state_active_is_enabled(void)
59{
60 return (power_state_active_cnt > 0);
61}
62
63/**
64 * struct db8500_regulator_info - db8500 regulator information
65 * @dev: device pointer
66 * @desc: regulator description
67 * @rdev: regulator device pointer
68 * @is_enabled: status of the regulator
69 * @epod_id: id for EPOD (power domain)
70 * @is_ramret: RAM retention switch for EPOD (power domain)
71 * @operating_point: operating point (only for vape, to be removed)
72 *
73 */
74struct db8500_regulator_info {
75 struct device *dev;
76 struct regulator_desc desc;
77 struct regulator_dev *rdev;
78 bool is_enabled;
79 u16 epod_id;
80 bool is_ramret;
81 bool exclude_from_power_state;
82 unsigned int operating_point;
83};
84
85static int db8500_regulator_enable(struct regulator_dev *rdev)
86{
87 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
88
89 if (info == NULL)
90 return -EINVAL;
91
92 dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n",
93 info->desc.name);
94
95 info->is_enabled = true;
96 if (!info->exclude_from_power_state)
97 power_state_active_enable();
98
99 return 0;
100}
101
102static int db8500_regulator_disable(struct regulator_dev *rdev)
103{
104 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
105 int ret = 0;
106
107 if (info == NULL)
108 return -EINVAL;
109
110 dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n",
111 info->desc.name);
112
113 info->is_enabled = false;
114 if (!info->exclude_from_power_state)
115 ret = power_state_active_disable();
116
117 return ret;
118}
119
120static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
121{
122 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
123
124 if (info == NULL)
125 return -EINVAL;
126
127 dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):"
128 " %i\n", info->desc.name, info->is_enabled);
129
130 return info->is_enabled;
131}
132
133/* db8500 regulator operations */
134static struct regulator_ops db8500_regulator_ops = {
135 .enable = db8500_regulator_enable,
136 .disable = db8500_regulator_disable,
137 .is_enabled = db8500_regulator_is_enabled,
138};
139
140/*
141 * EPOD control
142 */
143static bool epod_on[NUM_EPOD_ID];
144static bool epod_ramret[NUM_EPOD_ID];
145
146static int enable_epod(u16 epod_id, bool ramret)
147{
148 int ret;
149
150 if (ramret) {
151 if (!epod_on[epod_id]) {
152 ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
153 if (ret < 0)
154 return ret;
155 }
156 epod_ramret[epod_id] = true;
157 } else {
158 ret = prcmu_set_epod(epod_id, EPOD_STATE_ON);
159 if (ret < 0)
160 return ret;
161 epod_on[epod_id] = true;
162 }
163
164 return 0;
165}
166
167static int disable_epod(u16 epod_id, bool ramret)
168{
169 int ret;
170
171 if (ramret) {
172 if (!epod_on[epod_id]) {
173 ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
174 if (ret < 0)
175 return ret;
176 }
177 epod_ramret[epod_id] = false;
178 } else {
179 if (epod_ramret[epod_id]) {
180 ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
181 if (ret < 0)
182 return ret;
183 } else {
184 ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
185 if (ret < 0)
186 return ret;
187 }
188 epod_on[epod_id] = false;
189 }
190
191 return 0;
192}
193
194/*
195 * Regulator switch
196 */
197static int db8500_regulator_switch_enable(struct regulator_dev *rdev)
198{
199 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
200 int ret;
201
202 if (info == NULL)
203 return -EINVAL;
204
205 dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n",
206 info->desc.name);
207
208 ret = enable_epod(info->epod_id, info->is_ramret);
209 if (ret < 0) {
210 dev_err(rdev_get_dev(rdev),
211 "regulator-switch-%s-enable: prcmu call failed\n",
212 info->desc.name);
213 goto out;
214 }
215
216 info->is_enabled = true;
217out:
218 return ret;
219}
220
221static int db8500_regulator_switch_disable(struct regulator_dev *rdev)
222{
223 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
224 int ret;
225
226 if (info == NULL)
227 return -EINVAL;
228
229 dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n",
230 info->desc.name);
231
232 ret = disable_epod(info->epod_id, info->is_ramret);
233 if (ret < 0) {
234 dev_err(rdev_get_dev(rdev),
235 "regulator_switch-%s-disable: prcmu call failed\n",
236 info->desc.name);
237 goto out;
238 }
239
240	info->is_enabled = false;
241out:
242 return ret;
243}
244
245static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
246{
247 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
248
249 if (info == NULL)
250 return -EINVAL;
251
252 dev_vdbg(rdev_get_dev(rdev),
253 "regulator-switch-%s-is_enabled (is_enabled): %i\n",
254 info->desc.name, info->is_enabled);
255
256 return info->is_enabled;
257}
258
259static struct regulator_ops db8500_regulator_switch_ops = {
260 .enable = db8500_regulator_switch_enable,
261 .disable = db8500_regulator_switch_disable,
262 .is_enabled = db8500_regulator_switch_is_enabled,
263};
264
265/*
266 * Regulator information
267 */
268static struct db8500_regulator_info
269 db8500_regulator_info[DB8500_NUM_REGULATORS] = {
270 [DB8500_REGULATOR_VAPE] = {
271 .desc = {
272 .name = "db8500-vape",
273 .id = DB8500_REGULATOR_VAPE,
274 .ops = &db8500_regulator_ops,
275 .type = REGULATOR_VOLTAGE,
276 .owner = THIS_MODULE,
277 },
278 },
279 [DB8500_REGULATOR_VARM] = {
280 .desc = {
281 .name = "db8500-varm",
282 .id = DB8500_REGULATOR_VARM,
283 .ops = &db8500_regulator_ops,
284 .type = REGULATOR_VOLTAGE,
285 .owner = THIS_MODULE,
286 },
287 },
288 [DB8500_REGULATOR_VMODEM] = {
289 .desc = {
290 .name = "db8500-vmodem",
291 .id = DB8500_REGULATOR_VMODEM,
292 .ops = &db8500_regulator_ops,
293 .type = REGULATOR_VOLTAGE,
294 .owner = THIS_MODULE,
295 },
296 },
297 [DB8500_REGULATOR_VPLL] = {
298 .desc = {
299 .name = "db8500-vpll",
300 .id = DB8500_REGULATOR_VPLL,
301 .ops = &db8500_regulator_ops,
302 .type = REGULATOR_VOLTAGE,
303 .owner = THIS_MODULE,
304 },
305 },
306 [DB8500_REGULATOR_VSMPS1] = {
307 .desc = {
308 .name = "db8500-vsmps1",
309 .id = DB8500_REGULATOR_VSMPS1,
310 .ops = &db8500_regulator_ops,
311 .type = REGULATOR_VOLTAGE,
312 .owner = THIS_MODULE,
313 },
314 },
315 [DB8500_REGULATOR_VSMPS2] = {
316 .desc = {
317 .name = "db8500-vsmps2",
318 .id = DB8500_REGULATOR_VSMPS2,
319 .ops = &db8500_regulator_ops,
320 .type = REGULATOR_VOLTAGE,
321 .owner = THIS_MODULE,
322 },
323 .exclude_from_power_state = true,
324 },
325 [DB8500_REGULATOR_VSMPS3] = {
326 .desc = {
327 .name = "db8500-vsmps3",
328 .id = DB8500_REGULATOR_VSMPS3,
329 .ops = &db8500_regulator_ops,
330 .type = REGULATOR_VOLTAGE,
331 .owner = THIS_MODULE,
332 },
333 },
334 [DB8500_REGULATOR_VRF1] = {
335 .desc = {
336 .name = "db8500-vrf1",
337 .id = DB8500_REGULATOR_VRF1,
338 .ops = &db8500_regulator_ops,
339 .type = REGULATOR_VOLTAGE,
340 .owner = THIS_MODULE,
341 },
342 },
343 [DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
344 .desc = {
345 .name = "db8500-sva-mmdsp",
346 .id = DB8500_REGULATOR_SWITCH_SVAMMDSP,
347 .ops = &db8500_regulator_switch_ops,
348 .type = REGULATOR_VOLTAGE,
349 .owner = THIS_MODULE,
350 },
351 .epod_id = EPOD_ID_SVAMMDSP,
352 },
353 [DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
354 .desc = {
355 .name = "db8500-sva-mmdsp-ret",
356 .id = DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
357 .ops = &db8500_regulator_switch_ops,
358 .type = REGULATOR_VOLTAGE,
359 .owner = THIS_MODULE,
360 },
361 .epod_id = EPOD_ID_SVAMMDSP,
362 .is_ramret = true,
363 },
364 [DB8500_REGULATOR_SWITCH_SVAPIPE] = {
365 .desc = {
366 .name = "db8500-sva-pipe",
367 .id = DB8500_REGULATOR_SWITCH_SVAPIPE,
368 .ops = &db8500_regulator_switch_ops,
369 .type = REGULATOR_VOLTAGE,
370 .owner = THIS_MODULE,
371 },
372 .epod_id = EPOD_ID_SVAPIPE,
373 },
374 [DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
375 .desc = {
376 .name = "db8500-sia-mmdsp",
377 .id = DB8500_REGULATOR_SWITCH_SIAMMDSP,
378 .ops = &db8500_regulator_switch_ops,
379 .type = REGULATOR_VOLTAGE,
380 .owner = THIS_MODULE,
381 },
382 .epod_id = EPOD_ID_SIAMMDSP,
383 },
384 [DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
385 .desc = {
386 .name = "db8500-sia-mmdsp-ret",
387 .id = DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
388 .ops = &db8500_regulator_switch_ops,
389 .type = REGULATOR_VOLTAGE,
390 .owner = THIS_MODULE,
391 },
392 .epod_id = EPOD_ID_SIAMMDSP,
393 .is_ramret = true,
394 },
395 [DB8500_REGULATOR_SWITCH_SIAPIPE] = {
396 .desc = {
397 .name = "db8500-sia-pipe",
398 .id = DB8500_REGULATOR_SWITCH_SIAPIPE,
399 .ops = &db8500_regulator_switch_ops,
400 .type = REGULATOR_VOLTAGE,
401 .owner = THIS_MODULE,
402 },
403 .epod_id = EPOD_ID_SIAPIPE,
404 },
405 [DB8500_REGULATOR_SWITCH_SGA] = {
406 .desc = {
407 .name = "db8500-sga",
408 .id = DB8500_REGULATOR_SWITCH_SGA,
409 .ops = &db8500_regulator_switch_ops,
410 .type = REGULATOR_VOLTAGE,
411 .owner = THIS_MODULE,
412 },
413 .epod_id = EPOD_ID_SGA,
414 },
415 [DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
416 .desc = {
417 .name = "db8500-b2r2-mcde",
418 .id = DB8500_REGULATOR_SWITCH_B2R2_MCDE,
419 .ops = &db8500_regulator_switch_ops,
420 .type = REGULATOR_VOLTAGE,
421 .owner = THIS_MODULE,
422 },
423 .epod_id = EPOD_ID_B2R2_MCDE,
424 },
425 [DB8500_REGULATOR_SWITCH_ESRAM12] = {
426 .desc = {
427 .name = "db8500-esram12",
428 .id = DB8500_REGULATOR_SWITCH_ESRAM12,
429 .ops = &db8500_regulator_switch_ops,
430 .type = REGULATOR_VOLTAGE,
431 .owner = THIS_MODULE,
432 },
433 .epod_id = EPOD_ID_ESRAM12,
434 .is_enabled = true,
435 },
436 [DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
437 .desc = {
438 .name = "db8500-esram12-ret",
439 .id = DB8500_REGULATOR_SWITCH_ESRAM12RET,
440 .ops = &db8500_regulator_switch_ops,
441 .type = REGULATOR_VOLTAGE,
442 .owner = THIS_MODULE,
443 },
444 .epod_id = EPOD_ID_ESRAM12,
445 .is_ramret = true,
446 },
447 [DB8500_REGULATOR_SWITCH_ESRAM34] = {
448 .desc = {
449 .name = "db8500-esram34",
450 .id = DB8500_REGULATOR_SWITCH_ESRAM34,
451 .ops = &db8500_regulator_switch_ops,
452 .type = REGULATOR_VOLTAGE,
453 .owner = THIS_MODULE,
454 },
455 .epod_id = EPOD_ID_ESRAM34,
456 .is_enabled = true,
457 },
458 [DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
459 .desc = {
460 .name = "db8500-esram34-ret",
461 .id = DB8500_REGULATOR_SWITCH_ESRAM34RET,
462 .ops = &db8500_regulator_switch_ops,
463 .type = REGULATOR_VOLTAGE,
464 .owner = THIS_MODULE,
465 },
466 .epod_id = EPOD_ID_ESRAM34,
467 .is_ramret = true,
468 },
469};
470
471static int __devinit db8500_regulator_probe(struct platform_device *pdev)
472{
473 struct regulator_init_data *db8500_init_data =
474 dev_get_platdata(&pdev->dev);
475 int i, err;
476
477 /* register all regulators */
478 for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
479 struct db8500_regulator_info *info;
480 struct regulator_init_data *init_data = &db8500_init_data[i];
481
482 /* assign per-regulator data */
483 info = &db8500_regulator_info[i];
484 info->dev = &pdev->dev;
485
486 /* register with the regulator framework */
487 info->rdev = regulator_register(&info->desc, &pdev->dev,
488 init_data, info);
489 if (IS_ERR(info->rdev)) {
490 err = PTR_ERR(info->rdev);
491 dev_err(&pdev->dev, "failed to register %s: err %i\n",
492 info->desc.name, err);
493
494 /* if failing, unregister all earlier regulators */
495 i--;
496 while (i >= 0) {
497 info = &db8500_regulator_info[i];
498 regulator_unregister(info->rdev);
499 i--;
500 }
501 return err;
502 }
503
504 dev_dbg(rdev_get_dev(info->rdev),
505 "regulator-%s-probed\n", info->desc.name);
506 }
507
508 return 0;
509}
510
511static int __exit db8500_regulator_remove(struct platform_device *pdev)
512{
513 int i;
514
515 for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
516 struct db8500_regulator_info *info;
517 info = &db8500_regulator_info[i];
518
519 dev_vdbg(rdev_get_dev(info->rdev),
520 "regulator-%s-remove\n", info->desc.name);
521
522 regulator_unregister(info->rdev);
523 }
524
525 return 0;
526}
527
528static struct platform_driver db8500_regulator_driver = {
529 .driver = {
530 .name = "db8500-prcmu-regulators",
531 .owner = THIS_MODULE,
532 },
533 .probe = db8500_regulator_probe,
534 .remove = __exit_p(db8500_regulator_remove),
535};
536
537static int __init db8500_regulator_init(void)
538{
539 int ret;
540
541 ret = platform_driver_register(&db8500_regulator_driver);
542 if (ret < 0)
543 return ret;
544
545 return 0;
546}
547
548static void __exit db8500_regulator_exit(void)
549{
550 platform_driver_unregister(&db8500_regulator_driver);
551}
552
553arch_initcall(db8500_regulator_init);
554module_exit(db8500_regulator_exit);
555
556MODULE_AUTHOR("STMicroelectronics/ST-Ericsson");
557MODULE_DESCRIPTION("DB8500 regulator driver");
558MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 8ae147549c6a..e4dbd667c043 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -23,6 +23,10 @@
23#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */ 23#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */
24#define SD1_DVM_EN 6 /* SDV1 bit 6 */ 24#define SD1_DVM_EN 6 /* SDV1 bit 6 */
25 25
26/* bit definitions in SD & LDO control registers */
27#define OUT_ENABLE 0x1f /* Power U/D sequence as I2C */
28#define OUT_DISABLE 0x1e /* Power U/D sequence as I2C */
29
26struct max8925_regulator_info { 30struct max8925_regulator_info {
27 struct regulator_desc desc; 31 struct regulator_desc desc;
28 struct regulator_dev *regulator; 32 struct regulator_dev *regulator;
@@ -93,8 +97,8 @@ static int max8925_enable(struct regulator_dev *rdev)
93 struct max8925_regulator_info *info = rdev_get_drvdata(rdev); 97 struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
94 98
95 return max8925_set_bits(info->i2c, info->enable_reg, 99 return max8925_set_bits(info->i2c, info->enable_reg,
96 1 << info->enable_bit, 100 OUT_ENABLE << info->enable_bit,
97 1 << info->enable_bit); 101 OUT_ENABLE << info->enable_bit);
98} 102}
99 103
100static int max8925_disable(struct regulator_dev *rdev) 104static int max8925_disable(struct regulator_dev *rdev)
@@ -102,7 +106,8 @@ static int max8925_disable(struct regulator_dev *rdev)
102 struct max8925_regulator_info *info = rdev_get_drvdata(rdev); 106 struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
103 107
104 return max8925_set_bits(info->i2c, info->enable_reg, 108 return max8925_set_bits(info->i2c, info->enable_reg,
105 1 << info->enable_bit, 0); 109 OUT_ENABLE << info->enable_bit,
110 OUT_DISABLE << info->enable_bit);
106} 111}
107 112
108static int max8925_is_enabled(struct regulator_dev *rdev) 113static int max8925_is_enabled(struct regulator_dev *rdev)
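
The max8925 hunks above widen the enable/disable writes from a single bit to the full five-bit power up/down sequence field. The stand-alone snippet below illustrates the generic read-modify-write semantics implied by a set_bits(reg, mask, value) style helper; it is not the real max8925_set_bits() implementation, and the register contents are made up.

#include <stdint.h>
#include <stdio.h>

/* Generic mask/value update, mirroring what a set_bits() helper does. */
static uint8_t example_set_bits(uint8_t reg, uint8_t mask, uint8_t val)
{
	return (uint8_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
	uint8_t reg = 0x00;	/* made-up register content */
	uint8_t bit = 0;	/* assume enable_bit == 0 */

	reg = example_set_bits(reg, 0x1f << bit, 0x1f << bit);	/* OUT_ENABLE  */
	printf("enabled:  0x%02x\n", reg);			/* prints 0x1f */

	reg = example_set_bits(reg, 0x1f << bit, 0x1e << bit);	/* OUT_DISABLE */
	printf("disabled: 0x%02x\n", reg);			/* prints 0x1e */
	return 0;
}
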
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 77e0cfb30b23..10d5a1d9768e 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -267,7 +267,6 @@ static int max8997_get_enable_register(struct regulator_dev *rdev,
267 default: 267 default:
268 /* Not controllable or not exists */ 268 /* Not controllable or not exists */
269 return -EINVAL; 269 return -EINVAL;
270 break;
271 } 270 }
272 271
273 return 0; 272 return 0;
@@ -1033,11 +1032,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
1033 1032
1034 /* For the safety, set max voltage before setting up */ 1033 /* For the safety, set max voltage before setting up */
1035 for (i = 0; i < 8; i++) { 1034 for (i = 0; i < 8; i++) {
1036 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1), 1035 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
1037 max_buck1, 0x3f); 1036 max_buck1, 0x3f);
1038 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1), 1037 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
1039 max_buck2, 0x3f); 1038 max_buck2, 0x3f);
1040 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1), 1039 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
1041 max_buck5, 0x3f); 1040 max_buck5, 0x3f);
1042 } 1041 }
1043 1042
@@ -1114,13 +1113,13 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
1114 1113
1115 /* Initialize all the DVS related BUCK registers */ 1114 /* Initialize all the DVS related BUCK registers */
1116 for (i = 0; i < 8; i++) { 1115 for (i = 0; i < 8; i++) {
1117 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1), 1116 max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
1118 max8997->buck1_vol[i], 1117 max8997->buck1_vol[i],
1119 0x3f); 1118 0x3f);
1120 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1), 1119 max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
1121 max8997->buck2_vol[i], 1120 max8997->buck2_vol[i],
1122 0x3f); 1121 0x3f);
1123 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1), 1122 max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
1124 max8997->buck5_vol[i], 1123 max8997->buck5_vol[i],
1125 0x3f); 1124 0x3f);
1126 } 1125 }
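
The max8997 change replaces the 1-based BUCKxDVS(i + 1) macros with indexing from the BUCKxDVS1 register, which only works on the assumption that the eight DVS registers sit at consecutive addresses. The tiny self-contained check below spells out that equivalence with a made-up base address; the authoritative register map lives in the max8997 MFD header, not in this diff.

#include <assert.h>

#define EX_BUCK1DVS1	0x05				/* hypothetical base address */
#define EX_BUCK1DVS(x)	(EX_BUCK1DVS1 + (x) - 1)	/* old-style 1-based macro */

int main(void)
{
	for (int i = 0; i < 8; i++)
		assert(EX_BUCK1DVS(i + 1) == EX_BUCK1DVS1 + i);
	return 0;
}
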
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index f57e9c42fdb4..41a1495eec2b 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -732,13 +732,15 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
732 if (!pdata->buck1_set1) { 732 if (!pdata->buck1_set1) {
733 printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n"); 733 printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
734 WARN_ON(!pdata->buck1_set1); 734 WARN_ON(!pdata->buck1_set1);
735 return -EIO; 735 ret = -EIO;
736 goto err_free_mem;
736 } 737 }
737 /* Check if SET2 is not equal to 0 */ 738 /* Check if SET2 is not equal to 0 */
738 if (!pdata->buck1_set2) { 739 if (!pdata->buck1_set2) {
739 printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n"); 740 printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
740 WARN_ON(!pdata->buck1_set2); 741 WARN_ON(!pdata->buck1_set2);
741 return -EIO; 742 ret = -EIO;
743 goto err_free_mem;
742 } 744 }
743 745
744 gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1"); 746 gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -758,7 +760,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
758 max8998->buck1_vol[0] = i; 760 max8998->buck1_vol[0] = i;
759 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i); 761 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
760 if (ret) 762 if (ret)
761 return ret; 763 goto err_free_mem;
762 764
763 /* Set predefined value for BUCK1 register 2 */ 765 /* Set predefined value for BUCK1 register 2 */
764 i = 0; 766 i = 0;
@@ -770,7 +772,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
770 max8998->buck1_vol[1] = i; 772 max8998->buck1_vol[1] = i;
771 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i); 773 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
772 if (ret) 774 if (ret)
773 return ret; 775 goto err_free_mem;
774 776
775 /* Set predefined value for BUCK1 register 3 */ 777 /* Set predefined value for BUCK1 register 3 */
776 i = 0; 778 i = 0;
@@ -782,7 +784,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
782 max8998->buck1_vol[2] = i; 784 max8998->buck1_vol[2] = i;
783 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i); 785 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
784 if (ret) 786 if (ret)
785 return ret; 787 goto err_free_mem;
786 788
787 /* Set predefined value for BUCK1 register 4 */ 789 /* Set predefined value for BUCK1 register 4 */
788 i = 0; 790 i = 0;
@@ -794,7 +796,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
794 max8998->buck1_vol[3] = i; 796 max8998->buck1_vol[3] = i;
795 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i); 797 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
796 if (ret) 798 if (ret)
797 return ret; 799 goto err_free_mem;
798 800
799 } 801 }
800 802
@@ -803,7 +805,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
803 if (!pdata->buck2_set3) { 805 if (!pdata->buck2_set3) {
804 printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n"); 806 printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
805 WARN_ON(!pdata->buck2_set3); 807 WARN_ON(!pdata->buck2_set3);
806 return -EIO; 808 ret = -EIO;
809 goto err_free_mem;
807 } 810 }
808 gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3"); 811 gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
809 gpio_direction_output(pdata->buck2_set3, 812 gpio_direction_output(pdata->buck2_set3,
@@ -818,7 +821,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
818 max8998->buck2_vol[0] = i; 821 max8998->buck2_vol[0] = i;
819 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i); 822 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
820 if (ret) 823 if (ret)
821 return ret; 824 goto err_free_mem;
822 825
823 /* BUCK2 register 2 */ 826 /* BUCK2 register 2 */
824 i = 0; 827 i = 0;
@@ -830,7 +833,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
830 max8998->buck2_vol[1] = i; 833 max8998->buck2_vol[1] = i;
831 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i); 834 ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
832 if (ret) 835 if (ret)
833 return ret; 836 goto err_free_mem;
834 } 837 }
835 838
836 for (i = 0; i < pdata->num_regulators; i++) { 839 for (i = 0; i < pdata->num_regulators; i++) {
@@ -860,6 +863,7 @@ err:
860 if (rdev[i]) 863 if (rdev[i])
861 regulator_unregister(rdev[i]); 864 regulator_unregister(rdev[i]);
862 865
866err_free_mem:
863 kfree(max8998->rdev); 867 kfree(max8998->rdev);
864 kfree(max8998); 868 kfree(max8998);
865 869
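
The max8998 hunks convert several early returns in the probe path into ret = ...; goto err_free_mem; so that the memory allocated earlier in the function is always released. The following is a minimal, generic sketch of that single-exit cleanup pattern, with stand-in structures and error values; it is not the driver code itself.

#include <errno.h>
#include <stdlib.h>

struct ex_state { int dummy; };

int ex_probe(int fail_early)
{
	struct ex_state *st = malloc(sizeof(*st));
	int ret = 0;

	if (!st)
		return -ENOMEM;

	if (fail_early) {		/* stands in for the SET1/SET2 GPIO checks */
		ret = -EIO;
		goto err_free_mem;
	}

	/* ... further setup, each failure also jumping to err_free_mem ... */
	return 0;

err_free_mem:
	free(st);
	return ret;
}

int main(void)
{
	return ex_probe(1) == -EIO ? 0 : 1;	/* exercise the failure path */
}
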
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index b8a00c7fa441..730f43ad415b 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -15,7 +15,6 @@
15#include <linux/regulator/driver.h> 15#include <linux/regulator/driver.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mfd/core.h>
19#include <linux/slab.h> 18#include <linux/slab.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/err.h> 20#include <linux/err.h>
@@ -337,7 +336,8 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
337{ 336{
338 struct mc13xxx_regulator_priv *priv; 337 struct mc13xxx_regulator_priv *priv;
339 struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent); 338 struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
340 struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev); 339 struct mc13783_regulator_platform_data *pdata =
340 dev_get_platdata(&pdev->dev);
341 struct mc13783_regulator_init_data *init_data; 341 struct mc13783_regulator_init_data *init_data;
342 int i, ret; 342 int i, ret;
343 343
@@ -381,7 +381,8 @@ err:
381static int __devexit mc13783_regulator_remove(struct platform_device *pdev) 381static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
382{ 382{
383 struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); 383 struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
384 struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev); 384 struct mc13783_regulator_platform_data *pdata =
385 dev_get_platdata(&pdev->dev);
385 int i; 386 int i;
386 387
387 platform_set_drvdata(pdev, NULL); 388 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 6f15168e5ed4..3285d41842f2 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -15,7 +15,6 @@
15#include <linux/regulator/driver.h> 15#include <linux/regulator/driver.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mfd/core.h>
19#include <linux/slab.h> 18#include <linux/slab.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/err.h> 20#include <linux/err.h>
@@ -432,7 +431,8 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
432 int min_uV, int max_uV, unsigned *selector) 431 int min_uV, int max_uV, unsigned *selector)
433{ 432{
434 struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); 433 struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
435 int hi, value, val, mask, id = rdev_get_id(rdev); 434 int hi, value, mask, id = rdev_get_id(rdev);
435 u32 valread;
436 int ret; 436 int ret;
437 437
438 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", 438 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
@@ -448,15 +448,16 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
448 448
449 mc13xxx_lock(priv->mc13xxx); 449 mc13xxx_lock(priv->mc13xxx);
450 ret = mc13xxx_reg_read(priv->mc13xxx, 450 ret = mc13xxx_reg_read(priv->mc13xxx,
451 mc13892_regulators[id].vsel_reg, &val); 451 mc13892_regulators[id].vsel_reg, &valread);
452 if (ret) 452 if (ret)
453 goto err; 453 goto err;
454 454
455 hi = val & MC13892_SWITCHERS0_SWxHI; 455 if (value > 1375000)
456 if (value > 1375)
457 hi = 1; 456 hi = 1;
458 if (value < 1100) 457 else if (value < 1100000)
459 hi = 0; 458 hi = 0;
459 else
460 hi = valread & MC13892_SWITCHERS0_SWxHI;
460 461
461 if (hi) { 462 if (hi) {
462 value = (value - 1100000) / 25000; 463 value = (value - 1100000) / 25000;
@@ -465,8 +466,10 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
465 value = (value - 600000) / 25000; 466 value = (value - 600000) / 25000;
466 467
467 mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI; 468 mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
468 ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, 469 valread = (valread & ~mask) |
469 mask, value << mc13892_regulators[id].vsel_shift); 470 (value << mc13892_regulators[id].vsel_shift);
471 ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
472 valread);
470err: 473err:
471 mc13xxx_unlock(priv->mc13xxx); 474 mc13xxx_unlock(priv->mc13xxx);
472 475
@@ -521,7 +524,8 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
521{ 524{
522 struct mc13xxx_regulator_priv *priv; 525 struct mc13xxx_regulator_priv *priv;
523 struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent); 526 struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
524 struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev); 527 struct mc13xxx_regulator_platform_data *pdata =
528 dev_get_platdata(&pdev->dev);
525 struct mc13xxx_regulator_init_data *init_data; 529 struct mc13xxx_regulator_init_data *init_data;
526 int i, ret; 530 int i, ret;
527 u32 val; 531 u32 val;
@@ -595,7 +599,8 @@ err_free:
595static int __devexit mc13892_regulator_remove(struct platform_device *pdev) 599static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
596{ 600{
597 struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); 601 struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
598 struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev); 602 struct mc13xxx_regulator_platform_data *pdata =
603 dev_get_platdata(&pdev->dev);
599 int i; 604 int i;
600 605
601 platform_set_drvdata(pdev, NULL); 606 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 2bb5de1f2421..bc27ab136378 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
174 174
175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); 175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
176 176
177 BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages); 177 BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
178 178
179 return mc13xxx_regulators[id].voltages[val]; 179 return mc13xxx_regulators[id].voltages[val];
180} 180}
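
The one-character mc13xxx change above is an off-by-one fix: the selector read back from the chip indexes a zero-based voltage table, so for a table with n_voltages entries the last valid index is n_voltages - 1. The stand-alone snippet below, with made-up table contents, shows the boundary case the old check let through.

#include <assert.h>

int main(void)
{
	const int voltages[] = { 1500000, 1800000, 2500000, 2775000 };	/* made up */
	const unsigned n = sizeof(voltages) / sizeof(voltages[0]);
	unsigned val = n;		/* one past the last valid index */

	/* old condition is false for val == n, so BUG_ON(val > n) stays silent */
	assert(!(val > n));
	/* new condition is true, so BUG_ON(val >= n) fires before
	 * voltages[val] is read out of bounds */
	assert(val >= n);
	return 0;
}
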
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 1661499feda4..1011873896dc 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -137,7 +137,7 @@ static struct regulator_desc tps6105x_regulator_desc = {
137 */ 137 */
138static int __devinit tps6105x_regulator_probe(struct platform_device *pdev) 138static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
139{ 139{
140 struct tps6105x *tps6105x = mfd_get_data(pdev); 140 struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
141 struct tps6105x_platform_data *pdata = tps6105x->pdata; 141 struct tps6105x_platform_data *pdata = tps6105x->pdata;
142 int ret; 142 int ret;
143 143
@@ -158,13 +158,14 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
158 "failed to register regulator\n"); 158 "failed to register regulator\n");
159 return ret; 159 return ret;
160 } 160 }
161 platform_set_drvdata(pdev, tps6105x);
161 162
162 return 0; 163 return 0;
163} 164}
164 165
165static int __devexit tps6105x_regulator_remove(struct platform_device *pdev) 166static int __devexit tps6105x_regulator_remove(struct platform_device *pdev)
166{ 167{
167 struct tps6105x *tps6105x = platform_get_drvdata(pdev); 168 struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
168 regulator_unregister(tps6105x->regulator); 169 regulator_unregister(tps6105x->regulator);
169 return 0; 170 return 0;
170} 171}
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 60a7ca5409e9..fbddc15e1811 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -466,7 +466,6 @@ static struct regulator_ops tps65023_ldo_ops = {
466static int __devinit tps_65023_probe(struct i2c_client *client, 466static int __devinit tps_65023_probe(struct i2c_client *client,
467 const struct i2c_device_id *id) 467 const struct i2c_device_id *id)
468{ 468{
469 static int desc_id;
470 const struct tps_info *info = (void *)id->driver_data; 469 const struct tps_info *info = (void *)id->driver_data;
471 struct regulator_init_data *init_data; 470 struct regulator_init_data *init_data;
472 struct regulator_dev *rdev; 471 struct regulator_dev *rdev;
@@ -499,7 +498,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
499 tps->info[i] = info; 498 tps->info[i] = info;
500 499
501 tps->desc[i].name = info->name; 500 tps->desc[i].name = info->name;
502 tps->desc[i].id = desc_id++; 501 tps->desc[i].id = i;
503 tps->desc[i].n_voltages = num_voltages[i]; 502 tps->desc[i].n_voltages = num_voltages[i];
504 tps->desc[i].ops = (i > TPS65023_DCDC_3 ? 503 tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
505 &tps65023_ldo_ops : &tps65023_dcdc_ops); 504 &tps65023_ldo_ops : &tps65023_dcdc_ops);
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 064755290599..bfffabc21eda 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -553,7 +553,6 @@ static __devinit
553int tps6507x_pmic_probe(struct platform_device *pdev) 553int tps6507x_pmic_probe(struct platform_device *pdev)
554{ 554{
555 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); 555 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
556 static int desc_id;
557 struct tps_info *info = &tps6507x_pmic_regs[0]; 556 struct tps_info *info = &tps6507x_pmic_regs[0];
558 struct regulator_init_data *init_data; 557 struct regulator_init_data *init_data;
559 struct regulator_dev *rdev; 558 struct regulator_dev *rdev;
@@ -598,7 +597,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
598 } 597 }
599 598
600 tps->desc[i].name = info->name; 599 tps->desc[i].name = info->name;
601 tps->desc[i].id = desc_id++; 600 tps->desc[i].id = i;
602 tps->desc[i].n_voltages = num_voltages[i]; 601 tps->desc[i].n_voltages = num_voltages[i];
603 tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? 602 tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
604 &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops); 603 &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
new file mode 100644
index 000000000000..55dd4e6650db
--- /dev/null
+++ b/drivers/regulator/tps65910-regulator.c
@@ -0,0 +1,993 @@
1/*
2 * tps65910-regulator.c -- TI TPS65910/TPS65911 regulator driver
3 *
4 * Copyright 2010 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/err.h>
20#include <linux/platform_device.h>
21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/delay.h>
24#include <linux/slab.h>
25#include <linux/gpio.h>
26#include <linux/mfd/tps65910.h>
27
28#define TPS65910_REG_VRTC 0
29#define TPS65910_REG_VIO 1
30#define TPS65910_REG_VDD1 2
31#define TPS65910_REG_VDD2 3
32#define TPS65910_REG_VDD3 4
33#define TPS65910_REG_VDIG1 5
34#define TPS65910_REG_VDIG2 6
35#define TPS65910_REG_VPLL 7
36#define TPS65910_REG_VDAC 8
37#define TPS65910_REG_VAUX1 9
38#define TPS65910_REG_VAUX2 10
39#define TPS65910_REG_VAUX33 11
40#define TPS65910_REG_VMMC 12
41
42#define TPS65911_REG_VDDCTRL 4
43#define TPS65911_REG_LDO1 5
44#define TPS65911_REG_LDO2 6
45#define TPS65911_REG_LDO3 7
46#define TPS65911_REG_LDO4 8
47#define TPS65911_REG_LDO5 9
48#define TPS65911_REG_LDO6 10
49#define TPS65911_REG_LDO7 11
50#define TPS65911_REG_LDO8 12
51
52#define TPS65910_NUM_REGULATOR 13
53#define TPS65910_SUPPLY_STATE_ENABLED 0x1
54
55/* supported VIO voltages in millivolts */
56static const u16 VIO_VSEL_table[] = {
57 1500, 1800, 2500, 3300,
58};
59
60/* VSEL tables for TPS65910-specific LDOs and DCDCs */
61
62/* supported VDD3 voltages in millivolts */
63static const u16 VDD3_VSEL_table[] = {
64 5000,
65};
66
67/* supported VDIG1 voltages in millivolts */
68static const u16 VDIG1_VSEL_table[] = {
69 1200, 1500, 1800, 2700,
70};
71
72/* supported VDIG2 voltages in millivolts */
73static const u16 VDIG2_VSEL_table[] = {
74 1000, 1100, 1200, 1800,
75};
76
77/* supported VPLL voltages in millivolts */
78static const u16 VPLL_VSEL_table[] = {
79 1000, 1100, 1800, 2500,
80};
81
82/* supported VDAC voltages in millivolts */
83static const u16 VDAC_VSEL_table[] = {
84 1800, 2600, 2800, 2850,
85};
86
87/* supported VAUX1 voltages in millivolts */
88static const u16 VAUX1_VSEL_table[] = {
89 1800, 2500, 2800, 2850,
90};
91
92/* supported VAUX2 voltages in millivolts */
93static const u16 VAUX2_VSEL_table[] = {
94 1800, 2800, 2900, 3300,
95};
96
97/* supported VAUX33 voltages in millivolts */
98static const u16 VAUX33_VSEL_table[] = {
99 1800, 2000, 2800, 3300,
100};
101
102/* supported VMMC voltages in millivolts */
103static const u16 VMMC_VSEL_table[] = {
104 1800, 2800, 3000, 3300,
105};
106
107struct tps_info {
108 const char *name;
109 unsigned min_uV;
110 unsigned max_uV;
111 u8 table_len;
112 const u16 *table;
113};
114
115static struct tps_info tps65910_regs[] = {
116 {
117 .name = "VRTC",
118 },
119 {
120 .name = "VIO",
121 .min_uV = 1500000,
122 .max_uV = 3300000,
123 .table_len = ARRAY_SIZE(VIO_VSEL_table),
124 .table = VIO_VSEL_table,
125 },
126 {
127 .name = "VDD1",
128 .min_uV = 600000,
129 .max_uV = 4500000,
130 },
131 {
132 .name = "VDD2",
133 .min_uV = 600000,
134 .max_uV = 4500000,
135 },
136 {
137 .name = "VDD3",
138 .min_uV = 5000000,
139 .max_uV = 5000000,
140 .table_len = ARRAY_SIZE(VDD3_VSEL_table),
141 .table = VDD3_VSEL_table,
142 },
143 {
144 .name = "VDIG1",
145 .min_uV = 1200000,
146 .max_uV = 2700000,
147 .table_len = ARRAY_SIZE(VDIG1_VSEL_table),
148 .table = VDIG1_VSEL_table,
149 },
150 {
151 .name = "VDIG2",
152 .min_uV = 1000000,
153 .max_uV = 1800000,
154 .table_len = ARRAY_SIZE(VDIG2_VSEL_table),
155 .table = VDIG2_VSEL_table,
156 },
157 {
158 .name = "VPLL",
159 .min_uV = 1000000,
160 .max_uV = 2500000,
161 .table_len = ARRAY_SIZE(VPLL_VSEL_table),
162 .table = VPLL_VSEL_table,
163 },
164 {
165 .name = "VDAC",
166 .min_uV = 1800000,
167 .max_uV = 2850000,
168 .table_len = ARRAY_SIZE(VDAC_VSEL_table),
169 .table = VDAC_VSEL_table,
170 },
171 {
172 .name = "VAUX1",
173 .min_uV = 1800000,
174 .max_uV = 2850000,
175 .table_len = ARRAY_SIZE(VAUX1_VSEL_table),
176 .table = VAUX1_VSEL_table,
177 },
178 {
179 .name = "VAUX2",
180 .min_uV = 1800000,
181 .max_uV = 3300000,
182 .table_len = ARRAY_SIZE(VAUX2_VSEL_table),
183 .table = VAUX2_VSEL_table,
184 },
185 {
186 .name = "VAUX33",
187 .min_uV = 1800000,
188 .max_uV = 3300000,
189 .table_len = ARRAY_SIZE(VAUX33_VSEL_table),
190 .table = VAUX33_VSEL_table,
191 },
192 {
193 .name = "VMMC",
194 .min_uV = 1800000,
195 .max_uV = 3300000,
196 .table_len = ARRAY_SIZE(VMMC_VSEL_table),
197 .table = VMMC_VSEL_table,
198 },
199};
200
201static struct tps_info tps65911_regs[] = {
202 {
203 .name = "VIO",
204 .min_uV = 1500000,
205 .max_uV = 3300000,
206 .table_len = ARRAY_SIZE(VIO_VSEL_table),
207 .table = VIO_VSEL_table,
208 },
209 {
210 .name = "VDD1",
211 .min_uV = 600000,
212 .max_uV = 4500000,
213 },
214 {
215 .name = "VDD2",
216 .min_uV = 600000,
217 .max_uV = 4500000,
218 },
219 {
220 .name = "VDDCTRL",
221 .min_uV = 600000,
222 .max_uV = 1400000,
223 },
224 {
225 .name = "LDO1",
226 .min_uV = 1000000,
227 .max_uV = 3300000,
228 },
229 {
230 .name = "LDO2",
231 .min_uV = 1000000,
232 .max_uV = 3300000,
233 },
234 {
235 .name = "LDO3",
236 .min_uV = 1000000,
237 .max_uV = 3300000,
238 },
239 {
240 .name = "LDO4",
241 .min_uV = 1000000,
242 .max_uV = 3300000,
243 },
244 {
245 .name = "LDO5",
246 .min_uV = 1000000,
247 .max_uV = 3300000,
248 },
249 {
250 .name = "LDO6",
251 .min_uV = 1000000,
252 .max_uV = 3300000,
253 },
254 {
255 .name = "LDO7",
256 .min_uV = 1000000,
257 .max_uV = 3300000,
258 },
259 {
260 .name = "LDO8",
261 .min_uV = 1000000,
262 .max_uV = 3300000,
263 },
264};
265
266struct tps65910_reg {
267 struct regulator_desc desc[TPS65910_NUM_REGULATOR];
268 struct tps65910 *mfd;
269 struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
270 struct tps_info *info[TPS65910_NUM_REGULATOR];
271 struct mutex mutex;
272 int mode;
273 int (*get_ctrl_reg)(int);
274};
275
276static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
277{
278 u8 val;
279 int err;
280
281 err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
282 if (err)
283 return err;
284
285 return val;
286}
287
288static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
289{
290 return pmic->mfd->write(pmic->mfd, reg, 1, &val);
291}
292
293static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
294 u8 set_mask, u8 clear_mask)
295{
296 int err, data;
297
298 mutex_lock(&pmic->mutex);
299
300 data = tps65910_read(pmic, reg);
301 if (data < 0) {
302 dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
303 err = data;
304 goto out;
305 }
306
307 data &= ~clear_mask;
308 data |= set_mask;
309 err = tps65910_write(pmic, reg, data);
310 if (err)
311 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
312
313out:
314 mutex_unlock(&pmic->mutex);
315 return err;
316}
317
318static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
319{
320 int data;
321
322 mutex_lock(&pmic->mutex);
323
324 data = tps65910_read(pmic, reg);
325 if (data < 0)
326 dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
327
328 mutex_unlock(&pmic->mutex);
329 return data;
330}
331
332static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
333{
334 int err;
335
336 mutex_lock(&pmic->mutex);
337
338 err = tps65910_write(pmic, reg, val);
339 if (err < 0)
340 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
341
342 mutex_unlock(&pmic->mutex);
343 return err;
344}
345
346static int tps65910_get_ctrl_register(int id)
347{
348 switch (id) {
349 case TPS65910_REG_VRTC:
350 return TPS65910_VRTC;
351 case TPS65910_REG_VIO:
352 return TPS65910_VIO;
353 case TPS65910_REG_VDD1:
354 return TPS65910_VDD1;
355 case TPS65910_REG_VDD2:
356 return TPS65910_VDD2;
357 case TPS65910_REG_VDD3:
358 return TPS65910_VDD3;
359 case TPS65910_REG_VDIG1:
360 return TPS65910_VDIG1;
361 case TPS65910_REG_VDIG2:
362 return TPS65910_VDIG2;
363 case TPS65910_REG_VPLL:
364 return TPS65910_VPLL;
365 case TPS65910_REG_VDAC:
366 return TPS65910_VDAC;
367 case TPS65910_REG_VAUX1:
368 return TPS65910_VAUX1;
369 case TPS65910_REG_VAUX2:
370 return TPS65910_VAUX2;
371 case TPS65910_REG_VAUX33:
372 return TPS65910_VAUX33;
373 case TPS65910_REG_VMMC:
374 return TPS65910_VMMC;
375 default:
376 return -EINVAL;
377 }
378}
379
380static int tps65911_get_ctrl_register(int id)
381{
382 switch (id) {
383 case TPS65910_REG_VRTC:
384 return TPS65910_VRTC;
385 case TPS65910_REG_VIO:
386 return TPS65910_VIO;
387 case TPS65910_REG_VDD1:
388 return TPS65910_VDD1;
389 case TPS65910_REG_VDD2:
390 return TPS65910_VDD2;
391 case TPS65911_REG_VDDCTRL:
392 return TPS65911_VDDCTRL;
393 case TPS65911_REG_LDO1:
394 return TPS65911_LDO1;
395 case TPS65911_REG_LDO2:
396 return TPS65911_LDO2;
397 case TPS65911_REG_LDO3:
398 return TPS65911_LDO3;
399 case TPS65911_REG_LDO4:
400 return TPS65911_LDO4;
401 case TPS65911_REG_LDO5:
402 return TPS65911_LDO5;
403 case TPS65911_REG_LDO6:
404 return TPS65911_LDO6;
405 case TPS65911_REG_LDO7:
406 return TPS65911_LDO7;
407 case TPS65911_REG_LDO8:
408 return TPS65911_LDO8;
409 default:
410 return -EINVAL;
411 }
412}
413
414static int tps65910_is_enabled(struct regulator_dev *dev)
415{
416 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
417 int reg, value, id = rdev_get_id(dev);
418
419 reg = pmic->get_ctrl_reg(id);
420 if (reg < 0)
421 return reg;
422
423 value = tps65910_reg_read(pmic, reg);
424 if (value < 0)
425 return value;
426
427 return value & TPS65910_SUPPLY_STATE_ENABLED;
428}
429
430static int tps65910_enable(struct regulator_dev *dev)
431{
432 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
433 struct tps65910 *mfd = pmic->mfd;
434 int reg, id = rdev_get_id(dev);
435
436 reg = pmic->get_ctrl_reg(id);
437 if (reg < 0)
438 return reg;
439
440 return tps65910_set_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
441}
442
443static int tps65910_disable(struct regulator_dev *dev)
444{
445 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
446 struct tps65910 *mfd = pmic->mfd;
447 int reg, id = rdev_get_id(dev);
448
449 reg = pmic->get_ctrl_reg(id);
450 if (reg < 0)
451 return reg;
452
453 return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
454}
455
456
457static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
458{
459 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
460 struct tps65910 *mfd = pmic->mfd;
461 int reg, value, id = rdev_get_id(dev);
462
463 reg = pmic->get_ctrl_reg(id);
464 if (reg < 0)
465 return reg;
466
467 switch (mode) {
468 case REGULATOR_MODE_NORMAL:
469 return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT,
470 LDO_ST_MODE_BIT);
471 case REGULATOR_MODE_IDLE:
472 value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
473 return tps65910_set_bits(mfd, reg, value);
474 case REGULATOR_MODE_STANDBY:
475 return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
476 }
477
478 return -EINVAL;
479}
480
481static unsigned int tps65910_get_mode(struct regulator_dev *dev)
482{
483 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
484 int reg, value, id = rdev_get_id(dev);
485
486 reg = pmic->get_ctrl_reg(id);
487 if (reg < 0)
488 return reg;
489
490 value = tps65910_reg_read(pmic, reg);
491 if (value < 0)
492 return value;
493
494 if (value & LDO_ST_ON_BIT)
495 return REGULATOR_MODE_STANDBY;
496 else if (value & LDO_ST_MODE_BIT)
497 return REGULATOR_MODE_IDLE;
498 else
499 return REGULATOR_MODE_NORMAL;
500}
501
502static int tps65910_get_voltage_dcdc(struct regulator_dev *dev)
503{
504 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
505 int id = rdev_get_id(dev), voltage = 0;
506 int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
507
508 switch (id) {
509 case TPS65910_REG_VDD1:
510 opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
511 mult = tps65910_reg_read(pmic, TPS65910_VDD1);
512 mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
513 srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
514 sr = opvsel & VDD1_OP_CMD_MASK;
515 opvsel &= VDD1_OP_SEL_MASK;
516 srvsel &= VDD1_SR_SEL_MASK;
517 vselmax = 75;
518 break;
519 case TPS65910_REG_VDD2:
520 opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
521 mult = tps65910_reg_read(pmic, TPS65910_VDD2);
522 mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
523 srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
524 sr = opvsel & VDD2_OP_CMD_MASK;
525 opvsel &= VDD2_OP_SEL_MASK;
526 srvsel &= VDD2_SR_SEL_MASK;
527 vselmax = 75;
528 break;
529 case TPS65911_REG_VDDCTRL:
530 opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
531 srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
532 sr = opvsel & VDDCTRL_OP_CMD_MASK;
533 opvsel &= VDDCTRL_OP_SEL_MASK;
534 srvsel &= VDDCTRL_SR_SEL_MASK;
535 vselmax = 64;
536 break;
537 }
538
539 /* a gain-select value of 0 behaves like 1; 2 and 3 multiply normally */
540 if (!mult)
541 mult = 1;
542
543 if (sr) {
544 /* normalise to valid range */
545 if (srvsel < 3)
546 srvsel = 3;
547 if (srvsel > vselmax)
548 srvsel = vselmax;
549 srvsel -= 3;
550
551 voltage = (srvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
552 } else {
553
554 /* normalise to valid range */
555 if (opvsel < 3)
556 opvsel = 3;
557 if (opvsel > vselmax)
558 opvsel = vselmax;
559 opvsel -= 3;
560
561 voltage = (opvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
562 }
563
564 voltage *= mult;
565
566 return voltage;
567}
568
569static int tps65910_get_voltage(struct regulator_dev *dev)
570{
571 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
572 int reg, value, id = rdev_get_id(dev), voltage = 0;
573
574 reg = pmic->get_ctrl_reg(id);
575 if (reg < 0)
576 return reg;
577
578 value = tps65910_reg_read(pmic, reg);
579 if (value < 0)
580 return value;
581
582 switch (id) {
583 case TPS65910_REG_VIO:
584 case TPS65910_REG_VDIG1:
585 case TPS65910_REG_VDIG2:
586 case TPS65910_REG_VPLL:
587 case TPS65910_REG_VDAC:
588 case TPS65910_REG_VAUX1:
589 case TPS65910_REG_VAUX2:
590 case TPS65910_REG_VAUX33:
591 case TPS65910_REG_VMMC:
592 value &= LDO_SEL_MASK;
593 value >>= LDO_SEL_SHIFT;
594 break;
595 default:
596 return -EINVAL;
597 }
598
599 voltage = pmic->info[id]->table[value] * 1000;
600
601 return voltage;
602}
603
604static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
605{
606 return 5 * 1000 * 1000;
607}
608
609static int tps65911_get_voltage(struct regulator_dev *dev)
610{
611 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
612 int step_mv, id = rdev_get_id(dev);
613 u8 value, reg;
614
615 reg = pmic->get_ctrl_reg(id);
616
617 value = tps65910_reg_read(pmic, reg);
618
619 switch (id) {
620 case TPS65911_REG_LDO1:
621 case TPS65911_REG_LDO2:
622 case TPS65911_REG_LDO4:
623 value &= LDO1_SEL_MASK;
624 value >>= LDO_SEL_SHIFT;
625 /* The first 5 values of the selector correspond to 1V */
626 if (value < 5)
627 value = 0;
628 else
629 value -= 4;
630
631 step_mv = 50;
632 break;
633 case TPS65911_REG_LDO3:
634 case TPS65911_REG_LDO5:
635 case TPS65911_REG_LDO6:
636 case TPS65911_REG_LDO7:
637 case TPS65911_REG_LDO8:
638 value &= LDO3_SEL_MASK;
639 value >>= LDO_SEL_SHIFT;
640 /* The first 3 values of the selector correspond to 1V */
641 if (value < 3)
642 value = 0;
643 else
644 value -= 2;
645
646 step_mv = 100;
647 break;
648 case TPS65910_REG_VIO:
649 return pmic->info[id]->table[value] * 1000;
651 default:
652 return -EINVAL;
653 }
654
655 return (LDO_MIN_VOLT + value * step_mv) * 1000;
656}
657
658static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
659 unsigned selector)
660{
661 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
662 int id = rdev_get_id(dev), vsel;
663 int dcdc_mult = 0;
664
665 switch (id) {
666 case TPS65910_REG_VDD1:
667 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
668 if (dcdc_mult == 1)
669 dcdc_mult--;
670 vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
671
672 tps65910_modify_bits(pmic, TPS65910_VDD1,
673 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
674 VDD1_VGAIN_SEL_MASK);
675 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
676 break;
677 case TPS65910_REG_VDD2:
678 dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
679 if (dcdc_mult == 1)
680 dcdc_mult--;
681 vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
682
683 tps65910_modify_bits(pmic, TPS65910_VDD2,
684 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
685 VDD2_VGAIN_SEL_MASK);
686 tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
687 break;
688 case TPS65911_REG_VDDCTRL:
689 vsel = selector;
690 tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
691 }
692
693 return 0;
694}
695
696static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector)
697{
698 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
699 int reg, id = rdev_get_id(dev);
700
701 reg = pmic->get_ctrl_reg(id);
702 if (reg < 0)
703 return reg;
704
705 switch (id) {
706 case TPS65910_REG_VIO:
707 case TPS65910_REG_VDIG1:
708 case TPS65910_REG_VDIG2:
709 case TPS65910_REG_VPLL:
710 case TPS65910_REG_VDAC:
711 case TPS65910_REG_VAUX1:
712 case TPS65910_REG_VAUX2:
713 case TPS65910_REG_VAUX33:
714 case TPS65910_REG_VMMC:
715 return tps65910_modify_bits(pmic, reg,
716 (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
717 }
718
719 return -EINVAL;
720}
721
722static int tps65911_set_voltage(struct regulator_dev *dev, unsigned selector)
723{
724 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
725 int reg, id = rdev_get_id(dev);
726
727 reg = pmic->get_ctrl_reg(id);
728 if (reg < 0)
729 return reg;
730
731 switch (id) {
732 case TPS65911_REG_LDO1:
733 case TPS65911_REG_LDO2:
734 case TPS65911_REG_LDO4:
735 return tps65910_modify_bits(pmic, reg,
736 (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK);
737 case TPS65911_REG_LDO3:
738 case TPS65911_REG_LDO5:
739 case TPS65911_REG_LDO6:
740 case TPS65911_REG_LDO7:
741 case TPS65911_REG_LDO8:
742 case TPS65910_REG_VIO:
743 return tps65910_modify_bits(pmic, reg,
744 (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
745 }
746
747 return -EINVAL;
748}
749
750
751static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
752 unsigned selector)
753{
754 int volt, mult = 1, id = rdev_get_id(dev);
755
	switch (id) {
	case TPS65910_REG_VDD1:
	case TPS65910_REG_VDD2:
		mult = (selector / VDD1_2_NUM_VOLTS) + 1;
		volt = VDD1_2_MIN_VOLT +
				(selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
		break;
	case TPS65911_REG_VDDCTRL:
		volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
		break;
	default:
		return -EINVAL;
	}
765
766 return volt * 100 * mult;
767}
768
769static int tps65910_list_voltage(struct regulator_dev *dev,
770 unsigned selector)
771{
772 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
773 int id = rdev_get_id(dev), voltage;
774
775 if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
776 return -EINVAL;
777
778 if (selector >= pmic->info[id]->table_len)
779 return -EINVAL;
780 else
781 voltage = pmic->info[id]->table[selector] * 1000;
782
783 return voltage;
784}
785
786static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
787{
788 struct tps65910_reg *pmic = rdev_get_drvdata(dev);
789 int step_mv = 0, id = rdev_get_id(dev);
790
791 switch(id) {
792 case TPS65911_REG_LDO1:
793 case TPS65911_REG_LDO2:
794 case TPS65911_REG_LDO4:
795 /* The first 5 values of the selector correspond to 1V */
796 if (selector < 5)
797 selector = 0;
798 else
799 selector -= 4;
800
801 step_mv = 50;
802 break;
803 case TPS65911_REG_LDO3:
804 case TPS65911_REG_LDO5:
805 case TPS65911_REG_LDO6:
806 case TPS65911_REG_LDO7:
807 case TPS65911_REG_LDO8:
808 /* The first 3 values of the selector correspond to 1V */
809 if (selector < 3)
810 selector = 0;
811 else
812 selector -= 2;
813
814 step_mv = 100;
815 break;
816 case TPS65910_REG_VIO:
817 return pmic->info[id]->table[selector] * 1000;
818 default:
819 return -EINVAL;
820 }
821
822 return (LDO_MIN_VOLT + selector * step_mv) * 1000;
823}
824
825/* Regulator ops (except VRTC) */
826static struct regulator_ops tps65910_ops_dcdc = {
827 .is_enabled = tps65910_is_enabled,
828 .enable = tps65910_enable,
829 .disable = tps65910_disable,
830 .set_mode = tps65910_set_mode,
831 .get_mode = tps65910_get_mode,
832 .get_voltage = tps65910_get_voltage_dcdc,
833 .set_voltage_sel = tps65910_set_voltage_dcdc,
834 .list_voltage = tps65910_list_voltage_dcdc,
835};
836
837static struct regulator_ops tps65910_ops_vdd3 = {
838 .is_enabled = tps65910_is_enabled,
839 .enable = tps65910_enable,
840 .disable = tps65910_disable,
841 .set_mode = tps65910_set_mode,
842 .get_mode = tps65910_get_mode,
843 .get_voltage = tps65910_get_voltage_vdd3,
844 .list_voltage = tps65910_list_voltage,
845};
846
847static struct regulator_ops tps65910_ops = {
848 .is_enabled = tps65910_is_enabled,
849 .enable = tps65910_enable,
850 .disable = tps65910_disable,
851 .set_mode = tps65910_set_mode,
852 .get_mode = tps65910_get_mode,
853 .get_voltage = tps65910_get_voltage,
854 .set_voltage_sel = tps65910_set_voltage,
855 .list_voltage = tps65910_list_voltage,
856};
857
858static struct regulator_ops tps65911_ops = {
859 .is_enabled = tps65910_is_enabled,
860 .enable = tps65910_enable,
861 .disable = tps65910_disable,
862 .set_mode = tps65910_set_mode,
863 .get_mode = tps65910_get_mode,
864 .get_voltage = tps65911_get_voltage,
865 .set_voltage_sel = tps65911_set_voltage,
866 .list_voltage = tps65911_list_voltage,
867};
868
869static __devinit int tps65910_probe(struct platform_device *pdev)
870{
871 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
872 struct tps_info *info;
873 struct regulator_init_data *reg_data;
874 struct regulator_dev *rdev;
875 struct tps65910_reg *pmic;
876 struct tps65910_board *pmic_plat_data;
877 int i, err;
878
879 pmic_plat_data = dev_get_platdata(tps65910->dev);
880 if (!pmic_plat_data)
881 return -EINVAL;
882
883 reg_data = pmic_plat_data->tps65910_pmic_init_data;
884
885 pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
886 if (!pmic)
887 return -ENOMEM;
888
889 mutex_init(&pmic->mutex);
890 pmic->mfd = tps65910;
891 platform_set_drvdata(pdev, pmic);
892
893 /* Give control of all registers to the control port */
894 tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
895 DEVCTRL_SR_CTL_I2C_SEL_MASK);
896
	switch (tps65910_chip_id(tps65910)) {
	case TPS65910:
		pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
		info = tps65910_regs;
		break;
	case TPS65911:
		pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
		info = tps65911_regs;
		break;
	default:
		pr_err("Invalid tps chip version\n");
		kfree(pmic);
		return -ENODEV;
	}
908
909 for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
910 /* Register the regulators */
911 pmic->info[i] = info;
912
913 pmic->desc[i].name = info->name;
914 pmic->desc[i].id = i;
915 pmic->desc[i].n_voltages = info->table_len;
916
917 if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
918 pmic->desc[i].ops = &tps65910_ops_dcdc;
919 } else if (i == TPS65910_REG_VDD3) {
920 if (tps65910_chip_id(tps65910) == TPS65910)
921 pmic->desc[i].ops = &tps65910_ops_vdd3;
922 else
923 pmic->desc[i].ops = &tps65910_ops_dcdc;
924 } else {
925 if (tps65910_chip_id(tps65910) == TPS65910)
926 pmic->desc[i].ops = &tps65910_ops;
927 else
928 pmic->desc[i].ops = &tps65911_ops;
929 }
930
931 pmic->desc[i].type = REGULATOR_VOLTAGE;
932 pmic->desc[i].owner = THIS_MODULE;
933
934 rdev = regulator_register(&pmic->desc[i],
935 tps65910->dev, reg_data, pmic);
936 if (IS_ERR(rdev)) {
937 dev_err(tps65910->dev,
938 "failed to register %s regulator\n",
939 pdev->name);
940 err = PTR_ERR(rdev);
941 goto err;
942 }
943
944 /* Save regulator for cleanup */
945 pmic->rdev[i] = rdev;
946 }
947 return 0;
948
949err:
950 while (--i >= 0)
951 regulator_unregister(pmic->rdev[i]);
952
953 kfree(pmic);
954 return err;
955}
956
957static int __devexit tps65910_remove(struct platform_device *pdev)
958{
959 struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
960 int i;
961
962 for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
963 regulator_unregister(tps65910_reg->rdev[i]);
964
965 kfree(tps65910_reg);
966 return 0;
967}
968
969static struct platform_driver tps65910_driver = {
970 .driver = {
971 .name = "tps65910-pmic",
972 .owner = THIS_MODULE,
973 },
974 .probe = tps65910_probe,
975 .remove = __devexit_p(tps65910_remove),
976};
977
978static int __init tps65910_init(void)
979{
980 return platform_driver_register(&tps65910_driver);
981}
982subsys_initcall(tps65910_init);
983
984static void __exit tps65910_cleanup(void)
985{
986 platform_driver_unregister(&tps65910_driver);
987}
988module_exit(tps65910_cleanup);
989
990MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
991MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
992MODULE_LICENSE("GPL v2");
993MODULE_ALIAS("platform:tps65910-pmic");
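
To make the VDD1/VDD2 arithmetic in tps65910_get_voltage_dcdc() and tps65910_list_voltage_dcdc() above easier to follow, here is a worked, stand-alone version of the selector-to-microvolt mapping. The two constants are assumptions chosen to match the usual TPS65910 datasheet figures (600.0 mV minimum, 12.5 mV step, stored in units of 100 uV); the authoritative values are the VDD1_2_* macros in <linux/mfd/tps65910.h>, which are not part of this diff.

#include <stdio.h>

#define EX_VDD1_2_MIN_VOLT	6000	/* 600.0 mV in 100 uV units (assumed) */
#define EX_VDD1_2_OFFSET	125	/* 12.5 mV step in 100 uV units (assumed) */

static int ex_vdd_uV(int vsel, int gain)
{
	int mult = gain ? gain : 1;	/* a gain field of 0 behaves like 1 */

	if (vsel < 3)			/* selectors 0..2 clamp to the minimum */
		vsel = 3;
	vsel -= 3;

	return (vsel * EX_VDD1_2_OFFSET + EX_VDD1_2_MIN_VOLT) * 100 * mult;
}

int main(void)
{
	printf("%d uV\n", ex_vdd_uV(3, 0));	/* 600000: first real step */
	printf("%d uV\n", ex_vdd_uV(40, 1));	/* 1062500 */
	printf("%d uV\n", ex_vdd_uV(40, 2));	/* 2125000: x2 gain */
	return 0;
}
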
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 6a292852a358..87fe0f75a56e 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -51,8 +51,13 @@ struct twlreg_info {
51 u16 min_mV; 51 u16 min_mV;
52 u16 max_mV; 52 u16 max_mV;
53 53
54 u8 flags;
55
54 /* used by regulator core */ 56 /* used by regulator core */
55 struct regulator_desc desc; 57 struct regulator_desc desc;
58
59 /* chip specific features */
60 unsigned long features;
56}; 61};
57 62
58 63
@@ -70,12 +75,35 @@ struct twlreg_info {
70#define VREG_TRANS 1 75#define VREG_TRANS 1
71#define VREG_STATE 2 76#define VREG_STATE 2
72#define VREG_VOLTAGE 3 77#define VREG_VOLTAGE 3
78#define VREG_VOLTAGE_SMPS 4
73/* TWL6030 Misc register offsets */ 79/* TWL6030 Misc register offsets */
74#define VREG_BC_ALL 1 80#define VREG_BC_ALL 1
75#define VREG_BC_REF 2 81#define VREG_BC_REF 2
76#define VREG_BC_PROC 3 82#define VREG_BC_PROC 3
77#define VREG_BC_CLK_RST 4 83#define VREG_BC_CLK_RST 4
78 84
85/* TWL6030 LDO register values for CFG_STATE */
86#define TWL6030_CFG_STATE_OFF 0x00
87#define TWL6030_CFG_STATE_ON 0x01
88#define TWL6030_CFG_STATE_OFF2 0x02
89#define TWL6030_CFG_STATE_SLEEP 0x03
90#define TWL6030_CFG_STATE_GRP_SHIFT 5
91#define TWL6030_CFG_STATE_APP_SHIFT 2
92#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
93#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
94 TWL6030_CFG_STATE_APP_SHIFT)
95
96/* Flags for SMPS Voltage reading */
97#define SMPS_OFFSET_EN BIT(0)
98#define SMPS_EXTENDED_EN BIT(1)
99
100/* twl6025 SMPS EPROM values */
101#define TWL6030_SMPS_OFFSET 0xB0
102#define TWL6030_SMPS_MULT 0xB3
103#define SMPS_MULTOFFSET_SMPS4 BIT(0)
104#define SMPS_MULTOFFSET_VIO BIT(1)
105#define SMPS_MULTOFFSET_SMPS3 BIT(6)
106
79static inline int 107static inline int
80twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset) 108twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
81{ 109{
@@ -118,21 +146,38 @@ static int twlreg_grp(struct regulator_dev *rdev)
118#define P2_GRP_6030 BIT(1) /* "peripherals" */ 146#define P2_GRP_6030 BIT(1) /* "peripherals" */
119#define P1_GRP_6030 BIT(0) /* CPU/Linux */ 147#define P1_GRP_6030 BIT(0) /* CPU/Linux */
120 148
121static int twlreg_is_enabled(struct regulator_dev *rdev) 149static int twl4030reg_is_enabled(struct regulator_dev *rdev)
122{ 150{
123 int state = twlreg_grp(rdev); 151 int state = twlreg_grp(rdev);
124 152
125 if (state < 0) 153 if (state < 0)
126 return state; 154 return state;
127 155
128 if (twl_class_is_4030()) 156 return state & P1_GRP_4030;
129 state &= P1_GRP_4030; 157}
158
159static int twl6030reg_is_enabled(struct regulator_dev *rdev)
160{
161 struct twlreg_info *info = rdev_get_drvdata(rdev);
162 int grp = 0, val;
163
164 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
165 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
166 if (grp < 0)
167 return grp;
168
169 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
170 grp &= P1_GRP_6030;
130 else 171 else
131 state &= P1_GRP_6030; 172 grp = 1;
132 return state; 173
174 val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
175 val = TWL6030_CFG_STATE_APP(val);
176
177 return grp && (val == TWL6030_CFG_STATE_ON);
133} 178}
134 179
135static int twlreg_enable(struct regulator_dev *rdev) 180static int twl4030reg_enable(struct regulator_dev *rdev)
136{ 181{
137 struct twlreg_info *info = rdev_get_drvdata(rdev); 182 struct twlreg_info *info = rdev_get_drvdata(rdev);
138 int grp; 183 int grp;
@@ -142,10 +187,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
142 if (grp < 0) 187 if (grp < 0)
143 return grp; 188 return grp;
144 189
145 if (twl_class_is_4030()) 190 grp |= P1_GRP_4030;
146 grp |= P1_GRP_4030;
147 else
148 grp |= P1_GRP_6030;
149 191
150 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 192 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
151 193
@@ -154,29 +196,63 @@ static int twlreg_enable(struct regulator_dev *rdev)
154 return ret; 196 return ret;
155} 197}
156 198
157static int twlreg_disable(struct regulator_dev *rdev) 199static int twl6030reg_enable(struct regulator_dev *rdev)
200{
201 struct twlreg_info *info = rdev_get_drvdata(rdev);
202 int grp = 0;
203 int ret;
204
205 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
206 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
207 if (grp < 0)
208 return grp;
209
210 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
211 grp << TWL6030_CFG_STATE_GRP_SHIFT |
212 TWL6030_CFG_STATE_ON);
213
214 udelay(info->delay);
215
216 return ret;
217}
218
219static int twl4030reg_disable(struct regulator_dev *rdev)
158{ 220{
159 struct twlreg_info *info = rdev_get_drvdata(rdev); 221 struct twlreg_info *info = rdev_get_drvdata(rdev);
160 int grp; 222 int grp;
223 int ret;
161 224
162 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); 225 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
163 if (grp < 0) 226 if (grp < 0)
164 return grp; 227 return grp;
165 228
166 if (twl_class_is_4030()) 229 grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
167 grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
168 else
169 grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
170 230
171 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 231 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
232
233 return ret;
172} 234}
173 235
174static int twlreg_get_status(struct regulator_dev *rdev) 236static int twl6030reg_disable(struct regulator_dev *rdev)
175{ 237{
176 int state = twlreg_grp(rdev); 238 struct twlreg_info *info = rdev_get_drvdata(rdev);
239 int grp = 0;
240 int ret;
241
242 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
243 grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;
244
245 /* For 6030, set the OFF state for all enabled groups */
246 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
247 (grp) << TWL6030_CFG_STATE_GRP_SHIFT |
248 TWL6030_CFG_STATE_OFF);
249
250 return ret;
251}
177 252
178 if (twl_class_is_6030()) 253static int twl4030reg_get_status(struct regulator_dev *rdev)
179 return 0; /* FIXME return for 6030 regulator */ 254{
255 int state = twlreg_grp(rdev);
180 256
181 if (state < 0) 257 if (state < 0)
182 return state; 258 return state;
@@ -190,15 +266,39 @@ static int twlreg_get_status(struct regulator_dev *rdev)
190 : REGULATOR_STATUS_STANDBY; 266 : REGULATOR_STATUS_STANDBY;
191} 267}
192 268
193static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) 269static int twl6030reg_get_status(struct regulator_dev *rdev)
270{
271 struct twlreg_info *info = rdev_get_drvdata(rdev);
272 int val;
273
274 val = twlreg_grp(rdev);
275 if (val < 0)
276 return val;
277
278 val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
279
280 switch (TWL6030_CFG_STATE_APP(val)) {
281 case TWL6030_CFG_STATE_ON:
282 return REGULATOR_STATUS_NORMAL;
283
284 case TWL6030_CFG_STATE_SLEEP:
285 return REGULATOR_STATUS_STANDBY;
286
287 case TWL6030_CFG_STATE_OFF:
288 case TWL6030_CFG_STATE_OFF2:
289 default:
290 break;
291 }
292
293 return REGULATOR_STATUS_OFF;
294}
295
296static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
194{ 297{
195 struct twlreg_info *info = rdev_get_drvdata(rdev); 298 struct twlreg_info *info = rdev_get_drvdata(rdev);
196 unsigned message; 299 unsigned message;
197 int status; 300 int status;
198 301
199 if (twl_class_is_6030())
200 return 0; /* FIXME return for 6030 regulator */
201
202 /* We can only set the mode through state machine commands... */ 302 /* We can only set the mode through state machine commands... */
203 switch (mode) { 303 switch (mode) {
204 case REGULATOR_MODE_NORMAL: 304 case REGULATOR_MODE_NORMAL:
@@ -227,6 +327,36 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
227 message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB); 327 message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
228} 328}
229 329
330static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
331{
332 struct twlreg_info *info = rdev_get_drvdata(rdev);
333 int grp = 0;
334 int val;
335
336 if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
337 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
338
339 if (grp < 0)
340 return grp;
341
342 /* Compose the state register settings */
343 val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
344 /* We can only set the mode through state machine commands... */
345 switch (mode) {
346 case REGULATOR_MODE_NORMAL:
347 val |= TWL6030_CFG_STATE_ON;
348 break;
349 case REGULATOR_MODE_STANDBY:
350 val |= TWL6030_CFG_STATE_SLEEP;
351 break;
352
353 default:
354 return -EINVAL;
355 }
356
357 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
358}
359
230/*----------------------------------------------------------------------*/ 360/*----------------------------------------------------------------------*/
231 361
232/* 362/*
@@ -375,13 +505,13 @@ static struct regulator_ops twl4030ldo_ops = {
375 .set_voltage = twl4030ldo_set_voltage, 505 .set_voltage = twl4030ldo_set_voltage,
376 .get_voltage = twl4030ldo_get_voltage, 506 .get_voltage = twl4030ldo_get_voltage,
377 507
378 .enable = twlreg_enable, 508 .enable = twl4030reg_enable,
379 .disable = twlreg_disable, 509 .disable = twl4030reg_disable,
380 .is_enabled = twlreg_is_enabled, 510 .is_enabled = twl4030reg_is_enabled,
381 511
382 .set_mode = twlreg_set_mode, 512 .set_mode = twl4030reg_set_mode,
383 513
384 .get_status = twlreg_get_status, 514 .get_status = twl4030reg_get_status,
385}; 515};
386 516
387static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) 517static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
@@ -433,13 +563,13 @@ static struct regulator_ops twl6030ldo_ops = {
433 .set_voltage = twl6030ldo_set_voltage, 563 .set_voltage = twl6030ldo_set_voltage,
434 .get_voltage = twl6030ldo_get_voltage, 564 .get_voltage = twl6030ldo_get_voltage,
435 565
436 .enable = twlreg_enable, 566 .enable = twl6030reg_enable,
437 .disable = twlreg_disable, 567 .disable = twl6030reg_disable,
438 .is_enabled = twlreg_is_enabled, 568 .is_enabled = twl6030reg_is_enabled,
439 569
440 .set_mode = twlreg_set_mode, 570 .set_mode = twl6030reg_set_mode,
441 571
442 .get_status = twlreg_get_status, 572 .get_status = twl6030reg_get_status,
443}; 573};
444 574
445/*----------------------------------------------------------------------*/ 575/*----------------------------------------------------------------------*/
@@ -461,25 +591,242 @@ static int twlfixed_get_voltage(struct regulator_dev *rdev)
461 return info->min_mV * 1000; 591 return info->min_mV * 1000;
462} 592}
463 593
464static struct regulator_ops twlfixed_ops = { 594static struct regulator_ops twl4030fixed_ops = {
595 .list_voltage = twlfixed_list_voltage,
596
597 .get_voltage = twlfixed_get_voltage,
598
599 .enable = twl4030reg_enable,
600 .disable = twl4030reg_disable,
601 .is_enabled = twl4030reg_is_enabled,
602
603 .set_mode = twl4030reg_set_mode,
604
605 .get_status = twl4030reg_get_status,
606};
607
608static struct regulator_ops twl6030fixed_ops = {
465 .list_voltage = twlfixed_list_voltage, 609 .list_voltage = twlfixed_list_voltage,
466 610
467 .get_voltage = twlfixed_get_voltage, 611 .get_voltage = twlfixed_get_voltage,
468 612
469 .enable = twlreg_enable, 613 .enable = twl6030reg_enable,
470 .disable = twlreg_disable, 614 .disable = twl6030reg_disable,
471 .is_enabled = twlreg_is_enabled, 615 .is_enabled = twl6030reg_is_enabled,
472 616
473 .set_mode = twlreg_set_mode, 617 .set_mode = twl6030reg_set_mode,
474 618
475 .get_status = twlreg_get_status, 619 .get_status = twl6030reg_get_status,
476}; 620};
477 621
478static struct regulator_ops twl6030_fixed_resource = { 622static struct regulator_ops twl6030_fixed_resource = {
479 .enable = twlreg_enable, 623 .enable = twl6030reg_enable,
480 .disable = twlreg_disable, 624 .disable = twl6030reg_disable,
481 .is_enabled = twlreg_is_enabled, 625 .is_enabled = twl6030reg_is_enabled,
482 .get_status = twlreg_get_status, 626 .get_status = twl6030reg_get_status,
627};
628
629/*
630 * SMPS status and control
631 */
632
633static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
634{
635 struct twlreg_info *info = rdev_get_drvdata(rdev);
636
637 int voltage = 0;
638
639 switch (info->flags) {
640 case SMPS_OFFSET_EN:
641 voltage = 100000;
642 /* fall through */
643 case 0:
644 switch (index) {
645 case 0:
646 voltage = 0;
647 break;
648 case 58:
649 voltage = 1350 * 1000;
650 break;
651 case 59:
652 voltage = 1500 * 1000;
653 break;
654 case 60:
655 voltage = 1800 * 1000;
656 break;
657 case 61:
658 voltage = 1900 * 1000;
659 break;
660 case 62:
661 voltage = 2100 * 1000;
662 break;
663 default:
664 voltage += (600000 + (12500 * (index - 1)));
665 }
666 break;
667 case SMPS_EXTENDED_EN:
668 switch (index) {
669 case 0:
670 voltage = 0;
671 break;
672 case 58:
673 voltage = 2084 * 1000;
674 break;
675 case 59:
676 voltage = 2315 * 1000;
677 break;
678 case 60:
679 voltage = 2778 * 1000;
680 break;
681 case 61:
682 voltage = 2932 * 1000;
683 break;
684 case 62:
685 voltage = 3241 * 1000;
686 break;
687 default:
688 voltage = (1852000 + (38600 * (index - 1)));
689 }
690 break;
691 case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
692 switch (index) {
693 case 0:
694 voltage = 0;
695 break;
696 case 58:
697 voltage = 4167 * 1000;
698 break;
699 case 59:
700 voltage = 2315 * 1000;
701 break;
702 case 60:
703 voltage = 2778 * 1000;
704 break;
705 case 61:
706 voltage = 2932 * 1000;
707 break;
708 case 62:
709 voltage = 3241 * 1000;
710 break;
711 default:
712 voltage = (2161000 + (38600 * (index - 1)));
713 }
714 break;
715 }
716
717 return voltage;
718}
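
twl6030smps_list_voltage above mixes a linear region with fixed points: in the default range (flags == 0), selector 0 means off, selectors 1..57 map to 600 mV plus 12.5 mV per step, and 58..62 are the non-linear values listed in the switch. A small sketch of just that default-range mapping, kept outside the driver for clarity:

    #include <stdio.h>

    /* Sketch of the default-range (flags == 0) selector-to-microvolt mapping
     * used by twl6030smps_list_voltage: sel 0 is "off", 1..57 are linear,
     * 58..62 are fixed points. */
    static int smps_sel_to_uV(unsigned sel)
    {
            switch (sel) {
            case 0:  return 0;
            case 58: return 1350000;
            case 59: return 1500000;
            case 60: return 1800000;
            case 61: return 1900000;
            case 62: return 2100000;
            default: return 600000 + 12500 * (sel - 1);
            }
    }

    int main(void)
    {
            printf("sel  1 -> %d uV\n", smps_sel_to_uV(1));   /* 600000  */
            printf("sel 57 -> %d uV\n", smps_sel_to_uV(57));  /* 1300000 */
            printf("sel 58 -> %d uV\n", smps_sel_to_uV(58));  /* 1350000 */
            return 0;
    }
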
719
720static int
721twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
722 unsigned int *selector)
723{
724 struct twlreg_info *info = rdev_get_drvdata(rdev);
725 int vsel = 0;
726
727 switch (info->flags) {
728 case 0:
729 if (min_uV == 0)
730 vsel = 0;
731 else if ((min_uV >= 600000) && (max_uV <= 1300000)) {
732 vsel = (min_uV - 600000) / 125;
733 if (vsel % 100)
734 vsel += 100;
735 vsel /= 100;
736 vsel++;
737 }
 738 /* Values 1..57 for vsel are linear and can be calculated;
 739 * values 58..62 are non-linear.
 740 */
741 else if ((min_uV > 1900000) && (max_uV >= 2100000))
742 vsel = 62;
743 else if ((min_uV > 1800000) && (max_uV >= 1900000))
744 vsel = 61;
745 else if ((min_uV > 1500000) && (max_uV >= 1800000))
746 vsel = 60;
747 else if ((min_uV > 1350000) && (max_uV >= 1500000))
748 vsel = 59;
749 else if ((min_uV > 1300000) && (max_uV >= 1350000))
750 vsel = 58;
751 else
752 return -EINVAL;
753 break;
754 case SMPS_OFFSET_EN:
755 if (min_uV == 0)
756 vsel = 0;
757 else if ((min_uV >= 700000) && (max_uV <= 1420000)) {
758 vsel = (min_uV - 700000) / 125;
759 if (vsel % 100)
760 vsel += 100;
761 vsel /= 100;
762 vsel++;
763 }
 764 /* Values 1..57 for vsel are linear and can be calculated;
 765 * values 58..62 are non-linear.
 766 */
767 else if ((min_uV > 1900000) && (max_uV >= 2100000))
768 vsel = 62;
769 else if ((min_uV > 1800000) && (max_uV >= 1900000))
770 vsel = 61;
771 else if ((min_uV > 1350000) && (max_uV >= 1800000))
772 vsel = 60;
773 else if ((min_uV > 1350000) && (max_uV >= 1500000))
774 vsel = 59;
775 else if ((min_uV > 1300000) && (max_uV >= 1350000))
776 vsel = 58;
777 else
778 return -EINVAL;
779 break;
780 case SMPS_EXTENDED_EN:
781 if (min_uV == 0)
782 vsel = 0;
783 else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
784 vsel = (min_uV - 1852000) / 386;
785 if (vsel % 100)
786 vsel += 100;
787 vsel /= 100;
788 vsel++;
789 }
790 break;
791 case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
792 if (min_uV == 0)
793 vsel = 0;
794 else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
795 vsel = (min_uV - 1852000) / 386;
796 if (vsel % 100)
797 vsel += 100;
798 vsel /= 100;
799 vsel++;
800 }
801 break;
802 }
803
804 *selector = vsel;
805
806 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
807 vsel);
808}
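
The reverse mapping in twl6030smps_set_voltage rounds the requested minimum up to the next 12.5 mV step and then adds one because selector 0 is reserved for off; the divide-by-125 / round-to-100 / divide-by-100 sequence is easy to misread. The same computation in isolation (default range only, an illustration rather than a drop-in):

    #include <stdio.h>

    /* Round min_uV up to the next 12.5 mV step above 600 mV and return the
     * selector, mirroring the flags == 0 branch of twl6030smps_set_voltage. */
    static int uV_to_sel(int min_uV)
    {
            int vsel;

            if (min_uV == 0)
                    return 0;

            vsel = (min_uV - 600000) / 125;   /* now in 125 uV units; one step is 100 of these */
            if (vsel % 100)
                    vsel += 100;              /* round up to a full 12.5 mV step */
            vsel /= 100;
            return vsel + 1;                  /* selector 0 means "off" */
    }

    int main(void)
    {
            printf("600000 uV -> sel %d\n", uV_to_sel(600000));   /* 1 */
            printf("612500 uV -> sel %d\n", uV_to_sel(612500));   /* 2 */
            printf("613000 uV -> sel %d\n", uV_to_sel(613000));   /* 3, rounded up */
            return 0;
    }
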
809
810static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
811{
812 struct twlreg_info *info = rdev_get_drvdata(rdev);
813
814 return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
815}
816
817static struct regulator_ops twlsmps_ops = {
818 .list_voltage = twl6030smps_list_voltage,
819
820 .set_voltage = twl6030smps_set_voltage,
821 .get_voltage_sel = twl6030smps_get_voltage_sel,
822
823 .enable = twl6030reg_enable,
824 .disable = twl6030reg_disable,
825 .is_enabled = twl6030reg_is_enabled,
826
827 .set_mode = twl6030reg_set_mode,
828
829 .get_status = twl6030reg_get_status,
483}; 830};
484 831
485/*----------------------------------------------------------------------*/ 832/*----------------------------------------------------------------------*/
@@ -487,11 +834,10 @@ static struct regulator_ops twl6030_fixed_resource = {
487#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 834#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
488 remap_conf) \ 835 remap_conf) \
489 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 836 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
490 remap_conf, TWL4030) 837 remap_conf, TWL4030, twl4030fixed_ops)
491#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 838#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
492 remap_conf) \
493 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 839 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
494 remap_conf, TWL6030) 840 0x0, TWL6030, twl6030fixed_ops)
495 841
496#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ 842#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
497 .base = offset, \ 843 .base = offset, \
@@ -510,13 +856,11 @@ static struct regulator_ops twl6030_fixed_resource = {
510 }, \ 856 }, \
511 } 857 }
512 858
513#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \ 859#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
514 remap_conf) { \
515 .base = offset, \ 860 .base = offset, \
516 .id = num, \ 861 .id = num, \
517 .min_mV = min_mVolts, \ 862 .min_mV = min_mVolts, \
518 .max_mV = max_mVolts, \ 863 .max_mV = max_mVolts, \
519 .remap = remap_conf, \
520 .desc = { \ 864 .desc = { \
521 .name = #label, \ 865 .name = #label, \
522 .id = TWL6030_REG_##label, \ 866 .id = TWL6030_REG_##label, \
@@ -527,9 +871,23 @@ static struct regulator_ops twl6030_fixed_resource = {
527 }, \ 871 }, \
528 } 872 }
529 873
874#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
875 .base = offset, \
876 .id = num, \
877 .min_mV = min_mVolts, \
878 .max_mV = max_mVolts, \
879 .desc = { \
880 .name = #label, \
881 .id = TWL6025_REG_##label, \
882 .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \
883 .ops = &twl6030ldo_ops, \
884 .type = REGULATOR_VOLTAGE, \
885 .owner = THIS_MODULE, \
886 }, \
887 }
530 888
531#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \ 889#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
532 family) { \ 890 family, operations) { \
533 .base = offset, \ 891 .base = offset, \
534 .id = num, \ 892 .id = num, \
535 .min_mV = mVolts, \ 893 .min_mV = mVolts, \
@@ -539,17 +897,16 @@ static struct regulator_ops twl6030_fixed_resource = {
539 .name = #label, \ 897 .name = #label, \
540 .id = family##_REG_##label, \ 898 .id = family##_REG_##label, \
541 .n_voltages = 1, \ 899 .n_voltages = 1, \
542 .ops = &twlfixed_ops, \ 900 .ops = &operations, \
543 .type = REGULATOR_VOLTAGE, \ 901 .type = REGULATOR_VOLTAGE, \
544 .owner = THIS_MODULE, \ 902 .owner = THIS_MODULE, \
545 }, \ 903 }, \
546 } 904 }
547 905
548#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \ 906#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
549 .base = offset, \ 907 .base = offset, \
550 .id = num, \ 908 .id = num, \
551 .delay = turnon_delay, \ 909 .delay = turnon_delay, \
552 .remap = remap_conf, \
553 .desc = { \ 910 .desc = { \
554 .name = #label, \ 911 .name = #label, \
555 .id = TWL6030_REG_##label, \ 912 .id = TWL6030_REG_##label, \
@@ -559,6 +916,21 @@ static struct regulator_ops twl6030_fixed_resource = {
559 }, \ 916 }, \
560 } 917 }
561 918
919#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
920 .base = offset, \
921 .id = num, \
922 .min_mV = 600, \
923 .max_mV = 2100, \
924 .desc = { \
925 .name = #label, \
926 .id = TWL6025_REG_##label, \
927 .n_voltages = 63, \
928 .ops = &twlsmps_ops, \
929 .type = REGULATOR_VOLTAGE, \
930 .owner = THIS_MODULE, \
931 }, \
932 }
933
562/* 934/*
563 * We list regulators here if systems need some level of 935 * We list regulators here if systems need some level of
564 * software control over them after boot. 936 * software control over them after boot.
@@ -589,19 +961,52 @@ static struct twlreg_info twl_regs[] = {
589 /* 6030 REG with base as PMC Slave Misc : 0x0030 */ 961 /* 6030 REG with base as PMC Slave Misc : 0x0030 */
590 /* Turnon-delay and remap configuration values for 6030 are not 962 /* Turnon-delay and remap configuration values for 6030 are not
591 verified since the specification is not public */ 963 verified since the specification is not public */
592 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21), 964 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
593 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21), 965 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
594 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21), 966 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
595 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21), 967 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
596 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21), 968 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
597 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21), 969 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
598 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21), 970 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
599 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21), 971 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
600 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21), 972 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
601 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21), 973 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
602 TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21), 974 TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
975
 976 /* 6025 regulators are renamed compared to the 6030 versions */
977 TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
978 TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
979 TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
980 TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
981 TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
982 TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
983 TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
984 TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
985 TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
986
987 TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
988 TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
989 TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
603}; 990};
604 991
992static u8 twl_get_smps_offset(void)
993{
994 u8 value;
995
996 twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
997 TWL6030_SMPS_OFFSET);
998 return value;
999}
1000
1001static u8 twl_get_smps_mult(void)
1002{
1003 u8 value;
1004
1005 twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
1006 TWL6030_SMPS_MULT);
1007 return value;
1008}
1009
605static int __devinit twlreg_probe(struct platform_device *pdev) 1010static int __devinit twlreg_probe(struct platform_device *pdev)
606{ 1011{
607 int i; 1012 int i;
@@ -623,6 +1028,9 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
623 if (!initdata) 1028 if (!initdata)
624 return -EINVAL; 1029 return -EINVAL;
625 1030
1031 /* copy the features into regulator data */
1032 info->features = (unsigned long)initdata->driver_data;
1033
626 /* Constrain board-specific capabilities according to what 1034 /* Constrain board-specific capabilities according to what
627 * this driver and the chip itself can actually do. 1035 * this driver and the chip itself can actually do.
628 */ 1036 */
@@ -645,6 +1053,27 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
645 break; 1053 break;
646 } 1054 }
647 1055
1056 switch (pdev->id) {
1057 case TWL6025_REG_SMPS3:
1058 if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
1059 info->flags |= SMPS_EXTENDED_EN;
1060 if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
1061 info->flags |= SMPS_OFFSET_EN;
1062 break;
1063 case TWL6025_REG_SMPS4:
1064 if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
1065 info->flags |= SMPS_EXTENDED_EN;
1066 if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
1067 info->flags |= SMPS_OFFSET_EN;
1068 break;
1069 case TWL6025_REG_VIO:
1070 if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
1071 info->flags |= SMPS_EXTENDED_EN;
1072 if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
1073 info->flags |= SMPS_OFFSET_EN;
1074 break;
1075 }
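
The switch added to twlreg_probe derives each TWL6025 SMPS rail's range flags from one bit in the shared SMPS mult and offset registers: the mult bit selects the extended range, the offset bit adds the 100 mV offset. A stand-alone sketch of that decode; the per-rail bit positions and flag values here are assumptions for illustration only.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed per-rail bit positions -- illustrative only. */
    #define MULTOFFSET_SMPS3 (1 << 6)
    #define MULTOFFSET_SMPS4 (1 << 0)
    #define MULTOFFSET_VIO   (1 << 1)

    #define FLAG_EXTENDED (1 << 0)
    #define FLAG_OFFSET   (1 << 1)

    /* Derive the range flags for one rail from the two shared registers,
     * mirroring the switch added to twlreg_probe. */
    static unsigned decode_flags(uint8_t mult_reg, uint8_t offset_reg, uint8_t rail_bit)
    {
            unsigned flags = 0;

            if (mult_reg & rail_bit)
                    flags |= FLAG_EXTENDED;  /* extended (higher) voltage range */
            if (offset_reg & rail_bit)
                    flags |= FLAG_OFFSET;    /* +100 mV offset applied */
            return flags;
    }

    int main(void)
    {
            uint8_t mult = MULTOFFSET_SMPS3, offset = MULTOFFSET_VIO;

            printf("SMPS3 flags: 0x%x\n", decode_flags(mult, offset, MULTOFFSET_SMPS3));
            printf("VIO   flags: 0x%x\n", decode_flags(mult, offset, MULTOFFSET_VIO));
            return 0;
    }
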
1076
648 rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); 1077 rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
649 if (IS_ERR(rdev)) { 1078 if (IS_ERR(rdev)) {
650 dev_err(&pdev->dev, "can't register %s, %ld\n", 1079 dev_err(&pdev->dev, "can't register %s, %ld\n",
@@ -653,7 +1082,8 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
653 } 1082 }
654 platform_set_drvdata(pdev, rdev); 1083 platform_set_drvdata(pdev, rdev);
655 1084
656 twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP, 1085 if (twl_class_is_4030())
1086 twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
657 info->remap); 1087 info->remap);
658 1088
659 /* NOTE: many regulators support short-circuit IRQs (presentable 1089 /* NOTE: many regulators support short-circuit IRQs (presentable
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index e93453b1b978..a0982e809851 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -600,7 +600,6 @@ err:
600static __devexit int wm831x_buckv_remove(struct platform_device *pdev) 600static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
601{ 601{
602 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); 602 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
603 struct wm831x *wm831x = dcdc->wm831x;
604 603
605 platform_set_drvdata(pdev, NULL); 604 platform_set_drvdata(pdev, NULL);
606 605
@@ -776,7 +775,6 @@ err:
776static __devexit int wm831x_buckp_remove(struct platform_device *pdev) 775static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
777{ 776{
778 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); 777 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
779 struct wm831x *wm831x = dcdc->wm831x;
780 778
781 platform_set_drvdata(pdev, NULL); 779 platform_set_drvdata(pdev, NULL);
782 780
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index b42d01cef35a..0f12c70bebc9 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -55,7 +55,7 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
55 return 1600000 + ((selector - 14) * 100000); 55 return 1600000 + ((selector - 14) * 100000);
56} 56}
57 57
58static int wm8400_ldo_get_voltage(struct regulator_dev *dev) 58static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev)
59{ 59{
60 struct wm8400 *wm8400 = rdev_get_drvdata(dev); 60 struct wm8400 *wm8400 = rdev_get_drvdata(dev);
61 u16 val; 61 u16 val;
@@ -63,7 +63,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
63 val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev)); 63 val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
64 val &= WM8400_LDO1_VSEL_MASK; 64 val &= WM8400_LDO1_VSEL_MASK;
65 65
66 return wm8400_ldo_list_voltage(dev, val); 66 return val;
67} 67}
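
This wm8400 change switches the op from returning microvolts to returning the raw selector; the regulator core then resolves the selector through .list_voltage, so the voltage arithmetic lives in one place. A tiny sketch of that two-step lookup, using the DCDC formula that appears later in this file (850 mV plus 25 mV per step):

    #include <stdio.h>

    /* wm8400 DCDC selector-to-voltage formula from this diff. */
    static int dcdc_list_voltage(unsigned selector)
    {
            return 850000 + selector * 25000;
    }

    /* What the core effectively does once a driver provides get_voltage_sel:
     * read the selector, then resolve it through list_voltage. */
    static int get_voltage_via_sel(unsigned selector)
    {
            return dcdc_list_voltage(selector);
    }

    int main(void)
    {
            unsigned sel = 6;  /* example register value after masking the VSEL bits */

            printf("sel %u -> %d uV\n", sel, get_voltage_via_sel(sel)); /* 1000000 */
            return 0;
    }
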
68 68
69static int wm8400_ldo_set_voltage(struct regulator_dev *dev, 69static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
@@ -104,7 +104,7 @@ static struct regulator_ops wm8400_ldo_ops = {
104 .enable = wm8400_ldo_enable, 104 .enable = wm8400_ldo_enable,
105 .disable = wm8400_ldo_disable, 105 .disable = wm8400_ldo_disable,
106 .list_voltage = wm8400_ldo_list_voltage, 106 .list_voltage = wm8400_ldo_list_voltage,
107 .get_voltage = wm8400_ldo_get_voltage, 107 .get_voltage_sel = wm8400_ldo_get_voltage_sel,
108 .set_voltage = wm8400_ldo_set_voltage, 108 .set_voltage = wm8400_ldo_set_voltage,
109}; 109};
110 110
@@ -145,7 +145,7 @@ static int wm8400_dcdc_list_voltage(struct regulator_dev *dev,
145 return 850000 + (selector * 25000); 145 return 850000 + (selector * 25000);
146} 146}
147 147
148static int wm8400_dcdc_get_voltage(struct regulator_dev *dev) 148static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev)
149{ 149{
150 struct wm8400 *wm8400 = rdev_get_drvdata(dev); 150 struct wm8400 *wm8400 = rdev_get_drvdata(dev);
151 u16 val; 151 u16 val;
@@ -154,7 +154,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
154 val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset); 154 val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset);
155 val &= WM8400_DC1_VSEL_MASK; 155 val &= WM8400_DC1_VSEL_MASK;
156 156
157 return 850000 + (25000 * val); 157 return val;
158} 158}
159 159
160static int wm8400_dcdc_set_voltage(struct regulator_dev *dev, 160static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
@@ -261,7 +261,7 @@ static struct regulator_ops wm8400_dcdc_ops = {
261 .enable = wm8400_dcdc_enable, 261 .enable = wm8400_dcdc_enable,
262 .disable = wm8400_dcdc_disable, 262 .disable = wm8400_dcdc_disable,
263 .list_voltage = wm8400_dcdc_list_voltage, 263 .list_voltage = wm8400_dcdc_list_voltage,
264 .get_voltage = wm8400_dcdc_get_voltage, 264 .get_voltage_sel = wm8400_dcdc_get_voltage_sel,
265 .set_voltage = wm8400_dcdc_set_voltage, 265 .set_voltage = wm8400_dcdc_set_voltage,
266 .get_mode = wm8400_dcdc_get_mode, 266 .get_mode = wm8400_dcdc_get_mode,
267 .set_mode = wm8400_dcdc_set_mode, 267 .set_mode = wm8400_dcdc_set_mode,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 42891726ea72..f822e13dc04b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -125,6 +125,16 @@ comment "I2C RTC drivers"
125 125
126if I2C 126if I2C
127 127
128config RTC_DRV_88PM860X
129 tristate "Marvell 88PM860x"
130 depends on RTC_CLASS && I2C && MFD_88PM860X
131 help
 132 If you say yes here you get support for the RTC function in
 133 Marvell 88PM860x chips.
134
135 This driver can also be built as a module. If so, the module
136 will be called rtc-88pm860x.
137
128config RTC_DRV_DS1307 138config RTC_DRV_DS1307
129 tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025" 139 tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
130 help 140 help
@@ -351,12 +361,39 @@ config RTC_DRV_RX8025
351 This driver can also be built as a module. If so, the module 361 This driver can also be built as a module. If so, the module
352 will be called rtc-rx8025. 362 will be called rtc-rx8025.
353 363
364config RTC_DRV_EM3027
365 tristate "EM Microelectronic EM3027"
366 help
367 If you say yes here you get support for the EM
368 Microelectronic EM3027 RTC chips.
369
370 This driver can also be built as a module. If so, the module
371 will be called rtc-em3027.
372
373config RTC_DRV_RV3029C2
374 tristate "Micro Crystal RTC"
375 help
376 If you say yes here you get support for the Micro Crystal
377 RV3029-C2 RTC chips.
378
379 This driver can also be built as a module. If so, the module
380 will be called rtc-rv3029c2.
381
354endif # I2C 382endif # I2C
355 383
356comment "SPI RTC drivers" 384comment "SPI RTC drivers"
357 385
358if SPI_MASTER 386if SPI_MASTER
359 387
388config RTC_DRV_M41T93
389 tristate "ST M41T93"
390 help
391 If you say yes here you will get support for the
392 ST M41T93 SPI RTC chip.
393
394 This driver can also be built as a module. If so, the module
395 will be called rtc-m41t93.
396
360config RTC_DRV_M41T94 397config RTC_DRV_M41T94
361 tristate "ST M41T94" 398 tristate "ST M41T94"
362 help 399 help
@@ -645,6 +682,14 @@ config RTC_DRV_WM8350
645 This driver can also be built as a module. If so, the module 682 This driver can also be built as a module. If so, the module
646 will be called "rtc-wm8350". 683 will be called "rtc-wm8350".
647 684
685config RTC_DRV_SPEAR
686 tristate "SPEAR ST RTC"
687 depends on PLAT_SPEAR
688 default y
689 help
690 If you say Y here you will get support for the RTC found on
 691 SPEAr platforms.
692
648config RTC_DRV_PCF50633 693config RTC_DRV_PCF50633
649 depends on MFD_PCF50633 694 depends on MFD_PCF50633
650 tristate "NXP PCF50633 RTC" 695 tristate "NXP PCF50633 RTC"
@@ -874,6 +919,13 @@ config RTC_DRV_PXA
874 This RTC driver uses PXA RTC registers available since pxa27x 919 This RTC driver uses PXA RTC registers available since pxa27x
875 series (RDxR, RYxR) instead of legacy RCNR, RTAR. 920 series (RDxR, RYxR) instead of legacy RCNR, RTAR.
876 921
922config RTC_DRV_VT8500
923 tristate "VIA/WonderMedia 85xx SoC RTC"
924 depends on ARCH_VT8500
925 help
926 If you say Y here you will get access to the real time clock
927 built into your VIA VT8500 SoC or its relatives.
928
877 929
878config RTC_DRV_SUN4V 930config RTC_DRV_SUN4V
879 bool "SUN4V Hypervisor RTC" 931 bool "SUN4V Hypervisor RTC"
@@ -992,4 +1044,11 @@ config RTC_DRV_TEGRA
 992 This driver can also be built as a module. If so, the module 1044 This driver can also be built as a module. If so, the module
993 will be called rtc-tegra. 1045 will be called rtc-tegra.
994 1046
1047config RTC_DRV_TILE
1048 tristate "Tilera hypervisor RTC support"
1049 depends on TILE
1050 help
1051 Enable support for the Linux driver side of the Tilera
1052 hypervisor's real-time clock interface.
1053
995endif # RTC_CLASS 1054endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index ca91c3c42e98..213d725f16d4 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -15,6 +15,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
15 15
16# Keep the list ordered. 16# Keep the list ordered.
17 17
18obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
18obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o 19obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
19obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o 20obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
20obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o 21obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
43obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o 44obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o
44obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o 45obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o
45obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o 46obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o
47obj-$(CONFIG_RTC_DRV_EM3027) += rtc-em3027.o
46obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o 48obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
47obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o 49obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
48obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o 50obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
@@ -52,6 +54,7 @@ obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
52obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o 54obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
53obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o 55obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o
54obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o 56obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
57obj-$(CONFIG_RTC_DRV_M41T93) += rtc-m41t93.o
55obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o 58obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
56obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o 59obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
57obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o 60obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
@@ -81,22 +84,26 @@ obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
81obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 84obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
82obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o 85obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
83obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 86obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
87obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
84obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o 88obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
85obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o 89obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
86obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o 90obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
87obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o 91obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
88obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o 92obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
89obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 93obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
94obj-$(CONFIG_RTC_DRV_SPEAR) += rtc-spear.o
90obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o 95obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
91obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o 96obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
92obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o 97obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
93obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o 98obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
94obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o 99obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
95obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 100obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
101obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o
96obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o 102obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
97obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o 103obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
98obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o 104obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
99obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o 105obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
106obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
100obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o 107obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
101obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o 108obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
102obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 109obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
new file mode 100644
index 000000000000..64b847b7f970
--- /dev/null
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -0,0 +1,427 @@
1/*
2 * Real Time Clock driver for Marvell 88PM860x PMIC
3 *
4 * Copyright (c) 2010 Marvell International Ltd.
5 * Author: Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16#include <linux/mutex.h>
17#include <linux/rtc.h>
18#include <linux/delay.h>
19#include <linux/mfd/core.h>
20#include <linux/mfd/88pm860x.h>
21
22#define VRTC_CALIBRATION
23
24struct pm860x_rtc_info {
25 struct pm860x_chip *chip;
26 struct i2c_client *i2c;
27 struct rtc_device *rtc_dev;
28 struct device *dev;
29 struct delayed_work calib_work;
30
31 int irq;
32 int vrtc;
33 int (*sync)(unsigned int ticks);
34};
35
36#define REG_VRTC_MEAS1 0x7D
37
38#define REG0_ADDR 0xB0
39#define REG1_ADDR 0xB2
40#define REG2_ADDR 0xB4
41#define REG3_ADDR 0xB6
42
43#define REG0_DATA 0xB1
44#define REG1_DATA 0xB3
45#define REG2_DATA 0xB5
46#define REG3_DATA 0xB7
47
48/* bit definitions of Measurement Enable Register 2 (0x51) */
49#define MEAS2_VRTC (1 << 0)
50
51/* bit definitions of RTC Register 1 (0xA0) */
52#define ALARM_EN (1 << 3)
53#define ALARM_WAKEUP (1 << 4)
54#define ALARM (1 << 5)
55#define RTC1_USE_XO (1 << 7)
56
57#define VRTC_CALIB_INTERVAL (HZ * 60 * 10) /* 10 minutes */
58
59static irqreturn_t rtc_update_handler(int irq, void *data)
60{
61 struct pm860x_rtc_info *info = (struct pm860x_rtc_info *)data;
62 int mask;
63
64 mask = ALARM | ALARM_WAKEUP;
65 pm860x_set_bits(info->i2c, PM8607_RTC1, mask | ALARM_EN, mask);
66 rtc_update_irq(info->rtc_dev, 1, RTC_AF);
67 return IRQ_HANDLED;
68}
69
70static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
71{
72 struct pm860x_rtc_info *info = dev_get_drvdata(dev);
73
74 if (enabled)
75 pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, ALARM);
76 else
77 pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, 0);
78 return 0;
79}
80
81/*
82 * Calculate the next alarm time given the requested alarm time mask
83 * and the current time.
84 */
85static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
86 struct rtc_time *alrm)
87{
88 unsigned long next_time;
89 unsigned long now_time;
90
91 next->tm_year = now->tm_year;
92 next->tm_mon = now->tm_mon;
93 next->tm_mday = now->tm_mday;
94 next->tm_hour = alrm->tm_hour;
95 next->tm_min = alrm->tm_min;
96 next->tm_sec = alrm->tm_sec;
97
98 rtc_tm_to_time(now, &now_time);
99 rtc_tm_to_time(next, &next_time);
100
101 if (next_time < now_time) {
102 /* Advance one day */
103 next_time += 60 * 60 * 24;
104 rtc_time_to_tm(next_time, next);
105 }
106}
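
rtc_next_alarm_time keeps today's date, takes the alarm's time of day, and pushes the result one day forward if it has already passed. The same rule expressed on a plain seconds counter, as a quick illustration:

    #include <stdio.h>

    #define SECS_PER_DAY (24 * 60 * 60)

    /* Same rollover rule as rtc_next_alarm_time: keep today's date, take the
     * alarm's time of day, and if that instant is already in the past
     * advance one day. */
    static unsigned long next_alarm(unsigned long now, unsigned long alarm_tod)
    {
            unsigned long midnight = now - (now % SECS_PER_DAY);
            unsigned long next = midnight + alarm_tod;

            if (next < now)
                    next += SECS_PER_DAY;
            return next;
    }

    int main(void)
    {
            unsigned long now = 1000UL * SECS_PER_DAY + 8 * 3600;  /* 08:00 on day 1000 */

            printf("alarm 06:30 falls on day %lu\n",
                   next_alarm(now, 6 * 3600 + 30 * 60) / SECS_PER_DAY); /* 1001 */
            printf("alarm 09:00 falls on day %lu\n",
                   next_alarm(now, 9 * 3600) / SECS_PER_DAY);           /* 1000 */
            return 0;
    }
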
107
108static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
109{
110 struct pm860x_rtc_info *info = dev_get_drvdata(dev);
111 unsigned char buf[8];
112 unsigned long ticks, base, data;
113
114 pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
115 dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
116 buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
117 base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
118
119 /* load 32-bit read-only counter */
120 pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
121 data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
122 ticks = base + data;
123 dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
124 base, data, ticks);
125
126 rtc_time_to_tm(ticks, tm);
127
128 return 0;
129}
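
The 88PM860x keeps wall time as a stored 32-bit base plus a free-running read-only counter: read_time rebuilds the base from the scratch-register bytes at buf[1], buf[3], buf[5] and buf[7] (apparently because the eight-byte read spans the address/data register pairs) and adds the little-endian counter, while set_time stores base = requested_ticks - counter. The byte packing in isolation; the helper names and example values are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* Rebuild the 32-bit base the way pm860x_rtc_read_time does:
     * the data bytes sit at buf[1], buf[3], buf[5], buf[7]. */
    static uint32_t unpack_base(const uint8_t buf[8])
    {
            return ((uint32_t)buf[1] << 24) | ((uint32_t)buf[3] << 16) |
                   ((uint32_t)buf[5] << 8)  |  (uint32_t)buf[7];
    }

    /* The read-only counter is plain little-endian in buf[0..3]. */
    static uint32_t unpack_counter(const uint8_t buf[4])
    {
            return ((uint32_t)buf[3] << 24) | ((uint32_t)buf[2] << 16) |
                   ((uint32_t)buf[1] << 8)  |  (uint32_t)buf[0];
    }

    int main(void)
    {
            uint8_t scratch[8] = { 0, 0x65, 0, 0x4f, 0, 0xd6, 0, 0x00 };
            uint8_t counter[4] = { 0x10, 0x27, 0x00, 0x00 };   /* 10000 */

            uint32_t base  = unpack_base(scratch);
            uint32_t ticks = base + unpack_counter(counter);

            printf("base 0x%08x + counter -> ticks 0x%08x\n",
                   (unsigned)base, (unsigned)ticks);
            return 0;
    }
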
130
131static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
132{
133 struct pm860x_rtc_info *info = dev_get_drvdata(dev);
134 unsigned char buf[4];
135 unsigned long ticks, base, data;
136
137 if ((tm->tm_year < 70) || (tm->tm_year > 138)) {
138 dev_dbg(info->dev, "Set time %d out of range. "
139 "Please set time between 1970 to 2038.\n",
140 1900 + tm->tm_year);
141 return -EINVAL;
142 }
143 rtc_tm_to_time(tm, &ticks);
144
145 /* load 32-bit read-only counter */
146 pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
147 data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
148 base = ticks - data;
149 dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
150 base, data, ticks);
151
152 pm860x_page_reg_write(info->i2c, REG0_DATA, (base >> 24) & 0xFF);
153 pm860x_page_reg_write(info->i2c, REG1_DATA, (base >> 16) & 0xFF);
154 pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF);
155 pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF);
156
157 if (info->sync)
158 info->sync(ticks);
159 return 0;
160}
161
162static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
163{
164 struct pm860x_rtc_info *info = dev_get_drvdata(dev);
165 unsigned char buf[8];
166 unsigned long ticks, base, data;
167 int ret;
168
169 pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
170 dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
171 buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
172 base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
173
174 pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
175 data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
176 ticks = base + data;
177 dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
178 base, data, ticks);
179
180 rtc_time_to_tm(ticks, &alrm->time);
181 ret = pm860x_reg_read(info->i2c, PM8607_RTC1);
182 alrm->enabled = (ret & ALARM_EN) ? 1 : 0;
183 alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 1 : 0;
184 return 0;
185}
186
187static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
188{
189 struct pm860x_rtc_info *info = dev_get_drvdata(dev);
190 struct rtc_time now_tm, alarm_tm;
191 unsigned long ticks, base, data;
192 unsigned char buf[8];
193 int mask;
194
195 pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0);
196
197 pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
198 dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
199 buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
200 base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
201
202 /* load 32-bit read-only counter */
203 pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
204 data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
205 ticks = base + data;
206 dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
207 base, data, ticks);
208
209 rtc_time_to_tm(ticks, &now_tm);
210 rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
 211 /* get new ticks for the alarm within the next 24 hours */
212 rtc_tm_to_time(&alarm_tm, &ticks);
213 data = ticks - base;
214
215 buf[0] = data & 0xff;
216 buf[1] = (data >> 8) & 0xff;
217 buf[2] = (data >> 16) & 0xff;
218 buf[3] = (data >> 24) & 0xff;
219 pm860x_bulk_write(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
220 if (alrm->enabled) {
221 mask = ALARM | ALARM_WAKEUP | ALARM_EN;
222 pm860x_set_bits(info->i2c, PM8607_RTC1, mask, mask);
223 } else {
224 mask = ALARM | ALARM_WAKEUP | ALARM_EN;
225 pm860x_set_bits(info->i2c, PM8607_RTC1, mask,
226 ALARM | ALARM_WAKEUP);
227 }
228 return 0;
229}
230
231static const struct rtc_class_ops pm860x_rtc_ops = {
232 .read_time = pm860x_rtc_read_time,
233 .set_time = pm860x_rtc_set_time,
234 .read_alarm = pm860x_rtc_read_alarm,
235 .set_alarm = pm860x_rtc_set_alarm,
236 .alarm_irq_enable = pm860x_rtc_alarm_irq_enable,
237};
238
239#ifdef VRTC_CALIBRATION
240static void calibrate_vrtc_work(struct work_struct *work)
241{
242 struct pm860x_rtc_info *info = container_of(work,
243 struct pm860x_rtc_info, calib_work.work);
244 unsigned char buf[2];
245 unsigned int sum, data, mean, vrtc_set;
246 int i;
247
248 for (i = 0, sum = 0; i < 16; i++) {
249 msleep(100);
250 pm860x_bulk_read(info->i2c, REG_VRTC_MEAS1, 2, buf);
251 data = (buf[0] << 4) | buf[1];
 252 data = (data * 5400) >> 12; /* convert to mV */
253 sum += data;
254 }
255 mean = sum >> 4;
256 vrtc_set = 2700 + (info->vrtc & 0x3) * 200;
257 dev_dbg(info->dev, "mean:%d, vrtc_set:%d\n", mean, vrtc_set);
258
259 sum = pm860x_reg_read(info->i2c, PM8607_RTC_MISC1);
260 data = sum & 0x3;
261 if ((mean + 200) < vrtc_set) {
262 /* try higher voltage */
263 if (++data == 4)
264 goto out;
265 data = (sum & 0xf8) | (data & 0x3);
266 pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
267 } else if ((mean - 200) > vrtc_set) {
268 /* try lower voltage */
269 if (data-- == 0)
270 goto out;
271 data = (sum & 0xf8) | (data & 0x3);
272 pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
273 } else
274 goto out;
275 dev_dbg(info->dev, "set 0x%x to RTC_MISC1\n", data);
276 /* trigger next calibration since VRTC is updated */
277 schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
278 return;
279out:
280 /* disable measurement */
281 pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
282 dev_dbg(info->dev, "finish VRTC calibration\n");
283 return;
284}
285#endif
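
calibrate_vrtc_work averages sixteen raw 12-bit VRTC samples, converts them to millivolts with (data * 5400) >> 12, and compares the mean against the 2.7 V + n * 0.2 V setpoint before nudging the 2-bit trim up or down. The two conversions on their own:

    #include <stdio.h>

    /* Raw 12-bit VRTC measurement to millivolts, as in calibrate_vrtc_work. */
    static unsigned raw_to_mv(unsigned raw12)
    {
            return (raw12 * 5400) >> 12;    /* full scale 5.4 V over 4096 steps */
    }

    /* Setpoint from the 2-bit VRTC field: <00> 2.7V, <01> 2.9V, <10> 3.1V, <11> 3.3V. */
    static unsigned vrtc_setpoint_mv(unsigned vrtc_bits)
    {
            return 2700 + (vrtc_bits & 0x3) * 200;
    }

    int main(void)
    {
            unsigned raw = 2200;   /* example reading */

            printf("raw %u -> %u mV (setpoint %u mV)\n",
                   raw, raw_to_mv(raw), vrtc_setpoint_mv(1));
            return 0;
    }
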
286
287static int __devinit pm860x_rtc_probe(struct platform_device *pdev)
288{
289 struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
290 struct pm860x_rtc_pdata *pdata = NULL;
291 struct pm860x_rtc_info *info;
292 struct rtc_time tm;
293 unsigned long ticks = 0;
294 int ret;
295
296 pdata = pdev->dev.platform_data;
297 if (pdata == NULL)
298 dev_warn(&pdev->dev, "No platform data!\n");
299
300 info = kzalloc(sizeof(struct pm860x_rtc_info), GFP_KERNEL);
301 if (!info)
302 return -ENOMEM;
303 info->irq = platform_get_irq(pdev, 0);
304 if (info->irq < 0) {
305 dev_err(&pdev->dev, "No IRQ resource!\n");
306 ret = -EINVAL;
307 goto out;
308 }
309
310 info->chip = chip;
311 info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
312 info->dev = &pdev->dev;
313 dev_set_drvdata(&pdev->dev, info);
314
315 ret = request_threaded_irq(info->irq, NULL, rtc_update_handler,
316 IRQF_ONESHOT, "rtc", info);
317 if (ret < 0) {
318 dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
319 info->irq, ret);
320 goto out;
321 }
322
323 /* set addresses of 32-bit base value for RTC time */
324 pm860x_page_reg_write(info->i2c, REG0_ADDR, REG0_DATA);
325 pm860x_page_reg_write(info->i2c, REG1_ADDR, REG1_DATA);
326 pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA);
327 pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA);
328
329 ret = pm860x_rtc_read_time(&pdev->dev, &tm);
330 if (ret < 0) {
331 dev_err(&pdev->dev, "Failed to read initial time.\n");
332 goto out_rtc;
333 }
334 if ((tm.tm_year < 70) || (tm.tm_year > 138)) {
335 tm.tm_year = 70;
336 tm.tm_mon = 0;
337 tm.tm_mday = 1;
338 tm.tm_hour = 0;
339 tm.tm_min = 0;
340 tm.tm_sec = 0;
341 ret = pm860x_rtc_set_time(&pdev->dev, &tm);
342 if (ret < 0) {
343 dev_err(&pdev->dev, "Failed to set initial time.\n");
344 goto out_rtc;
345 }
346 }
347 rtc_tm_to_time(&tm, &ticks);
348 if (pdata && pdata->sync) {
349 pdata->sync(ticks);
350 info->sync = pdata->sync;
351 }
352
353 info->rtc_dev = rtc_device_register("88pm860x-rtc", &pdev->dev,
354 &pm860x_rtc_ops, THIS_MODULE);
355 ret = PTR_ERR(info->rtc_dev);
356 if (IS_ERR(info->rtc_dev)) {
357 dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
358 goto out_rtc;
359 }
360
361 /*
 362 * enable the internal XO instead of the internal 3.25MHz clock since
 363 * it keeps running in the PMIC power-down state.
364 */
365 pm860x_set_bits(info->i2c, PM8607_RTC1, RTC1_USE_XO, RTC1_USE_XO);
366
367#ifdef VRTC_CALIBRATION
368 /* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */
369 if (pdata && pdata->vrtc)
370 info->vrtc = pdata->vrtc & 0x3;
371 else
372 info->vrtc = 1;
373 pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC);
374
375 /* calibrate VRTC */
376 INIT_DELAYED_WORK(&info->calib_work, calibrate_vrtc_work);
377 schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
378#endif /* VRTC_CALIBRATION */
379 return 0;
380out_rtc:
381 free_irq(info->irq, info);
382out:
383 kfree(info);
384 return ret;
385}
386
387static int __devexit pm860x_rtc_remove(struct platform_device *pdev)
388{
389 struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
390
391#ifdef VRTC_CALIBRATION
392 flush_scheduled_work();
393 /* disable measurement */
394 pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
395#endif /* VRTC_CALIBRATION */
396
397 platform_set_drvdata(pdev, NULL);
398 rtc_device_unregister(info->rtc_dev);
399 free_irq(info->irq, info);
400 kfree(info);
401 return 0;
402}
403
404static struct platform_driver pm860x_rtc_driver = {
405 .driver = {
406 .name = "88pm860x-rtc",
407 .owner = THIS_MODULE,
408 },
409 .probe = pm860x_rtc_probe,
410 .remove = __devexit_p(pm860x_rtc_remove),
411};
412
413static int __init pm860x_rtc_init(void)
414{
415 return platform_driver_register(&pm860x_rtc_driver);
416}
417module_init(pm860x_rtc_init);
418
419static void __exit pm860x_rtc_exit(void)
420{
421 platform_driver_unregister(&pm860x_rtc_driver);
422}
423module_exit(pm860x_rtc_exit);
424
425MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
426MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
427MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
new file mode 100644
index 000000000000..d8e1c2578553
--- /dev/null
+++ b/drivers/rtc/rtc-em3027.c
@@ -0,0 +1,161 @@
1/*
2 * An rtc/i2c driver for the EM Microelectronic EM3027
3 * Copyright 2011 CompuLab, Ltd.
4 *
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on rtc-ds1672.c by Alessandro Zummo <a.zummo@towertech.it>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/i2c.h>
15#include <linux/rtc.h>
16#include <linux/bcd.h>
17
18/* Registers */
19#define EM3027_REG_ON_OFF_CTRL 0x00
20#define EM3027_REG_IRQ_CTRL 0x01
21#define EM3027_REG_IRQ_FLAGS 0x02
22#define EM3027_REG_STATUS 0x03
23#define EM3027_REG_RST_CTRL 0x04
24
25#define EM3027_REG_WATCH_SEC 0x08
26#define EM3027_REG_WATCH_MIN 0x09
27#define EM3027_REG_WATCH_HOUR 0x0a
28#define EM3027_REG_WATCH_DATE 0x0b
29#define EM3027_REG_WATCH_DAY 0x0c
30#define EM3027_REG_WATCH_MON 0x0d
31#define EM3027_REG_WATCH_YEAR 0x0e
32
33#define EM3027_REG_ALARM_SEC 0x10
34#define EM3027_REG_ALARM_MIN 0x11
35#define EM3027_REG_ALARM_HOUR 0x12
36#define EM3027_REG_ALARM_DATE 0x13
37#define EM3027_REG_ALARM_DAY 0x14
38#define EM3027_REG_ALARM_MON 0x15
39#define EM3027_REG_ALARM_YEAR 0x16
40
41static struct i2c_driver em3027_driver;
42
43static int em3027_get_time(struct device *dev, struct rtc_time *tm)
44{
45 struct i2c_client *client = to_i2c_client(dev);
46
47 unsigned char addr = EM3027_REG_WATCH_SEC;
48 unsigned char buf[7];
49
50 struct i2c_msg msgs[] = {
51 {client->addr, 0, 1, &addr}, /* setup read addr */
52 {client->addr, I2C_M_RD, 7, buf}, /* read time/date */
53 };
54
55 /* read time/date registers */
56 if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
57 dev_err(&client->dev, "%s: read error\n", __func__);
58 return -EIO;
59 }
60
61 tm->tm_sec = bcd2bin(buf[0]);
62 tm->tm_min = bcd2bin(buf[1]);
63 tm->tm_hour = bcd2bin(buf[2]);
64 tm->tm_mday = bcd2bin(buf[3]);
65 tm->tm_wday = bcd2bin(buf[4]);
66 tm->tm_mon = bcd2bin(buf[5]);
67 tm->tm_year = bcd2bin(buf[6]) + 100;
68
69 return 0;
70}
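
em3027_get_time reads seven BCD-coded registers and converts each with bcd2bin; the year register carries only the two low digits, so the driver adds 100 to tm_year (years since 1900), i.e. it assumes dates in 20xx. Minimal equivalents of the two BCD helpers, for reference:

    #include <stdio.h>
    #include <stdint.h>

    /* Plain BCD helpers equivalent to the kernel's bcd2bin()/bin2bcd(). */
    static unsigned bcd2bin(uint8_t v)  { return (v >> 4) * 10 + (v & 0x0f); }
    static uint8_t  bin2bcd(unsigned v) { return (uint8_t)(((v / 10) << 4) | (v % 10)); }

    int main(void)
    {
            uint8_t year_reg = 0x25;                    /* chip stores "25"          */
            int tm_year = bcd2bin(year_reg) + 100;      /* rtc_time counts from 1900 */

            printf("reg 0x%02x -> tm_year %d (year %d)\n",
                   (unsigned)year_reg, tm_year, 1900 + tm_year);
            printf("59 -> 0x%02x\n", (unsigned)bin2bcd(59));  /* 0x59 */
            return 0;
    }
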
71
72static int em3027_set_time(struct device *dev, struct rtc_time *tm)
73{
74 struct i2c_client *client = to_i2c_client(dev);
75 unsigned char buf[8];
76
77 struct i2c_msg msg = {
78 client->addr, 0, 8, buf, /* write time/date */
79 };
80
81 buf[0] = EM3027_REG_WATCH_SEC;
82 buf[1] = bin2bcd(tm->tm_sec);
83 buf[2] = bin2bcd(tm->tm_min);
84 buf[3] = bin2bcd(tm->tm_hour);
85 buf[4] = bin2bcd(tm->tm_mday);
86 buf[5] = bin2bcd(tm->tm_wday);
87 buf[6] = bin2bcd(tm->tm_mon);
88 buf[7] = bin2bcd(tm->tm_year % 100);
89
90 /* write time/date registers */
91 if ((i2c_transfer(client->adapter, &msg, 1)) != 1) {
92 dev_err(&client->dev, "%s: write error\n", __func__);
93 return -EIO;
94 }
95
96 return 0;
97}
98
99static const struct rtc_class_ops em3027_rtc_ops = {
100 .read_time = em3027_get_time,
101 .set_time = em3027_set_time,
102};
103
104static int em3027_probe(struct i2c_client *client,
105 const struct i2c_device_id *id)
106{
107 struct rtc_device *rtc;
108
109 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
110 return -ENODEV;
111
112 rtc = rtc_device_register(em3027_driver.driver.name, &client->dev,
113 &em3027_rtc_ops, THIS_MODULE);
114 if (IS_ERR(rtc))
115 return PTR_ERR(rtc);
116
117 i2c_set_clientdata(client, rtc);
118
119 return 0;
120}
121
122static int em3027_remove(struct i2c_client *client)
123{
124 struct rtc_device *rtc = i2c_get_clientdata(client);
125
126 if (rtc)
127 rtc_device_unregister(rtc);
128
129 return 0;
130}
131
132static struct i2c_device_id em3027_id[] = {
133 { "em3027", 0 },
134 { }
135};
136
137static struct i2c_driver em3027_driver = {
138 .driver = {
139 .name = "rtc-em3027",
140 },
141 .probe = &em3027_probe,
142 .remove = &em3027_remove,
143 .id_table = em3027_id,
144};
145
146static int __init em3027_init(void)
147{
148 return i2c_add_driver(&em3027_driver);
149}
150
151static void __exit em3027_exit(void)
152{
153 i2c_del_driver(&em3027_driver);
154}
155
156MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
157MODULE_DESCRIPTION("EM Microelectronic EM3027 RTC driver");
158MODULE_LICENSE("GPL");
159
160module_init(em3027_init);
161module_exit(em3027_exit);
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
new file mode 100644
index 000000000000..1a84b3e227d1
--- /dev/null
+++ b/drivers/rtc/rtc-m41t93.c
@@ -0,0 +1,225 @@
1/*
2 *
3 * Driver for ST M41T93 SPI RTC
4 *
5 * (c) 2010 Nikolaus Voss, Weinmann Medical GmbH
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/bcd.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/rtc.h>
17#include <linux/spi/spi.h>
18
19#define M41T93_REG_SSEC 0
20#define M41T93_REG_ST_SEC 1
21#define M41T93_REG_MIN 2
22#define M41T93_REG_CENT_HOUR 3
23#define M41T93_REG_WDAY 4
24#define M41T93_REG_DAY 5
25#define M41T93_REG_MON 6
26#define M41T93_REG_YEAR 7
27
28
29#define M41T93_REG_ALM_HOUR_HT 0xc
30#define M41T93_REG_FLAGS 0xf
31
32#define M41T93_FLAG_ST (1 << 7)
33#define M41T93_FLAG_OF (1 << 2)
34#define M41T93_FLAG_BL (1 << 4)
35#define M41T93_FLAG_HT (1 << 6)
36
37static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
38{
39 u8 buf[2];
40
41 /* MSB must be '1' to write */
42 buf[0] = addr | 0x80;
43 buf[1] = data;
44
45 return spi_write(spi, buf, sizeof(buf));
46}
47
48static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
49{
50 struct spi_device *spi = to_spi_device(dev);
51 u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */
52 u8 * const data = &buf[1]; /* ptr to first data byte */
53
54 dev_dbg(dev, "%s secs=%d, mins=%d, "
55 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
56 "write", tm->tm_sec, tm->tm_min,
57 tm->tm_hour, tm->tm_mday,
58 tm->tm_mon, tm->tm_year, tm->tm_wday);
59
60 if (tm->tm_year < 100) {
61 dev_warn(&spi->dev, "unsupported date (before 2000-01-01).\n");
62 return -EINVAL;
63 }
64
65 data[M41T93_REG_SSEC] = 0;
66 data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec);
67 data[M41T93_REG_MIN] = bin2bcd(tm->tm_min);
68 data[M41T93_REG_CENT_HOUR] = bin2bcd(tm->tm_hour) |
69 ((tm->tm_year/100-1) << 6);
70 data[M41T93_REG_DAY] = bin2bcd(tm->tm_mday);
71 data[M41T93_REG_WDAY] = bin2bcd(tm->tm_wday + 1);
72 data[M41T93_REG_MON] = bin2bcd(tm->tm_mon + 1);
73 data[M41T93_REG_YEAR] = bin2bcd(tm->tm_year % 100);
74
75 return spi_write(spi, buf, sizeof(buf));
76}
77
78
79static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
80{
81 struct spi_device *spi = to_spi_device(dev);
82 const u8 start_addr = 0;
83 u8 buf[8];
84 int century_after_1900;
85 int tmp;
86 int ret = 0;
87
88 /* Check status of clock. Two states must be considered:
 89 1. halt bit (HT) is set: the clock is running but update of the readout
 90 registers has been disabled due to power failure. This is the normal
 91 case after power-on. Time is valid after resetting the HT bit.
 92 2. oscillator fail bit (OF) is set. The oscillator has been stopped
 93 and time is invalid:
 94 a) OF can be immediately reset.
95 b) OF cannot be immediately reset: oscillator has to be restarted.
96 */
97 tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
98 if (tmp < 0)
99 return tmp;
100
101 if (tmp & M41T93_FLAG_HT) {
102 dev_dbg(&spi->dev, "HT bit is set, reenable clock update.\n");
103 m41t93_set_reg(spi, M41T93_REG_ALM_HOUR_HT,
104 tmp & ~M41T93_FLAG_HT);
105 }
106
107 tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
108 if (tmp < 0)
109 return tmp;
110
111 if (tmp & M41T93_FLAG_OF) {
112 ret = -EINVAL;
113 dev_warn(&spi->dev, "OF bit is set, resetting.\n");
114 m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
115
116 tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
117 if (tmp < 0)
118 return tmp;
119 else if (tmp & M41T93_FLAG_OF) {
120 u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
121
122 dev_warn(&spi->dev,
123 "OF bit is still set, kickstarting clock.\n");
124 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
125 reset_osc &= ~M41T93_FLAG_ST;
126 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
127 }
128 }
129
130 if (tmp & M41T93_FLAG_BL)
131 dev_warn(&spi->dev, "BL bit is set, replace battery.\n");
132
133 /* read actual time/date */
134 tmp = spi_write_then_read(spi, &start_addr, 1, buf, sizeof(buf));
135 if (tmp < 0)
136 return tmp;
137
138 tm->tm_sec = bcd2bin(buf[M41T93_REG_ST_SEC]);
139 tm->tm_min = bcd2bin(buf[M41T93_REG_MIN]);
140 tm->tm_hour = bcd2bin(buf[M41T93_REG_CENT_HOUR] & 0x3f);
141 tm->tm_mday = bcd2bin(buf[M41T93_REG_DAY]);
142 tm->tm_mon = bcd2bin(buf[M41T93_REG_MON]) - 1;
143 tm->tm_wday = bcd2bin(buf[M41T93_REG_WDAY] & 0x0f) - 1;
144
145 century_after_1900 = (buf[M41T93_REG_CENT_HOUR] >> 6) + 1;
146 tm->tm_year = bcd2bin(buf[M41T93_REG_YEAR]) + century_after_1900 * 100;
147
148 dev_dbg(dev, "%s secs=%d, mins=%d, "
149 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
150 "read", tm->tm_sec, tm->tm_min,
151 tm->tm_hour, tm->tm_mday,
152 tm->tm_mon, tm->tm_year, tm->tm_wday);
153
154 return ret < 0 ? ret : rtc_valid_tm(tm);
155}
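
In the M41T93 the top two bits of the CENT_HOUR register hold the century: set_time writes ((tm_year / 100 - 1) << 6) alongside the BCD hour, and get_time adds ((reg >> 6) + 1) * 100 back onto the two-digit year. The round trip in isolation:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned bcd2bin(uint8_t v)  { return (v >> 4) * 10 + (v & 0x0f); }
    static uint8_t  bin2bcd(unsigned v) { return (uint8_t)(((v / 10) << 4) | (v % 10)); }

    /* Pack hour + century into CENT_HOUR the way m41t93_set_time does. */
    static uint8_t pack_cent_hour(int tm_year, int hour)
    {
            return (uint8_t)(bin2bcd(hour) | ((tm_year / 100 - 1) << 6));
    }

    /* Recover the century term the way m41t93_get_time does. */
    static int century_years(uint8_t cent_hour)
    {
            return ((cent_hour >> 6) + 1) * 100;
    }

    int main(void)
    {
            int tm_year = 111;                          /* 2011, years since 1900 */
            uint8_t cent_hour = pack_cent_hour(tm_year, 14);
            uint8_t year_reg  = bin2bcd(tm_year % 100);

            int recovered = bcd2bin(year_reg) + century_years(cent_hour);

            printf("CENT_HOUR 0x%02x, YEAR 0x%02x -> tm_year %d (year %d)\n",
                   (unsigned)cent_hour, (unsigned)year_reg, recovered, 1900 + recovered);
            return 0;
    }
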
156
157
158static const struct rtc_class_ops m41t93_rtc_ops = {
159 .read_time = m41t93_get_time,
160 .set_time = m41t93_set_time,
161};
162
163static struct spi_driver m41t93_driver;
164
165static int __devinit m41t93_probe(struct spi_device *spi)
166{
167 struct rtc_device *rtc;
168 int res;
169
170 spi->bits_per_word = 8;
171 spi_setup(spi);
172
173 res = spi_w8r8(spi, M41T93_REG_WDAY);
174 if (res < 0 || (res & 0xf8) != 0) {
175 dev_err(&spi->dev, "not found 0x%x.\n", res);
176 return -ENODEV;
177 }
178
179 rtc = rtc_device_register(m41t93_driver.driver.name,
180 &spi->dev, &m41t93_rtc_ops, THIS_MODULE);
181 if (IS_ERR(rtc))
182 return PTR_ERR(rtc);
183
184 dev_set_drvdata(&spi->dev, rtc);
185
186 return 0;
187}
188
189
190static int __devexit m41t93_remove(struct spi_device *spi)
191{
192 struct rtc_device *rtc = platform_get_drvdata(spi);
193
194 if (rtc)
195 rtc_device_unregister(rtc);
196
197 return 0;
198}
199
200static struct spi_driver m41t93_driver = {
201 .driver = {
202 .name = "rtc-m41t93",
203 .bus = &spi_bus_type,
204 .owner = THIS_MODULE,
205 },
206 .probe = m41t93_probe,
207 .remove = __devexit_p(m41t93_remove),
208};
209
210static __init int m41t93_init(void)
211{
212 return spi_register_driver(&m41t93_driver);
213}
214module_init(m41t93_init);
215
216static __exit void m41t93_exit(void)
217{
218 spi_unregister_driver(&m41t93_driver);
219}
220module_exit(m41t93_exit);
221
222MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
223MODULE_DESCRIPTION("Driver for ST M41T93 SPI RTC");
224MODULE_LICENSE("GPL");
225MODULE_ALIAS("spi:rtc-m41t93");
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index b2f096871a97..0cec5650d56a 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -380,7 +380,7 @@ cleanup1:
380cleanup0: 380cleanup0:
381 dev_set_drvdata(dev, NULL); 381 dev_set_drvdata(dev, NULL);
382 mrst_rtc.dev = NULL; 382 mrst_rtc.dev = NULL;
383 release_region(iomem->start, iomem->end + 1 - iomem->start); 383 release_mem_region(iomem->start, resource_size(iomem));
384 dev_err(dev, "rtc-mrst: unable to initialise\n"); 384 dev_err(dev, "rtc-mrst: unable to initialise\n");
385 return retval; 385 return retval;
386} 386}
@@ -406,7 +406,7 @@ static void __devexit rtc_mrst_do_remove(struct device *dev)
406 mrst->rtc = NULL; 406 mrst->rtc = NULL;
407 407
408 iomem = mrst->iomem; 408 iomem = mrst->iomem;
409 release_region(iomem->start, iomem->end + 1 - iomem->start); 409 release_mem_region(iomem->start, resource_size(iomem));
410 mrst->iomem = NULL; 410 mrst->iomem = NULL;
411 411
412 mrst->dev = NULL; 412 mrst->dev = NULL;
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index d814417bee8c..39e41fbdf08b 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -55,12 +55,6 @@ static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = {
55 { MAX_PIE_FREQ, RTC_SAM7_BIT }, 55 { MAX_PIE_FREQ, RTC_SAM7_BIT },
56}; 56};
57 57
58/* Those are the bits from a classic RTC we want to mimic */
59#define RTC_IRQF 0x80 /* any of the following 3 is active */
60#define RTC_PF 0x40 /* Periodic interrupt */
61#define RTC_AF 0x20 /* Alarm interrupt */
62#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
63
64#define MXC_RTC_TIME 0 58#define MXC_RTC_TIME 0
65#define MXC_RTC_ALARM 1 59#define MXC_RTC_ALARM 1
66 60
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index f90c574f9d05..0c423892923c 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -58,7 +58,6 @@ struct pcf50633_time {
58 58
59struct pcf50633_rtc { 59struct pcf50633_rtc {
60 int alarm_enabled; 60 int alarm_enabled;
61 int second_enabled;
62 int alarm_pending; 61 int alarm_pending;
63 62
64 struct pcf50633 *pcf; 63 struct pcf50633 *pcf;
@@ -143,7 +142,7 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
143{ 142{
144 struct pcf50633_rtc *rtc; 143 struct pcf50633_rtc *rtc;
145 struct pcf50633_time pcf_tm; 144 struct pcf50633_time pcf_tm;
146 int second_masked, alarm_masked, ret = 0; 145 int alarm_masked, ret = 0;
147 146
148 rtc = dev_get_drvdata(dev); 147 rtc = dev_get_drvdata(dev);
149 148
@@ -162,11 +161,8 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
162 pcf_tm.time[PCF50633_TI_SEC]); 161 pcf_tm.time[PCF50633_TI_SEC]);
163 162
164 163
165 second_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_SECOND);
166 alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM); 164 alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
167 165
168 if (!second_masked)
169 pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
170 if (!alarm_masked) 166 if (!alarm_masked)
171 pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM); 167 pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
172 168
@@ -175,8 +171,6 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
175 PCF50633_TI_EXTENT, 171 PCF50633_TI_EXTENT,
176 &pcf_tm.time[0]); 172 &pcf_tm.time[0]);
177 173
178 if (!second_masked)
179 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
180 if (!alarm_masked) 174 if (!alarm_masked)
181 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM); 175 pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
182 176
@@ -250,15 +244,8 @@ static void pcf50633_rtc_irq(int irq, void *data)
250{ 244{
251 struct pcf50633_rtc *rtc = data; 245 struct pcf50633_rtc *rtc = data;
252 246
253 switch (irq) { 247 rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
254 case PCF50633_IRQ_ALARM: 248 rtc->alarm_pending = 1;
255 rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
256 rtc->alarm_pending = 1;
257 break;
258 case PCF50633_IRQ_SECOND:
259 rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
260 break;
261 }
262} 249}
263 250
264static int __devinit pcf50633_rtc_probe(struct platform_device *pdev) 251static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
@@ -282,9 +269,6 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
282 269
283 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM, 270 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
284 pcf50633_rtc_irq, rtc); 271 pcf50633_rtc_irq, rtc);
285 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_SECOND,
286 pcf50633_rtc_irq, rtc);
287
288 return 0; 272 return 0;
289} 273}
290 274
@@ -295,7 +279,6 @@ static int __devexit pcf50633_rtc_remove(struct platform_device *pdev)
295 rtc = platform_get_drvdata(pdev); 279 rtc = platform_get_drvdata(pdev);
296 280
297 pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM); 281 pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM);
298 pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_SECOND);
299 282
300 rtc_device_unregister(rtc->rtc_dev); 283 rtc_device_unregister(rtc->rtc_dev);
301 kfree(rtc); 284 kfree(rtc);
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
new file mode 100644
index 000000000000..ea09ff211dc6
--- /dev/null
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -0,0 +1,454 @@
1/*
2 * Micro Crystal RV-3029C2 rtc class driver
3 *
4 * Author: Gregory Hermant <gregory.hermant@calao-systems.com>
5 *
6 * based on previously existing rtc class drivers
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * NOTE: Currently this driver only supports the bare minimum for reading
13 * and writing the RTC and alarms. The extra features provided by this chip
14 * (trickle charger, eeprom, temperature compensation) are unavailable.
15 */
16
17#include <linux/module.h>
18#include <linux/i2c.h>
19#include <linux/bcd.h>
20#include <linux/rtc.h>
21
22/* Register map */
23/* control section */
24#define RV3029C2_ONOFF_CTRL 0x00
25#define RV3029C2_IRQ_CTRL 0x01
26#define RV3029C2_IRQ_CTRL_AIE (1 << 0)
27#define RV3029C2_IRQ_FLAGS 0x02
28#define RV3029C2_IRQ_FLAGS_AF (1 << 0)
29#define RV3029C2_STATUS 0x03
30#define RV3029C2_STATUS_VLOW1 (1 << 2)
31#define RV3029C2_STATUS_VLOW2 (1 << 3)
32#define RV3029C2_STATUS_SR (1 << 4)
33#define RV3029C2_STATUS_PON (1 << 5)
34#define RV3029C2_STATUS_EEBUSY (1 << 7)
35#define RV3029C2_RST_CTRL 0x04
36#define RV3029C2_CONTROL_SECTION_LEN 0x05
37
38/* watch section */
39#define RV3029C2_W_SEC 0x08
40#define RV3029C2_W_MINUTES 0x09
41#define RV3029C2_W_HOURS 0x0A
42#define RV3029C2_REG_HR_12_24 (1<<6) /* 24h/12h mode */
43#define RV3029C2_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */
44#define RV3029C2_W_DATE 0x0B
45#define RV3029C2_W_DAYS 0x0C
46#define RV3029C2_W_MONTHS 0x0D
47#define RV3029C2_W_YEARS 0x0E
48#define RV3029C2_WATCH_SECTION_LEN 0x07
49
50/* alarm section */
51#define RV3029C2_A_SC 0x10
52#define RV3029C2_A_MN 0x11
53#define RV3029C2_A_HR 0x12
54#define RV3029C2_A_DT 0x13
55#define RV3029C2_A_DW 0x14
56#define RV3029C2_A_MO 0x15
57#define RV3029C2_A_YR 0x16
58#define RV3029C2_ALARM_SECTION_LEN 0x07
59
60/* timer section */
61#define RV3029C2_TIMER_LOW 0x18
62#define RV3029C2_TIMER_HIGH 0x19
63
64/* temperature section */
65#define RV3029C2_TEMP_PAGE 0x20
66
67/* eeprom data section */
68#define RV3029C2_E2P_EEDATA1 0x28
69#define RV3029C2_E2P_EEDATA2 0x29
70
71/* eeprom control section */
72#define RV3029C2_CONTROL_E2P_EECTRL 0x30
73#define RV3029C2_TRICKLE_1K (1<<0) /* 1K resistance */
74#define RV3029C2_TRICKLE_5K (1<<1) /* 5K resistance */
75#define RV3029C2_TRICKLE_20K (1<<2) /* 20K resistance */
76#define RV3029C2_TRICKLE_80K (1<<3) /* 80K resistance */
77#define RV3029C2_CONTROL_E2P_XTALOFFSET 0x31
78#define RV3029C2_CONTROL_E2P_QCOEF 0x32
79#define RV3029C2_CONTROL_E2P_TURNOVER 0x33
80
81/* user ram section */
82#define RV3029C2_USR1_RAM_PAGE 0x38
83#define RV3029C2_USR1_SECTION_LEN 0x04
84#define RV3029C2_USR2_RAM_PAGE 0x3C
85#define RV3029C2_USR2_SECTION_LEN 0x04
86
87static int
88rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
89 unsigned len)
90{
91 int ret;
92
93 if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
94 (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
95 return -EINVAL;
96
97 ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
98 if (ret < 0)
99 return ret;
100 if (ret < len)
101 return -EIO;
102 return 0;
103}
104
105static int
106rv3029c2_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
107 unsigned len)
108{
109 if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
110 (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
111 return -EINVAL;
112
113 return i2c_smbus_write_i2c_block_data(client, reg, len, buf);
114}
115
116static int
117rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf)
118{
119 int ret = rv3029c2_i2c_read_regs(client, RV3029C2_STATUS, buf, 1);
120
121 if (ret < 0)
122 return -EIO;
123 dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
124 return 0;
125}
126
127static int
128rv3029c2_i2c_set_sr(struct i2c_client *client, u8 val)
129{
130 u8 buf[1];
131 int sr;
132
133 buf[0] = val;
134 sr = rv3029c2_i2c_write_regs(client, RV3029C2_STATUS, buf, 1);
135 dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
136 if (sr < 0)
137 return -EIO;
138 return 0;
139}
140
141static int
142rv3029c2_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
143{
144 u8 buf[1];
145 int ret;
146 u8 regs[RV3029C2_WATCH_SECTION_LEN] = { 0, };
147
148 ret = rv3029c2_i2c_get_sr(client, buf);
149 if (ret < 0) {
150 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
151 return -EIO;
152 }
153
154 ret = rv3029c2_i2c_read_regs(client, RV3029C2_W_SEC , regs,
155 RV3029C2_WATCH_SECTION_LEN);
156 if (ret < 0) {
157 dev_err(&client->dev, "%s: reading RTC section failed\n",
158 __func__);
159 return ret;
160 }
161
162 tm->tm_sec = bcd2bin(regs[RV3029C2_W_SEC-RV3029C2_W_SEC]);
163 tm->tm_min = bcd2bin(regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC]);
164
165 /* HR field has a more complex interpretation */
166 {
167 const u8 _hr = regs[RV3029C2_W_HOURS-RV3029C2_W_SEC];
168 if (_hr & RV3029C2_REG_HR_12_24) {
169 /* 12h format */
170 tm->tm_hour = bcd2bin(_hr & 0x1f);
171 if (_hr & RV3029C2_REG_HR_PM) /* PM flag set */
172 tm->tm_hour += 12;
173 } else /* 24h format */
174 tm->tm_hour = bcd2bin(_hr & 0x3f);
175 }
176
177 tm->tm_mday = bcd2bin(regs[RV3029C2_W_DATE-RV3029C2_W_SEC]);
178 tm->tm_mon = bcd2bin(regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC]) - 1;
179 tm->tm_year = bcd2bin(regs[RV3029C2_W_YEARS-RV3029C2_W_SEC]) + 100;
180 tm->tm_wday = bcd2bin(regs[RV3029C2_W_DAYS-RV3029C2_W_SEC]) - 1;
181
182 return 0;
183}
184
185static int rv3029c2_rtc_read_time(struct device *dev, struct rtc_time *tm)
186{
187 return rv3029c2_i2c_read_time(to_i2c_client(dev), tm);
188}
189
190static int
191rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
192{
193 struct rtc_time *const tm = &alarm->time;
194 int ret;
195 u8 regs[8];
196
197 ret = rv3029c2_i2c_get_sr(client, regs);
198 if (ret < 0) {
199 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
200 return -EIO;
201 }
202
203 ret = rv3029c2_i2c_read_regs(client, RV3029C2_A_SC, regs,
204 RV3029C2_ALARM_SECTION_LEN);
205
206 if (ret < 0) {
207 dev_err(&client->dev, "%s: reading alarm section failed\n",
208 __func__);
209 return ret;
210 }
211
212 tm->tm_sec = bcd2bin(regs[RV3029C2_A_SC-RV3029C2_A_SC] & 0x7f);
213 tm->tm_min = bcd2bin(regs[RV3029C2_A_MN-RV3029C2_A_SC] & 0x7f);
214 tm->tm_hour = bcd2bin(regs[RV3029C2_A_HR-RV3029C2_A_SC] & 0x3f);
215 tm->tm_mday = bcd2bin(regs[RV3029C2_A_DT-RV3029C2_A_SC] & 0x3f);
216 tm->tm_mon = bcd2bin(regs[RV3029C2_A_MO-RV3029C2_A_SC] & 0x1f) - 1;
217 tm->tm_year = bcd2bin(regs[RV3029C2_A_YR-RV3029C2_A_SC] & 0x7f) + 100;
218 tm->tm_wday = bcd2bin(regs[RV3029C2_A_DW-RV3029C2_A_SC] & 0x07) - 1;
219
220 return 0;
221}
222
223static int
224rv3029c2_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
225{
226 return rv3029c2_i2c_read_alarm(to_i2c_client(dev), alarm);
227}
228
229static int rv3029c2_rtc_i2c_alarm_set_irq(struct i2c_client *client,
230 int enable)
231{
232 int ret;
233 u8 buf[1];
234
235 /* enable AIE irq */
236 ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
237 if (ret < 0) {
238 dev_err(&client->dev, "can't read INT reg\n");
239 return ret;
240 }
241 if (enable)
242 buf[0] |= RV3029C2_IRQ_CTRL_AIE;
243 else
244 buf[0] &= ~RV3029C2_IRQ_CTRL_AIE;
245
246 ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
247 if (ret < 0) {
248 dev_err(&client->dev, "can't set INT reg\n");
249 return ret;
250 }
251
252 return 0;
253}
254
255static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
256 struct rtc_wkalrm *alarm)
257{
258 struct rtc_time *const tm = &alarm->time;
259 int ret;
260 u8 regs[8];
261
262 /*
263 * The clock has an 8 bit wide bcd-coded register (they never learn)
264 * for the year. tm_year is an offset from 1900 and we are interested
265 * in the 2000-2099 range, so any value less than 100 is invalid.
266 */
267 if (tm->tm_year < 100)
268 return -EINVAL;
269
270 ret = rv3029c2_i2c_get_sr(client, regs);
271 if (ret < 0) {
272 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
273 return -EIO;
274 }
275 regs[RV3029C2_A_SC-RV3029C2_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
276 regs[RV3029C2_A_MN-RV3029C2_A_SC] = bin2bcd(tm->tm_min & 0x7f);
277 regs[RV3029C2_A_HR-RV3029C2_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
278 regs[RV3029C2_A_DT-RV3029C2_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
279	regs[RV3029C2_A_MO-RV3029C2_A_SC] = bin2bcd(tm->tm_mon + 1);
280	regs[RV3029C2_A_DW-RV3029C2_A_SC] = bin2bcd(tm->tm_wday + 1);
281	regs[RV3029C2_A_YR-RV3029C2_A_SC] = bin2bcd(tm->tm_year - 100);
282
283 ret = rv3029c2_i2c_write_regs(client, RV3029C2_A_SC, regs,
284 RV3029C2_ALARM_SECTION_LEN);
285 if (ret < 0)
286 return ret;
287
288 if (alarm->enabled) {
289 u8 buf[1];
290
291 /* clear AF flag */
292 ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_FLAGS,
293 buf, 1);
294 if (ret < 0) {
295 dev_err(&client->dev, "can't read alarm flag\n");
296 return ret;
297 }
298 buf[0] &= ~RV3029C2_IRQ_FLAGS_AF;
299 ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_FLAGS,
300 buf, 1);
301 if (ret < 0) {
302 dev_err(&client->dev, "can't set alarm flag\n");
303 return ret;
304 }
305 /* enable AIE irq */
306 ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
307 if (ret)
308 return ret;
309
310 dev_dbg(&client->dev, "alarm IRQ armed\n");
311 } else {
312 /* disable AIE irq */
313		ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
314 if (ret)
315 return ret;
316
317 dev_dbg(&client->dev, "alarm IRQ disabled\n");
318 }
319
320 return 0;
321}
322
323static int rv3029c2_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
324{
325 return rv3029c2_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
326}
327
328static int
329rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
330{
331 u8 regs[8];
332 int ret;
333
334 /*
335 * The clock has an 8 bit wide bcd-coded register (they never learn)
336 * for the year. tm_year is an offset from 1900 and we are interested
337 * in the 2000-2099 range, so any value less than 100 is invalid.
338 */
339 if (tm->tm_year < 100)
340 return -EINVAL;
341
342 regs[RV3029C2_W_SEC-RV3029C2_W_SEC] = bin2bcd(tm->tm_sec);
343 regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC] = bin2bcd(tm->tm_min);
344 regs[RV3029C2_W_HOURS-RV3029C2_W_SEC] = bin2bcd(tm->tm_hour);
345 regs[RV3029C2_W_DATE-RV3029C2_W_SEC] = bin2bcd(tm->tm_mday);
346 regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC] = bin2bcd(tm->tm_mon+1);
347 regs[RV3029C2_W_DAYS-RV3029C2_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
348 regs[RV3029C2_W_YEARS-RV3029C2_W_SEC] = bin2bcd(tm->tm_year - 100);
349
350 ret = rv3029c2_i2c_write_regs(client, RV3029C2_W_SEC, regs,
351 RV3029C2_WATCH_SECTION_LEN);
352 if (ret < 0)
353 return ret;
354
355 ret = rv3029c2_i2c_get_sr(client, regs);
356 if (ret < 0) {
357 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
358 return ret;
359 }
360 /* clear PON bit */
361 ret = rv3029c2_i2c_set_sr(client, (regs[0] & ~RV3029C2_STATUS_PON));
362 if (ret < 0) {
363		dev_err(&client->dev, "%s: writing SR failed\n", __func__);
364 return ret;
365 }
366
367 return 0;
368}
369
370static int rv3029c2_rtc_set_time(struct device *dev, struct rtc_time *tm)
371{
372 return rv3029c2_i2c_set_time(to_i2c_client(dev), tm);
373}
374
375static const struct rtc_class_ops rv3029c2_rtc_ops = {
376 .read_time = rv3029c2_rtc_read_time,
377 .set_time = rv3029c2_rtc_set_time,
378 .read_alarm = rv3029c2_rtc_read_alarm,
379 .set_alarm = rv3029c2_rtc_set_alarm,
380};
381
382static struct i2c_device_id rv3029c2_id[] = {
383 { "rv3029c2", 0 },
384 { }
385};
386MODULE_DEVICE_TABLE(i2c, rv3029c2_id);
387
388static int __devinit
389rv3029c2_probe(struct i2c_client *client, const struct i2c_device_id *id)
390{
391 struct rtc_device *rtc;
392 int rc = 0;
393 u8 buf[1];
394
395 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
396 return -ENODEV;
397
398 rtc = rtc_device_register(client->name,
399 &client->dev, &rv3029c2_rtc_ops,
400 THIS_MODULE);
401
402 if (IS_ERR(rtc))
403 return PTR_ERR(rtc);
404
405 i2c_set_clientdata(client, rtc);
406
407 rc = rv3029c2_i2c_get_sr(client, buf);
408 if (rc < 0) {
409 dev_err(&client->dev, "reading status failed\n");
410 goto exit_unregister;
411 }
412
413 return 0;
414
415exit_unregister:
416 rtc_device_unregister(rtc);
417
418 return rc;
419}
420
421static int __devexit rv3029c2_remove(struct i2c_client *client)
422{
423 struct rtc_device *rtc = i2c_get_clientdata(client);
424
425 rtc_device_unregister(rtc);
426
427 return 0;
428}
429
430static struct i2c_driver rv3029c2_driver = {
431 .driver = {
432 .name = "rtc-rv3029c2",
433 },
434 .probe = rv3029c2_probe,
435 .remove = __devexit_p(rv3029c2_remove),
436 .id_table = rv3029c2_id,
437};
438
439static int __init rv3029c2_init(void)
440{
441 return i2c_add_driver(&rv3029c2_driver);
442}
443
444static void __exit rv3029c2_exit(void)
445{
446 i2c_del_driver(&rv3029c2_driver);
447}
448
449module_init(rv3029c2_init);
450module_exit(rv3029c2_exit);
451
452MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>");
453MODULE_DESCRIPTION("Micro Crystal RV3029C2 RTC driver");
454MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
new file mode 100644
index 000000000000..893bac2bb21b
--- /dev/null
+++ b/drivers/rtc/rtc-spear.c
@@ -0,0 +1,534 @@
1/*
2 * drivers/rtc/rtc-spear.c
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Rajeev Kumar<rajeev-dlh.kumar@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/bcd.h>
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/rtc.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23
24/* RTC registers */
25#define TIME_REG 0x00
26#define DATE_REG 0x04
27#define ALARM_TIME_REG 0x08
28#define ALARM_DATE_REG 0x0C
29#define CTRL_REG 0x10
30#define STATUS_REG 0x14
31
32/* TIME_REG & ALARM_TIME_REG */
33#define SECONDS_UNITS (0xf<<0) /* seconds units position */
34#define SECONDS_TENS (0x7<<4) /* seconds tens position */
35#define MINUTES_UNITS (0xf<<8) /* minutes units position */
36#define MINUTES_TENS (0x7<<12) /* minutes tens position */
37#define HOURS_UNITS (0xf<<16) /* hours units position */
38#define HOURS_TENS (0x3<<20) /* hours tens position */
39
40/* DATE_REG & ALARM_DATE_REG */
41#define DAYS_UNITS (0xf<<0) /* days units position */
42#define DAYS_TENS (0x3<<4) /* days tens position */
43#define MONTHS_UNITS (0xf<<8) /* months units position */
44#define MONTHS_TENS (0x1<<12) /* months tens position */
45#define YEARS_UNITS (0xf<<16) /* years units position */
46#define YEARS_TENS (0xf<<20) /* years tens position */
47#define YEARS_HUNDREDS	(0xf<<24)	/* years hundreds position */
48#define YEARS_MILLENIUMS	(0xf<<28)	/* years millennium position */
49
50/* MASK SHIFT TIME_REG & ALARM_TIME_REG*/
51#define SECOND_SHIFT 0x00 /* seconds units */
52#define MINUTE_SHIFT 0x08 /* minutes units position */
53#define HOUR_SHIFT 0x10 /* hours units position */
54#define MDAY_SHIFT 0x00 /* Month day shift */
55#define MONTH_SHIFT 0x08 /* Month shift */
56#define YEAR_SHIFT 0x10 /* Year shift */
57
58#define SECOND_MASK 0x7F
59#define MIN_MASK 0x7F
60#define HOUR_MASK 0x3F
61#define DAY_MASK 0x3F
62#define MONTH_MASK 0x7F
63#define YEAR_MASK 0xFFFF
64
65/* date reg equal to time reg, for debug only */
66#define TIME_BYP (1<<9)
67#define INT_ENABLE (1<<31) /* interrupt enable */
68
69/* STATUS_REG */
70#define CLK_UNCONNECTED (1<<0)
71#define PEND_WR_TIME (1<<2)
72#define PEND_WR_DATE (1<<3)
73#define LOST_WR_TIME (1<<4)
74#define LOST_WR_DATE (1<<5)
75#define RTC_INT_MASK (1<<31)
76#define STATUS_BUSY (PEND_WR_TIME | PEND_WR_DATE)
77#define STATUS_FAIL (LOST_WR_TIME | LOST_WR_DATE)
78
79struct spear_rtc_config {
80 struct clk *clk;
81 spinlock_t lock;
82 void __iomem *ioaddr;
83};
84
85static inline void spear_rtc_clear_interrupt(struct spear_rtc_config *config)
86{
87 unsigned int val;
88 unsigned long flags;
89
90 spin_lock_irqsave(&config->lock, flags);
91 val = readl(config->ioaddr + STATUS_REG);
92 val |= RTC_INT_MASK;
93 writel(val, config->ioaddr + STATUS_REG);
94 spin_unlock_irqrestore(&config->lock, flags);
95}
96
97static inline void spear_rtc_enable_interrupt(struct spear_rtc_config *config)
98{
99 unsigned int val;
100
101 val = readl(config->ioaddr + CTRL_REG);
102 if (!(val & INT_ENABLE)) {
103 spear_rtc_clear_interrupt(config);
104 val |= INT_ENABLE;
105 writel(val, config->ioaddr + CTRL_REG);
106 }
107}
108
109static inline void spear_rtc_disable_interrupt(struct spear_rtc_config *config)
110{
111 unsigned int val;
112
113 val = readl(config->ioaddr + CTRL_REG);
114 if (val & INT_ENABLE) {
115 val &= ~INT_ENABLE;
116 writel(val, config->ioaddr + CTRL_REG);
117 }
118}
119
120static inline int is_write_complete(struct spear_rtc_config *config)
121{
122 int ret = 0;
123 unsigned long flags;
124
125 spin_lock_irqsave(&config->lock, flags);
126 if ((readl(config->ioaddr + STATUS_REG)) & STATUS_FAIL)
127 ret = -EIO;
128 spin_unlock_irqrestore(&config->lock, flags);
129
130 return ret;
131}
132
133static void rtc_wait_not_busy(struct spear_rtc_config *config)
134{
135 int status, count = 0;
136 unsigned long flags;
137
138	/* Assuming BUSY may stay active for 80 msec */
139 for (count = 0; count < 80; count++) {
140 spin_lock_irqsave(&config->lock, flags);
141 status = readl(config->ioaddr + STATUS_REG);
142 spin_unlock_irqrestore(&config->lock, flags);
143 if ((status & STATUS_BUSY) == 0)
144 break;
145 /* check status busy, after each msec */
146 msleep(1);
147 }
148}
149
150static irqreturn_t spear_rtc_irq(int irq, void *dev_id)
151{
152 struct rtc_device *rtc = (struct rtc_device *)dev_id;
153 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
154 unsigned long flags, events = 0;
155 unsigned int irq_data;
156
157 spin_lock_irqsave(&config->lock, flags);
158 irq_data = readl(config->ioaddr + STATUS_REG);
159 spin_unlock_irqrestore(&config->lock, flags);
160
161 if ((irq_data & RTC_INT_MASK)) {
162 spear_rtc_clear_interrupt(config);
163 events = RTC_IRQF | RTC_AF;
164 rtc_update_irq(rtc, 1, events);
165 return IRQ_HANDLED;
166 } else
167 return IRQ_NONE;
168
169}
170
171static int tm2bcd(struct rtc_time *tm)
172{
173 if (rtc_valid_tm(tm) != 0)
174 return -EINVAL;
175 tm->tm_sec = bin2bcd(tm->tm_sec);
176 tm->tm_min = bin2bcd(tm->tm_min);
177 tm->tm_hour = bin2bcd(tm->tm_hour);
178 tm->tm_mday = bin2bcd(tm->tm_mday);
179 tm->tm_mon = bin2bcd(tm->tm_mon + 1);
180 tm->tm_year = bin2bcd(tm->tm_year);
181
182 return 0;
183}
184
185static void bcd2tm(struct rtc_time *tm)
186{
187 tm->tm_sec = bcd2bin(tm->tm_sec);
188 tm->tm_min = bcd2bin(tm->tm_min);
189 tm->tm_hour = bcd2bin(tm->tm_hour);
190 tm->tm_mday = bcd2bin(tm->tm_mday);
191 tm->tm_mon = bcd2bin(tm->tm_mon) - 1;
192 /* epoch == 1900 */
193 tm->tm_year = bcd2bin(tm->tm_year);
194}
195
196/*
197 * spear_rtc_read_time - read the time
198 * @dev: rtc device in use
199 * @tm: holds date and time
200 *
201 * This function reads the time and date. On success it returns 0,
202 * otherwise a negative error is returned.
203 */
204static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
205{
206 struct platform_device *pdev = to_platform_device(dev);
207 struct rtc_device *rtc = platform_get_drvdata(pdev);
208 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
209 unsigned int time, date;
210
211 /* we don't report wday/yday/isdst ... */
212 rtc_wait_not_busy(config);
213
214 time = readl(config->ioaddr + TIME_REG);
215 date = readl(config->ioaddr + DATE_REG);
216 tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
217 tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
218 tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
219 tm->tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
220 tm->tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
221 tm->tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
222
223 bcd2tm(tm);
224 return 0;
225}
226
227/*
228 * spear_rtc_set_time - set the time
229 * @dev: rtc device in use
230 * @tm: holds date and time
231 *
232 * This function sets the time and date. On success it returns 0,
233 * otherwise a negative error is returned.
234 */
235static int spear_rtc_set_time(struct device *dev, struct rtc_time *tm)
236{
237 struct platform_device *pdev = to_platform_device(dev);
238 struct rtc_device *rtc = platform_get_drvdata(pdev);
239 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
240 unsigned int time, date, err = 0;
241
242 if (tm2bcd(tm) < 0)
243 return -EINVAL;
244
245 rtc_wait_not_busy(config);
246 time = (tm->tm_sec << SECOND_SHIFT) | (tm->tm_min << MINUTE_SHIFT) |
247 (tm->tm_hour << HOUR_SHIFT);
248 date = (tm->tm_mday << MDAY_SHIFT) | (tm->tm_mon << MONTH_SHIFT) |
249 (tm->tm_year << YEAR_SHIFT);
250 writel(time, config->ioaddr + TIME_REG);
251 writel(date, config->ioaddr + DATE_REG);
252 err = is_write_complete(config);
253 if (err < 0)
254 return err;
255
256 return 0;
257}
258
259/*
260 * spear_rtc_read_alarm - read the alarm time
261 * @dev: rtc device in use
262 * @alm: holds alarm date and time
263 *
264 * This function reads the alarm time and date. On success it returns 0,
265 * otherwise a negative error is returned.
266 */
267static int spear_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
268{
269 struct platform_device *pdev = to_platform_device(dev);
270 struct rtc_device *rtc = platform_get_drvdata(pdev);
271 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
272 unsigned int time, date;
273
274 rtc_wait_not_busy(config);
275
276 time = readl(config->ioaddr + ALARM_TIME_REG);
277 date = readl(config->ioaddr + ALARM_DATE_REG);
278 alm->time.tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
279 alm->time.tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
280 alm->time.tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
281 alm->time.tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
282 alm->time.tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
283 alm->time.tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
284
285 bcd2tm(&alm->time);
286 alm->enabled = readl(config->ioaddr + CTRL_REG) & INT_ENABLE;
287
288 return 0;
289}
290
291/*
292 * spear_rtc_set_alarm - set the alarm time
293 * @dev: rtc device in use
294 * @alm: holds alarm date and time
295 *
296 * This function sets the alarm time and date. On success it returns 0,
297 * otherwise a negative error is returned.
298 */
299static int spear_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
300{
301 struct platform_device *pdev = to_platform_device(dev);
302 struct rtc_device *rtc = platform_get_drvdata(pdev);
303 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
304 unsigned int time, date, err = 0;
305
306 if (tm2bcd(&alm->time) < 0)
307 return -EINVAL;
308
309 rtc_wait_not_busy(config);
310
311 time = (alm->time.tm_sec << SECOND_SHIFT) | (alm->time.tm_min <<
312 MINUTE_SHIFT) | (alm->time.tm_hour << HOUR_SHIFT);
313 date = (alm->time.tm_mday << MDAY_SHIFT) | (alm->time.tm_mon <<
314 MONTH_SHIFT) | (alm->time.tm_year << YEAR_SHIFT);
315
316 writel(time, config->ioaddr + ALARM_TIME_REG);
317 writel(date, config->ioaddr + ALARM_DATE_REG);
318 err = is_write_complete(config);
319 if (err < 0)
320 return err;
321
322 if (alm->enabled)
323 spear_rtc_enable_interrupt(config);
324 else
325 spear_rtc_disable_interrupt(config);
326
327 return 0;
328}
329static struct rtc_class_ops spear_rtc_ops = {
330 .read_time = spear_rtc_read_time,
331 .set_time = spear_rtc_set_time,
332 .read_alarm = spear_rtc_read_alarm,
333 .set_alarm = spear_rtc_set_alarm,
334};
335
336static int __devinit spear_rtc_probe(struct platform_device *pdev)
337{
338 struct resource *res;
339 struct rtc_device *rtc;
340 struct spear_rtc_config *config;
341 unsigned int status = 0;
342 int irq;
343
344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
345 if (!res) {
346 dev_err(&pdev->dev, "no resource defined\n");
347 return -EBUSY;
348 }
349 if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
350 dev_err(&pdev->dev, "rtc region already claimed\n");
351 return -EBUSY;
352 }
353
354 config = kzalloc(sizeof(*config), GFP_KERNEL);
355 if (!config) {
356 dev_err(&pdev->dev, "out of memory\n");
357 status = -ENOMEM;
358 goto err_release_region;
359 }
360
361 config->clk = clk_get(&pdev->dev, NULL);
362 if (IS_ERR(config->clk)) {
363 status = PTR_ERR(config->clk);
364 goto err_kfree;
365 }
366
367 status = clk_enable(config->clk);
368 if (status < 0)
369 goto err_clk_put;
370
371 config->ioaddr = ioremap(res->start, resource_size(res));
372 if (!config->ioaddr) {
373 dev_err(&pdev->dev, "ioremap fail\n");
374 status = -ENOMEM;
375 goto err_disable_clock;
376 }
377
378 spin_lock_init(&config->lock);
379
380 rtc = rtc_device_register(pdev->name, &pdev->dev, &spear_rtc_ops,
381 THIS_MODULE);
382 if (IS_ERR(rtc)) {
383 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
384 PTR_ERR(rtc));
385 status = PTR_ERR(rtc);
386 goto err_iounmap;
387 }
388
389 platform_set_drvdata(pdev, rtc);
390 dev_set_drvdata(&rtc->dev, config);
391
392 /* alarm irqs */
393 irq = platform_get_irq(pdev, 0);
394 if (irq < 0) {
395		dev_err(&pdev->dev, "no alarm irq?\n");
396 status = irq;
397 goto err_clear_platdata;
398 }
399
400 status = request_irq(irq, spear_rtc_irq, 0, pdev->name, rtc);
401 if (status) {
402		dev_err(&pdev->dev, "Alarm interrupt IRQ%d already claimed\n",
403				irq);
404 goto err_clear_platdata;
405 }
406
407 if (!device_can_wakeup(&pdev->dev))
408 device_init_wakeup(&pdev->dev, 1);
409
410 return 0;
411
412err_clear_platdata:
413 platform_set_drvdata(pdev, NULL);
414 dev_set_drvdata(&rtc->dev, NULL);
415 rtc_device_unregister(rtc);
416err_iounmap:
417 iounmap(config->ioaddr);
418err_disable_clock:
419 clk_disable(config->clk);
420err_clk_put:
421 clk_put(config->clk);
422err_kfree:
423 kfree(config);
424err_release_region:
425 release_mem_region(res->start, resource_size(res));
426
427 return status;
428}
429
430static int __devexit spear_rtc_remove(struct platform_device *pdev)
431{
432 struct rtc_device *rtc = platform_get_drvdata(pdev);
433 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
434 int irq;
435 struct resource *res;
436
437 /* leave rtc running, but disable irqs */
438 spear_rtc_disable_interrupt(config);
439 device_init_wakeup(&pdev->dev, 0);
440 irq = platform_get_irq(pdev, 0);
441	if (irq >= 0)
442		free_irq(irq, rtc);
443 clk_disable(config->clk);
444 clk_put(config->clk);
445 iounmap(config->ioaddr);
446 kfree(config);
447 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
448 if (res)
449 release_mem_region(res->start, resource_size(res));
450 platform_set_drvdata(pdev, NULL);
451 dev_set_drvdata(&rtc->dev, NULL);
452 rtc_device_unregister(rtc);
453
454 return 0;
455}
456
457#ifdef CONFIG_PM
458
459static int spear_rtc_suspend(struct platform_device *pdev, pm_message_t state)
460{
461 struct rtc_device *rtc = platform_get_drvdata(pdev);
462 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
463 int irq;
464
465 irq = platform_get_irq(pdev, 0);
466 if (device_may_wakeup(&pdev->dev))
467 enable_irq_wake(irq);
468 else {
469 spear_rtc_disable_interrupt(config);
470 clk_disable(config->clk);
471 }
472
473 return 0;
474}
475
476static int spear_rtc_resume(struct platform_device *pdev)
477{
478 struct rtc_device *rtc = platform_get_drvdata(pdev);
479 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
480 int irq;
481
482 irq = platform_get_irq(pdev, 0);
483
484 if (device_may_wakeup(&pdev->dev))
485 disable_irq_wake(irq);
486 else {
487 clk_enable(config->clk);
488 spear_rtc_enable_interrupt(config);
489 }
490
491 return 0;
492}
493
494#else
495#define spear_rtc_suspend NULL
496#define spear_rtc_resume NULL
497#endif
498
499static void spear_rtc_shutdown(struct platform_device *pdev)
500{
501 struct rtc_device *rtc = platform_get_drvdata(pdev);
502 struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
503
504 spear_rtc_disable_interrupt(config);
505 clk_disable(config->clk);
506}
507
508static struct platform_driver spear_rtc_driver = {
509 .probe = spear_rtc_probe,
510 .remove = __devexit_p(spear_rtc_remove),
511 .suspend = spear_rtc_suspend,
512 .resume = spear_rtc_resume,
513 .shutdown = spear_rtc_shutdown,
514 .driver = {
515 .name = "rtc-spear",
516 },
517};
518
519static int __init rtc_init(void)
520{
521 return platform_driver_register(&spear_rtc_driver);
522}
523module_init(rtc_init);
524
525static void __exit rtc_exit(void)
526{
527 platform_driver_unregister(&spear_rtc_driver);
528}
529module_exit(rtc_exit);
530
531MODULE_ALIAS("platform:rtc-spear");
532MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
533MODULE_DESCRIPTION("ST SPEAr Realtime Clock Driver (RTC)");
534MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c
new file mode 100644
index 000000000000..eb65dafee66e
--- /dev/null
+++ b/drivers/rtc/rtc-tile.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Tilera-specific RTC driver.
15 */
16
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/rtc.h>
20#include <linux/platform_device.h>
21
22/* Platform device pointer. */
23static struct platform_device *tile_rtc_platform_device;
24
25/*
26 * RTC read routine. Gets time info from RTC chip via hypervisor syscall.
27 */
28static int read_rtc_time(struct device *dev, struct rtc_time *tm)
29{
30 HV_RTCTime hvtm = hv_get_rtc();
31
32 tm->tm_sec = hvtm.tm_sec;
33 tm->tm_min = hvtm.tm_min;
34 tm->tm_hour = hvtm.tm_hour;
35 tm->tm_mday = hvtm.tm_mday;
36 tm->tm_mon = hvtm.tm_mon;
37 tm->tm_year = hvtm.tm_year;
38 tm->tm_wday = 0;
39 tm->tm_yday = 0;
40 tm->tm_isdst = 0;
41
42 if (rtc_valid_tm(tm) < 0)
43 dev_warn(dev, "Read invalid date/time from RTC\n");
44
45 return 0;
46}
47
48/*
49 * RTC write routine. Sends time info to hypervisor via syscall, to be
50 * written to RTC chip.
51 */
52static int set_rtc_time(struct device *dev, struct rtc_time *tm)
53{
54 HV_RTCTime hvtm;
55
56 hvtm.tm_sec = tm->tm_sec;
57 hvtm.tm_min = tm->tm_min;
58 hvtm.tm_hour = tm->tm_hour;
59 hvtm.tm_mday = tm->tm_mday;
60 hvtm.tm_mon = tm->tm_mon;
61 hvtm.tm_year = tm->tm_year;
62
63 hv_set_rtc(hvtm);
64
65 return 0;
66}
67
68/*
69 * RTC read/write ops.
70 */
71static const struct rtc_class_ops tile_rtc_ops = {
72 .read_time = read_rtc_time,
73 .set_time = set_rtc_time,
74};
75
76/*
77 * Device probe routine.
78 */
79static int __devinit tile_rtc_probe(struct platform_device *dev)
80{
81 struct rtc_device *rtc;
82
83 rtc = rtc_device_register("tile",
84 &dev->dev, &tile_rtc_ops, THIS_MODULE);
85
86 if (IS_ERR(rtc))
87 return PTR_ERR(rtc);
88
89 platform_set_drvdata(dev, rtc);
90
91 return 0;
92}
93
94/*
95 * Device cleanup routine.
96 */
97static int __devexit tile_rtc_remove(struct platform_device *dev)
98{
99 struct rtc_device *rtc = platform_get_drvdata(dev);
100
101 if (rtc)
102 rtc_device_unregister(rtc);
103
104 platform_set_drvdata(dev, NULL);
105
106 return 0;
107}
108
109static struct platform_driver tile_rtc_platform_driver = {
110 .driver = {
111 .name = "rtc-tile",
112 .owner = THIS_MODULE,
113 },
114 .probe = tile_rtc_probe,
115 .remove = __devexit_p(tile_rtc_remove),
116};
117
118/*
119 * Driver init routine.
120 */
121static int __init tile_rtc_driver_init(void)
122{
123 int err;
124
125 err = platform_driver_register(&tile_rtc_platform_driver);
126 if (err)
127 return err;
128
129 tile_rtc_platform_device = platform_device_alloc("rtc-tile", 0);
130 if (tile_rtc_platform_device == NULL) {
131 err = -ENOMEM;
132 goto exit_driver_unregister;
133 }
134
135 err = platform_device_add(tile_rtc_platform_device);
136 if (err)
137 goto exit_device_put;
138
139 return 0;
140
141exit_device_put:
142 platform_device_put(tile_rtc_platform_device);
143
144exit_driver_unregister:
145 platform_driver_unregister(&tile_rtc_platform_driver);
146 return err;
147}
148
149/*
150 * Driver cleanup routine.
151 */
152static void __exit tile_rtc_driver_exit(void)
153{
154 platform_driver_unregister(&tile_rtc_platform_driver);
155}
156
157module_init(tile_rtc_driver_init);
158module_exit(tile_rtc_driver_exit);
159
160MODULE_DESCRIPTION("Tilera-specific Real Time Clock Driver");
161MODULE_LICENSE("GPL");
162MODULE_ALIAS("platform:rtc-tile");
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
new file mode 100644
index 000000000000..b8bc862903ae
--- /dev/null
+++ b/drivers/rtc/rtc-vt8500.c
@@ -0,0 +1,366 @@
1/*
2 * drivers/rtc/rtc-vt8500.c
3 *
4 * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
5 *
6 * Based on rtc-pxa.c
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/module.h>
19#include <linux/rtc.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/bcd.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
27/*
28 * Register definitions
29 */
30#define VT8500_RTC_TS 0x00 /* Time set */
31#define VT8500_RTC_DS 0x04 /* Date set */
32#define VT8500_RTC_AS 0x08 /* Alarm set */
33#define VT8500_RTC_CR 0x0c /* Control */
34#define VT8500_RTC_TR 0x10 /* Time read */
35#define VT8500_RTC_DR 0x14 /* Date read */
36#define VT8500_RTC_WS 0x18 /* Write status */
37#define VT8500_RTC_CL 0x20 /* Calibration */
38#define VT8500_RTC_IS 0x24 /* Interrupt status */
39#define VT8500_RTC_ST 0x28 /* Status */
40
41#define INVALID_TIME_BIT (1 << 31)
42
43#define DATE_CENTURY_S 19
44#define DATE_YEAR_S 11
45#define DATE_YEAR_MASK (0xff << DATE_YEAR_S)
46#define DATE_MONTH_S 6
47#define DATE_MONTH_MASK (0x1f << DATE_MONTH_S)
48#define DATE_DAY_MASK 0x3f
49
50#define TIME_DOW_S 20
51#define TIME_DOW_MASK (0x07 << TIME_DOW_S)
52#define TIME_HOUR_S 14
53#define TIME_HOUR_MASK (0x3f << TIME_HOUR_S)
54#define TIME_MIN_S 7
55#define TIME_MIN_MASK (0x7f << TIME_MIN_S)
56#define TIME_SEC_MASK 0x7f
57
58#define ALARM_DAY_S 20
59#define ALARM_DAY_MASK (0x3f << ALARM_DAY_S)
60
61#define ALARM_DAY_BIT (1 << 29)
62#define ALARM_HOUR_BIT (1 << 28)
63#define ALARM_MIN_BIT (1 << 27)
64#define ALARM_SEC_BIT (1 << 26)
65
66#define ALARM_ENABLE_MASK (ALARM_DAY_BIT \
67 | ALARM_HOUR_BIT \
68 | ALARM_MIN_BIT \
69 | ALARM_SEC_BIT)
70
71#define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */
72#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */
73#define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */
74#define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */
75#define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */
76
77struct vt8500_rtc {
78 void __iomem *regbase;
79 struct resource *res;
80 int irq_alarm;
81 int irq_hz;
82 struct rtc_device *rtc;
83 spinlock_t lock; /* Protects this structure */
84};
85
86static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
87{
88 struct vt8500_rtc *vt8500_rtc = dev_id;
89 u32 isr;
90 unsigned long events = 0;
91
92 spin_lock(&vt8500_rtc->lock);
93
94 /* clear interrupt sources */
95 isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS);
96 writel(isr, vt8500_rtc->regbase + VT8500_RTC_IS);
97
98 spin_unlock(&vt8500_rtc->lock);
99
100 if (isr & 1)
101 events |= RTC_AF | RTC_IRQF;
102
103 /* Only second/minute interrupts are supported */
104 if (isr & 2)
105 events |= RTC_UF | RTC_IRQF;
106
107 rtc_update_irq(vt8500_rtc->rtc, 1, events);
108
109 return IRQ_HANDLED;
110}
111
112static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
113{
114 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
115 u32 date, time;
116
117 date = readl(vt8500_rtc->regbase + VT8500_RTC_DR);
118 time = readl(vt8500_rtc->regbase + VT8500_RTC_TR);
119
120 tm->tm_sec = bcd2bin(time & TIME_SEC_MASK);
121 tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S);
122 tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S);
123 tm->tm_mday = bcd2bin(date & DATE_DAY_MASK);
124 tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S);
125 tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
126 + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
127 tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S;
128
129 return 0;
130}
131
132static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
133{
134 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
135
136 if (tm->tm_year < 100) {
137 dev_warn(dev, "Only years 2000-2199 are supported by the "
138 "hardware!\n");
139 return -EINVAL;
140 }
141
142 writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
143 | (bin2bcd(tm->tm_mon) << DATE_MONTH_S)
144 | (bin2bcd(tm->tm_mday)),
145 vt8500_rtc->regbase + VT8500_RTC_DS);
146 writel((bin2bcd(tm->tm_wday) << TIME_DOW_S)
147 | (bin2bcd(tm->tm_hour) << TIME_HOUR_S)
148 | (bin2bcd(tm->tm_min) << TIME_MIN_S)
149 | (bin2bcd(tm->tm_sec)),
150 vt8500_rtc->regbase + VT8500_RTC_TS);
151
152 return 0;
153}
154
155static int vt8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
156{
157 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
158 u32 isr, alarm;
159
160 alarm = readl(vt8500_rtc->regbase + VT8500_RTC_AS);
161 isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS);
162
163 alrm->time.tm_mday = bcd2bin((alarm & ALARM_DAY_MASK) >> ALARM_DAY_S);
164 alrm->time.tm_hour = bcd2bin((alarm & TIME_HOUR_MASK) >> TIME_HOUR_S);
165 alrm->time.tm_min = bcd2bin((alarm & TIME_MIN_MASK) >> TIME_MIN_S);
166 alrm->time.tm_sec = bcd2bin((alarm & TIME_SEC_MASK));
167
168 alrm->enabled = (alarm & ALARM_ENABLE_MASK) ? 1 : 0;
169
170 alrm->pending = (isr & 1) ? 1 : 0;
171 return rtc_valid_tm(&alrm->time);
172}
173
174static int vt8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
175{
176 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
177
178 writel((alrm->enabled ? ALARM_ENABLE_MASK : 0)
179 | (bin2bcd(alrm->time.tm_mday) << ALARM_DAY_S)
180 | (bin2bcd(alrm->time.tm_hour) << TIME_HOUR_S)
181 | (bin2bcd(alrm->time.tm_min) << TIME_MIN_S)
182 | (bin2bcd(alrm->time.tm_sec)),
183 vt8500_rtc->regbase + VT8500_RTC_AS);
184
185 return 0;
186}
187
188static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
189{
190 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
191 unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_AS);
192
193 if (enabled)
194 tmp |= ALARM_ENABLE_MASK;
195 else
196 tmp &= ~ALARM_ENABLE_MASK;
197
198 writel(tmp, vt8500_rtc->regbase + VT8500_RTC_AS);
199 return 0;
200}
201
202static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
203{
204 struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
205 unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
206
207 if (enabled)
208 tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
209 else
210 tmp &= ~VT8500_RTC_CR_SM_ENABLE;
211
212 writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
213 return 0;
214}
215
216static const struct rtc_class_ops vt8500_rtc_ops = {
217 .read_time = vt8500_rtc_read_time,
218 .set_time = vt8500_rtc_set_time,
219 .read_alarm = vt8500_rtc_read_alarm,
220 .set_alarm = vt8500_rtc_set_alarm,
221 .alarm_irq_enable = vt8500_alarm_irq_enable,
222 .update_irq_enable = vt8500_update_irq_enable,
223};
224
225static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
226{
227 struct vt8500_rtc *vt8500_rtc;
228 int ret;
229
230 vt8500_rtc = kzalloc(sizeof(struct vt8500_rtc), GFP_KERNEL);
231 if (!vt8500_rtc)
232 return -ENOMEM;
233
234 spin_lock_init(&vt8500_rtc->lock);
235 platform_set_drvdata(pdev, vt8500_rtc);
236
237 vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
238 if (!vt8500_rtc->res) {
239 dev_err(&pdev->dev, "No I/O memory resource defined\n");
240 ret = -ENXIO;
241 goto err_free;
242 }
243
244 vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
245 if (vt8500_rtc->irq_alarm < 0) {
246 dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
247 ret = -ENXIO;
248 goto err_free;
249 }
250
251 vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
252 if (vt8500_rtc->irq_hz < 0) {
253 dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
254 ret = -ENXIO;
255 goto err_free;
256 }
257
258 vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
259 resource_size(vt8500_rtc->res),
260 "vt8500-rtc");
261 if (vt8500_rtc->res == NULL) {
262 dev_err(&pdev->dev, "failed to request I/O memory\n");
263 ret = -EBUSY;
264 goto err_free;
265 }
266
267 vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start,
268 resource_size(vt8500_rtc->res));
269 if (!vt8500_rtc->regbase) {
270 dev_err(&pdev->dev, "Unable to map RTC I/O memory\n");
271 ret = -EBUSY;
272 goto err_release;
273 }
274
275 /* Enable the second/minute interrupt generation and enable RTC */
276 writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
277 | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
278 vt8500_rtc->regbase + VT8500_RTC_CR);
279
280 vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
281 &vt8500_rtc_ops, THIS_MODULE);
282 if (IS_ERR(vt8500_rtc->rtc)) {
283 ret = PTR_ERR(vt8500_rtc->rtc);
284 dev_err(&pdev->dev,
285 "Failed to register RTC device -> %d\n", ret);
286 goto err_unmap;
287 }
288
289 ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
290 "rtc 1Hz", vt8500_rtc);
291 if (ret < 0) {
292 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
293 vt8500_rtc->irq_hz, ret);
294 goto err_unreg;
295 }
296
297 ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
298 "rtc alarm", vt8500_rtc);
299 if (ret < 0) {
300 dev_err(&pdev->dev, "can't get irq %i, err %d\n",
301 vt8500_rtc->irq_alarm, ret);
302 goto err_free_hz;
303 }
304
305 return 0;
306
307err_free_hz:
308 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
309err_unreg:
310 rtc_device_unregister(vt8500_rtc->rtc);
311err_unmap:
312 iounmap(vt8500_rtc->regbase);
313err_release:
314 release_mem_region(vt8500_rtc->res->start,
315 resource_size(vt8500_rtc->res));
316err_free:
317 kfree(vt8500_rtc);
318 return ret;
319}
320
321static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
322{
323 struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
324
325 free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
326 free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
327
328 rtc_device_unregister(vt8500_rtc->rtc);
329
330 /* Disable alarm matching */
331 writel(0, vt8500_rtc->regbase + VT8500_RTC_IS);
332 iounmap(vt8500_rtc->regbase);
333 release_mem_region(vt8500_rtc->res->start,
334 resource_size(vt8500_rtc->res));
335
336 kfree(vt8500_rtc);
337 platform_set_drvdata(pdev, NULL);
338
339 return 0;
340}
341
342static struct platform_driver vt8500_rtc_driver = {
343 .probe = vt8500_rtc_probe,
344 .remove = __devexit_p(vt8500_rtc_remove),
345 .driver = {
346 .name = "vt8500-rtc",
347 .owner = THIS_MODULE,
348 },
349};
350
351static int __init vt8500_rtc_init(void)
352{
353 return platform_driver_register(&vt8500_rtc_driver);
354}
355module_init(vt8500_rtc_init);
356
357static void __exit vt8500_rtc_exit(void)
358{
359 platform_driver_unregister(&vt8500_rtc_driver);
360}
361module_exit(vt8500_rtc_exit);
362
363MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
364MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)");
365MODULE_LICENSE("GPL");
366MODULE_ALIAS("platform:vt8500-rtc");
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 85dddb1e4126..46784b83c5c4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,7 @@
24#include <asm/debug.h> 24#include <asm/debug.h>
25#include <asm/ebcdic.h> 25#include <asm/ebcdic.h>
26#include <asm/io.h> 26#include <asm/io.h>
27#include <asm/s390_ext.h> 27#include <asm/irq.h>
28#include <asm/vtoc.h> 28#include <asm/vtoc.h>
29#include <asm/diag.h> 29#include <asm/diag.h>
30 30
@@ -642,7 +642,7 @@ dasd_diag_init(void)
642 } 642 }
643 ASCEBC(dasd_diag_discipline.ebcname, 4); 643 ASCEBC(dasd_diag_discipline.ebcname, 4);
644 644
645 ctl_set_bit(0, 9); 645 service_subclass_irq_register();
646 register_external_interrupt(0x2603, dasd_ext_handler); 646 register_external_interrupt(0x2603, dasd_ext_handler);
647 dasd_diag_discipline_pointer = &dasd_diag_discipline; 647 dasd_diag_discipline_pointer = &dasd_diag_discipline;
648 return 0; 648 return 0;
@@ -652,7 +652,7 @@ static void __exit
652dasd_diag_cleanup(void) 652dasd_diag_cleanup(void)
653{ 653{
654 unregister_external_interrupt(0x2603, dasd_ext_handler); 654 unregister_external_interrupt(0x2603, dasd_ext_handler);
655 ctl_clear_bit(0, 9); 655 service_subclass_irq_unregister();
656 dasd_diag_discipline_pointer = NULL; 656 dasd_diag_discipline_pointer = NULL;
657} 657}
658 658
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index b76c61f82485..eaa7e78186f9 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -19,7 +19,6 @@
19#include <linux/suspend.h> 19#include <linux/suspend.h>
20#include <linux/completion.h> 20#include <linux/completion.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <asm/s390_ext.h>
23#include <asm/types.h> 22#include <asm/types.h>
24#include <asm/irq.h> 23#include <asm/irq.h>
25 24
@@ -885,12 +884,12 @@ sclp_check_interface(void)
885 spin_unlock_irqrestore(&sclp_lock, flags); 884 spin_unlock_irqrestore(&sclp_lock, flags);
886 /* Enable service-signal interruption - needs to happen 885 /* Enable service-signal interruption - needs to happen
887 * with IRQs enabled. */ 886 * with IRQs enabled. */
888 ctl_set_bit(0, 9); 887 service_subclass_irq_register();
889 /* Wait for signal from interrupt or timeout */ 888 /* Wait for signal from interrupt or timeout */
890 sclp_sync_wait(); 889 sclp_sync_wait();
891 /* Disable service-signal interruption - needs to happen 890 /* Disable service-signal interruption - needs to happen
892 * with IRQs enabled. */ 891 * with IRQs enabled. */
893 ctl_clear_bit(0,9); 892 service_subclass_irq_unregister();
894 spin_lock_irqsave(&sclp_lock, flags); 893 spin_lock_irqsave(&sclp_lock, flags);
895 del_timer(&sclp_request_timer); 894 del_timer(&sclp_request_timer);
896 if (sclp_init_req.status == SCLP_REQ_DONE && 895 if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1070,7 +1069,7 @@ sclp_init(void)
1070 spin_unlock_irqrestore(&sclp_lock, flags); 1069 spin_unlock_irqrestore(&sclp_lock, flags);
1071 /* Enable service-signal external interruption - needs to happen with 1070 /* Enable service-signal external interruption - needs to happen with
1072 * IRQs enabled. */ 1071 * IRQs enabled. */
1073 ctl_set_bit(0, 9); 1072 service_subclass_irq_register();
1074 sclp_init_mask(1); 1073 sclp_init_mask(1);
1075 return 0; 1074 return 0;
1076 1075
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 607998f0b7d8..aec60d55b10d 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -25,7 +25,6 @@
25#include <asm/kvm_para.h> 25#include <asm/kvm_para.h>
26#include <asm/kvm_virtio.h> 26#include <asm/kvm_virtio.h>
27#include <asm/setup.h> 27#include <asm/setup.h>
28#include <asm/s390_ext.h>
29#include <asm/irq.h> 28#include <asm/irq.h>
30 29
31#define VIRTIO_SUBCODE_64 0x0D00 30#define VIRTIO_SUBCODE_64 0x0D00
@@ -441,7 +440,7 @@ static int __init kvm_devices_init(void)
441 440
442 INIT_WORK(&hotplug_work, hotplug_devices); 441 INIT_WORK(&hotplug_work, hotplug_devices);
443 442
444 ctl_set_bit(0, 9); 443 service_subclass_irq_register();
445 register_external_interrupt(0x2603, kvm_extint_handler); 444 register_external_interrupt(0x2603, kvm_extint_handler);
446 445
447 scan_devices(); 446 scan_devices();
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4ff26521d75f..3382475dc22d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -59,7 +59,6 @@
59#ifndef AAC_DRIVER_BRANCH 59#ifndef AAC_DRIVER_BRANCH
60#define AAC_DRIVER_BRANCH "" 60#define AAC_DRIVER_BRANCH ""
61#endif 61#endif
62#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
63#define AAC_DRIVERNAME "aacraid" 62#define AAC_DRIVERNAME "aacraid"
64 63
65#ifdef AAC_DRIVER_BUILD 64#ifdef AAC_DRIVER_BUILD
@@ -67,7 +66,7 @@
67#define str(x) _str(x) 66#define str(x) _str(x)
68#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH 67#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
69#else 68#else
70#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE 69#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
71#endif 70#endif
72 71
73MODULE_AUTHOR("Red Hat Inc and Adaptec"); 72MODULE_AUTHOR("Red Hat Inc and Adaptec");
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 92109b126391..112f1bec7756 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2227,7 +2227,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
2227 bp = buf; 2227 bp = buf;
2228 *bp = '\0'; 2228 *bp = '\0';
2229 if (hd->proc & PR_VERSION) { 2229 if (hd->proc & PR_VERSION) {
2230 sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s", IN2000_VERSION, IN2000_DATE, __DATE__, __TIME__); 2230 sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
2231 strcat(bp, tbuf); 2231 strcat(bp, tbuf);
2232 } 2232 }
2233 if (hd->proc & PR_INFO) { 2233 if (hd->proc & PR_INFO) {
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 7f636b118287..fca6a8953070 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4252,8 +4252,8 @@ static ssize_t pmcraid_show_drv_version(
4252 char *buf 4252 char *buf
4253) 4253)
4254{ 4254{
4255 return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n", 4255 return snprintf(buf, PAGE_SIZE, "version: %s\n",
4256 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE); 4256 PMCRAID_DRIVER_VERSION);
4257} 4257}
4258 4258
4259static struct device_attribute pmcraid_driver_version_attr = { 4259static struct device_attribute pmcraid_driver_version_attr = {
@@ -6096,9 +6096,8 @@ static int __init pmcraid_init(void)
6096 dev_t dev; 6096 dev_t dev;
6097 int error; 6097 int error;
6098 6098
6099 pmcraid_info("%s Device Driver version: %s %s\n", 6099 pmcraid_info("%s Device Driver version: %s\n",
6100 PMCRAID_DRIVER_NAME, 6100 PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
6101 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
6102 6101
6103 error = alloc_chrdev_region(&dev, 0, 6102 error = alloc_chrdev_region(&dev, 0,
6104 PMCRAID_MAX_ADAPTERS, 6103 PMCRAID_MAX_ADAPTERS,
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 34e4c915002e..f920baf3ff24 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -43,7 +43,6 @@
43#define PMCRAID_DRIVER_NAME "PMC MaxRAID" 43#define PMCRAID_DRIVER_NAME "PMC MaxRAID"
44#define PMCRAID_DEVFILE "pmcsas" 44#define PMCRAID_DEVFILE "pmcsas"
45#define PMCRAID_DRIVER_VERSION "1.0.3" 45#define PMCRAID_DRIVER_VERSION "1.0.3"
46#define PMCRAID_DRIVER_DATE __DATE__
47 46
48#define PMCRAID_FW_VERSION_1 0x002 47#define PMCRAID_FW_VERSION_1 0x002
49 48
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 35381cb0936e..03e522b2fe0b 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -655,6 +655,27 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
655 return 0; 655 return 0;
656} 656}
657 657
658#ifndef readq
659static inline __u64 readq(const volatile void __iomem *addr)
660{
661 const volatile u32 __iomem *p = addr;
662 u32 low, high;
663
664 low = readl(p);
665 high = readl(p + 1);
666
667 return low + ((u64)high << 32);
668}
669#endif
670
671#ifndef writeq
672static inline void writeq(__u64 val, volatile void __iomem *addr)
673{
674 writel(val, addr);
675 writel(val >> 32, addr+4);
676}
677#endif
678
658static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha, 679static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
659 u64 off, void *data, int size) 680 u64 off, void *data, int size)
660{ 681{
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 95019c747cc1..4778e2707168 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -636,7 +636,7 @@ static int sr_probe(struct device *dev)
636 disk->first_minor = minor; 636 disk->first_minor = minor;
637 sprintf(disk->disk_name, "sr%d", minor); 637 sprintf(disk->disk_name, "sr%d", minor);
638 disk->fops = &sr_bdops; 638 disk->fops = &sr_bdops;
639 disk->flags = GENHD_FL_CD; 639 disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
640 disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST; 640 disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
641 641
642 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); 642 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
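Adding GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE suspends media-change/eject polling while the device is held open for exclusive write, e.g. while a CD burning tool has the drive claimed, so background event checks do not disturb an ongoing burn.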
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 97ae716134d0..c0ee4ea28a19 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2051,8 +2051,7 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs,
2051 for (i = 0; i < MAX_SETUP_ARGS; i++) 2051 for (i = 0; i < MAX_SETUP_ARGS; i++)
2052 printk("%s,", setup_args[i]); 2052 printk("%s,", setup_args[i]);
2053 printk("\n"); 2053 printk("\n");
2054 printk(" Version %s - %s, Compiled %s at %s\n", 2054 printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE);
2055 WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__);
2056} 2055}
2057 2056
2058int 2057int
@@ -2132,8 +2131,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
2132 bp = buf; 2131 bp = buf;
2133 *bp = '\0'; 2132 *bp = '\0';
2134 if (hd->proc & PR_VERSION) { 2133 if (hd->proc & PR_VERSION) {
2135 sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s", 2134 sprintf(tbuf, "\nVersion %s - %s.",
2136 WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__); 2135 WD33C93_VERSION, WD33C93_DATE);
2137 strcat(bp, tbuf); 2136 strcat(bp, tbuf);
2138 } 2137 }
2139 if (hd->proc & PR_INFO) { 2138 if (hd->proc & PR_INFO) {
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index fc14b8dea0d7..fbd96b29530d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -271,8 +271,8 @@ config SPI_ORION
271 This enables using the SPI master controller on the Orion chips. 271 This enables using the SPI master controller on the Orion chips.
272 272
273config SPI_PL022 273config SPI_PL022
274 tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)" 274 tristate "ARM AMBA PL022 SSP controller"
275 depends on ARM_AMBA && EXPERIMENTAL 275 depends on ARM_AMBA
276 default y if MACH_U300 276 default y if MACH_U300
277 default y if ARCH_REALVIEW 277 default y if ARCH_REALVIEW
278 default y if INTEGRATOR_IMPD1 278 default y if INTEGRATOR_IMPD1
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 08de58e7f59f..6a9e58dd36c7 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -24,11 +24,6 @@
24 * GNU General Public License for more details. 24 * GNU General Public License for more details.
25 */ 25 */
26 26
27/*
28 * TODO:
29 * - add timeout on polled transfers
30 */
31
32#include <linux/init.h> 27#include <linux/init.h>
33#include <linux/module.h> 28#include <linux/module.h>
34#include <linux/device.h> 29#include <linux/device.h>
@@ -287,6 +282,8 @@
287 282
288#define CLEAR_ALL_INTERRUPTS 0x3 283#define CLEAR_ALL_INTERRUPTS 0x3
289 284
285#define SPI_POLLING_TIMEOUT 1000
286
290 287
291/* 288/*
292 * The type of reading going on on this chip 289 * The type of reading going on on this chip
@@ -1063,7 +1060,7 @@ static int __init pl022_dma_probe(struct pl022 *pl022)
1063 pl022->master_info->dma_filter, 1060 pl022->master_info->dma_filter,
1064 pl022->master_info->dma_rx_param); 1061 pl022->master_info->dma_rx_param);
1065 if (!pl022->dma_rx_channel) { 1062 if (!pl022->dma_rx_channel) {
1066 dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); 1063 dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
1067 goto err_no_rxchan; 1064 goto err_no_rxchan;
1068 } 1065 }
1069 1066
@@ -1071,13 +1068,13 @@ static int __init pl022_dma_probe(struct pl022 *pl022)
1071 pl022->master_info->dma_filter, 1068 pl022->master_info->dma_filter,
1072 pl022->master_info->dma_tx_param); 1069 pl022->master_info->dma_tx_param);
1073 if (!pl022->dma_tx_channel) { 1070 if (!pl022->dma_tx_channel) {
1074 dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); 1071 dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
1075 goto err_no_txchan; 1072 goto err_no_txchan;
1076 } 1073 }
1077 1074
1078 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1075 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1079 if (!pl022->dummypage) { 1076 if (!pl022->dummypage) {
1080 dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); 1077 dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n");
1081 goto err_no_dummypage; 1078 goto err_no_dummypage;
1082 } 1079 }
1083 1080
@@ -1093,6 +1090,8 @@ err_no_txchan:
1093 dma_release_channel(pl022->dma_rx_channel); 1090 dma_release_channel(pl022->dma_rx_channel);
1094 pl022->dma_rx_channel = NULL; 1091 pl022->dma_rx_channel = NULL;
1095err_no_rxchan: 1092err_no_rxchan:
1093 dev_err(&pl022->adev->dev,
1094 "Failed to work in dma mode, work without dma!\n");
1096 return -ENODEV; 1095 return -ENODEV;
1097} 1096}
1098 1097
@@ -1378,6 +1377,7 @@ static void do_polling_transfer(struct pl022 *pl022)
1378 struct spi_transfer *transfer = NULL; 1377 struct spi_transfer *transfer = NULL;
1379 struct spi_transfer *previous = NULL; 1378 struct spi_transfer *previous = NULL;
1380 struct chip_data *chip; 1379 struct chip_data *chip;
1380 unsigned long time, timeout;
1381 1381
1382 chip = pl022->cur_chip; 1382 chip = pl022->cur_chip;
1383 message = pl022->cur_msg; 1383 message = pl022->cur_msg;
@@ -1415,9 +1415,19 @@ static void do_polling_transfer(struct pl022 *pl022)
1415 SSP_CR1(pl022->virtbase)); 1415 SSP_CR1(pl022->virtbase));
1416 1416
1417 dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); 1417 dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
1418 /* FIXME: insert a timeout so we don't hang here indefinitely */ 1418
1419 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) 1419 timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
1420 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
1421 time = jiffies;
1420 readwriter(pl022); 1422 readwriter(pl022);
1423 if (time_after(time, timeout)) {
1424 dev_warn(&pl022->adev->dev,
1425 "%s: timeout!\n", __func__);
1426 message->state = STATE_ERROR;
1427 goto out;
1428 }
1429 cpu_relax();
1430 }
1421 1431
1422 /* Update total byte transferred */ 1432 /* Update total byte transferred */
1423 message->actual_length += pl022->cur_transfer->len; 1433 message->actual_length += pl022->cur_transfer->len;
@@ -1426,7 +1436,7 @@ static void do_polling_transfer(struct pl022 *pl022)
1426 /* Move to next transfer */ 1436 /* Move to next transfer */
1427 message->state = next_transfer(pl022); 1437 message->state = next_transfer(pl022);
1428 } 1438 }
1429 1439out:
1430 /* Handle end of message */ 1440 /* Handle end of message */
1431 if (message->state == STATE_DONE) 1441 if (message->state == STATE_DONE)
1432 message->status = 0; 1442 message->status = 0;
@@ -2107,7 +2117,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2107 if (platform_info->enable_dma) { 2117 if (platform_info->enable_dma) {
2108 status = pl022_dma_probe(pl022); 2118 status = pl022_dma_probe(pl022);
2109 if (status != 0) 2119 if (status != 0)
2110 goto err_no_dma; 2120 platform_info->enable_dma = 0;
2111 } 2121 }
2112 2122
2113 /* Initialize and start queue */ 2123 /* Initialize and start queue */
@@ -2143,7 +2153,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2143 err_init_queue: 2153 err_init_queue:
2144 destroy_queue(pl022); 2154 destroy_queue(pl022);
2145 pl022_dma_remove(pl022); 2155 pl022_dma_remove(pl022);
2146 err_no_dma:
2147 free_irq(adev->irq[0], pl022); 2156 free_irq(adev->irq[0], pl022);
2148 err_no_irq: 2157 err_no_irq:
2149 clk_put(pl022->clk); 2158 clk_put(pl022->clk);
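Two behavioural changes above are worth spelling out: polled transfers are now bounded by SPI_POLLING_TIMEOUT instead of looping forever, and a failed DMA probe downgrades the controller to interrupt/polling mode (enable_dma = 0) instead of aborting the whole probe. The bounded-poll idiom as a stand-alone sketch (busy() is a hypothetical placeholder for the driver's FIFO/status check):

static int poll_with_timeout(bool (*busy)(void), unsigned int ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	while (busy()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* the driver flags STATE_ERROR instead */
		cpu_relax();
	}
	return 0;
}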
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index 871e337c917f..919fa9d9e16b 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -58,8 +58,6 @@ struct chip_data {
58 u8 bits_per_word; 58 u8 bits_per_word;
59 u16 clk_div; /* baud rate divider */ 59 u16 clk_div; /* baud rate divider */
60 u32 speed_hz; /* baud rate */ 60 u32 speed_hz; /* baud rate */
61 int (*write)(struct dw_spi *dws);
62 int (*read)(struct dw_spi *dws);
63 void (*cs_control)(u32 command); 61 void (*cs_control)(u32 command);
64}; 62};
65 63
@@ -162,107 +160,70 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
162} 160}
163#endif /* CONFIG_DEBUG_FS */ 161#endif /* CONFIG_DEBUG_FS */
164 162
165static void wait_till_not_busy(struct dw_spi *dws) 163/* Return the max entries we can fill into tx fifo */
164static inline u32 tx_max(struct dw_spi *dws)
166{ 165{
167 unsigned long end = jiffies + 1 + usecs_to_jiffies(5000); 166 u32 tx_left, tx_room, rxtx_gap;
168 167
169 while (time_before(jiffies, end)) { 168 tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
170 if (!(dw_readw(dws, sr) & SR_BUSY)) 169 tx_room = dws->fifo_len - dw_readw(dws, txflr);
171 return;
172 cpu_relax();
173 }
174 dev_err(&dws->master->dev,
175 "DW SPI: Status keeps busy for 5000us after a read/write!\n");
176}
177
178static void flush(struct dw_spi *dws)
179{
180 while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) {
181 dw_readw(dws, dr);
182 cpu_relax();
183 }
184
185 wait_till_not_busy(dws);
186}
187
188static int null_writer(struct dw_spi *dws)
189{
190 u8 n_bytes = dws->n_bytes;
191 170
192 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) 171 /*
193 || (dws->tx == dws->tx_end)) 172 * Another concern is about the tx/rx mismatch, we
194 return 0; 173 * though to use (dws->fifo_len - rxflr - txflr) as
195 dw_writew(dws, dr, 0); 174 * one maximum value for tx, but it doesn't cover the
196 dws->tx += n_bytes; 175 * data which is out of tx/rx fifo and inside the
176 * shift registers. So a control from sw point of
177 * view is taken.
178 */
179 rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
180 / dws->n_bytes;
197 181
198 wait_till_not_busy(dws); 182 return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
199 return 1;
200} 183}
201 184
202static int null_reader(struct dw_spi *dws) 185/* Return the max entries we should read out of rx fifo */
186static inline u32 rx_max(struct dw_spi *dws)
203{ 187{
204 u8 n_bytes = dws->n_bytes; 188 u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
205 189
206 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) 190 return min(rx_left, (u32)dw_readw(dws, rxflr));
207 && (dws->rx < dws->rx_end)) {
208 dw_readw(dws, dr);
209 dws->rx += n_bytes;
210 }
211 wait_till_not_busy(dws);
212 return dws->rx == dws->rx_end;
213} 191}
214 192
215static int u8_writer(struct dw_spi *dws) 193static void dw_writer(struct dw_spi *dws)
216{ 194{
217 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) 195 u32 max = tx_max(dws);
218 || (dws->tx == dws->tx_end)) 196 u16 txw = 0;
219 return 0;
220 197
221 dw_writew(dws, dr, *(u8 *)(dws->tx)); 198 while (max--) {
222 ++dws->tx; 199 /* Set the tx word if the transfer's original "tx" is not null */
223 200 if (dws->tx_end - dws->len) {
224 wait_till_not_busy(dws); 201 if (dws->n_bytes == 1)
225 return 1; 202 txw = *(u8 *)(dws->tx);
226} 203 else
227 204 txw = *(u16 *)(dws->tx);
228static int u8_reader(struct dw_spi *dws) 205 }
229{ 206 dw_writew(dws, dr, txw);
230 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) 207 dws->tx += dws->n_bytes;
231 && (dws->rx < dws->rx_end)) {
232 *(u8 *)(dws->rx) = dw_readw(dws, dr);
233 ++dws->rx;
234 } 208 }
235
236 wait_till_not_busy(dws);
237 return dws->rx == dws->rx_end;
238} 209}
239 210
240static int u16_writer(struct dw_spi *dws) 211static void dw_reader(struct dw_spi *dws)
241{ 212{
242 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) 213 u32 max = rx_max(dws);
243 || (dws->tx == dws->tx_end)) 214 u16 rxw;
244 return 0;
245 215
246 dw_writew(dws, dr, *(u16 *)(dws->tx)); 216 while (max--) {
247 dws->tx += 2; 217 rxw = dw_readw(dws, dr);
248 218 /* Care rx only if the transfer's original "rx" is not null */
249 wait_till_not_busy(dws); 219 if (dws->rx_end - dws->len) {
250 return 1; 220 if (dws->n_bytes == 1)
251} 221 *(u8 *)(dws->rx) = rxw;
252 222 else
253static int u16_reader(struct dw_spi *dws) 223 *(u16 *)(dws->rx) = rxw;
254{ 224 }
255 u16 temp; 225 dws->rx += dws->n_bytes;
256
257 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
258 && (dws->rx < dws->rx_end)) {
259 temp = dw_readw(dws, dr);
260 *(u16 *)(dws->rx) = temp;
261 dws->rx += 2;
262 } 226 }
263
264 wait_till_not_busy(dws);
265 return dws->rx == dws->rx_end;
266} 227}
267 228
268static void *next_transfer(struct dw_spi *dws) 229static void *next_transfer(struct dw_spi *dws)
@@ -334,8 +295,7 @@ static void giveback(struct dw_spi *dws)
334 295
335static void int_error_stop(struct dw_spi *dws, const char *msg) 296static void int_error_stop(struct dw_spi *dws, const char *msg)
336{ 297{
337 /* Stop and reset hw */ 298 /* Stop the hw */
338 flush(dws);
339 spi_enable_chip(dws, 0); 299 spi_enable_chip(dws, 0);
340 300
341 dev_err(&dws->master->dev, "%s\n", msg); 301 dev_err(&dws->master->dev, "%s\n", msg);
@@ -362,35 +322,28 @@ EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
362 322
363static irqreturn_t interrupt_transfer(struct dw_spi *dws) 323static irqreturn_t interrupt_transfer(struct dw_spi *dws)
364{ 324{
365 u16 irq_status, irq_mask = 0x3f; 325 u16 irq_status = dw_readw(dws, isr);
366 u32 int_level = dws->fifo_len / 2;
367 u32 left;
368 326
369 irq_status = dw_readw(dws, isr) & irq_mask;
370 /* Error handling */ 327 /* Error handling */
371 if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { 328 if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
372 dw_readw(dws, txoicr); 329 dw_readw(dws, txoicr);
373 dw_readw(dws, rxoicr); 330 dw_readw(dws, rxoicr);
374 dw_readw(dws, rxuicr); 331 dw_readw(dws, rxuicr);
375 int_error_stop(dws, "interrupt_transfer: fifo overrun"); 332 int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
376 return IRQ_HANDLED; 333 return IRQ_HANDLED;
377 } 334 }
378 335
336 dw_reader(dws);
337 if (dws->rx_end == dws->rx) {
338 spi_mask_intr(dws, SPI_INT_TXEI);
339 dw_spi_xfer_done(dws);
340 return IRQ_HANDLED;
341 }
379 if (irq_status & SPI_INT_TXEI) { 342 if (irq_status & SPI_INT_TXEI) {
380 spi_mask_intr(dws, SPI_INT_TXEI); 343 spi_mask_intr(dws, SPI_INT_TXEI);
381 344 dw_writer(dws);
382 left = (dws->tx_end - dws->tx) / dws->n_bytes; 345 /* Enable TX irq always, it will be disabled when RX finished */
383 left = (left > int_level) ? int_level : left; 346 spi_umask_intr(dws, SPI_INT_TXEI);
384
385 while (left--)
386 dws->write(dws);
387 dws->read(dws);
388
389 /* Re-enable the IRQ if there is still data left to tx */
390 if (dws->tx_end > dws->tx)
391 spi_umask_intr(dws, SPI_INT_TXEI);
392 else
393 dw_spi_xfer_done(dws);
394 } 347 }
395 348
396 return IRQ_HANDLED; 349 return IRQ_HANDLED;
@@ -399,15 +352,13 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
399static irqreturn_t dw_spi_irq(int irq, void *dev_id) 352static irqreturn_t dw_spi_irq(int irq, void *dev_id)
400{ 353{
401 struct dw_spi *dws = dev_id; 354 struct dw_spi *dws = dev_id;
402 u16 irq_status, irq_mask = 0x3f; 355 u16 irq_status = dw_readw(dws, isr) & 0x3f;
403 356
404 irq_status = dw_readw(dws, isr) & irq_mask;
405 if (!irq_status) 357 if (!irq_status)
406 return IRQ_NONE; 358 return IRQ_NONE;
407 359
408 if (!dws->cur_msg) { 360 if (!dws->cur_msg) {
409 spi_mask_intr(dws, SPI_INT_TXEI); 361 spi_mask_intr(dws, SPI_INT_TXEI);
410 /* Never fail */
411 return IRQ_HANDLED; 362 return IRQ_HANDLED;
412 } 363 }
413 364
@@ -417,13 +368,11 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
417/* Must be called inside pump_transfers() */ 368/* Must be called inside pump_transfers() */
418static void poll_transfer(struct dw_spi *dws) 369static void poll_transfer(struct dw_spi *dws)
419{ 370{
420 while (dws->write(dws)) 371 do {
421 dws->read(dws); 372 dw_writer(dws);
422 /* 373 dw_reader(dws);
423 * There is a possibility that the last word of a transaction 374 cpu_relax();
424 * will be lost if data is not ready. Re-read to solve this issue. 375 } while (dws->rx_end > dws->rx);
425 */
426 dws->read(dws);
427 376
428 dw_spi_xfer_done(dws); 377 dw_spi_xfer_done(dws);
429} 378}
@@ -483,8 +432,6 @@ static void pump_transfers(unsigned long data)
483 dws->tx_end = dws->tx + transfer->len; 432 dws->tx_end = dws->tx + transfer->len;
484 dws->rx = transfer->rx_buf; 433 dws->rx = transfer->rx_buf;
485 dws->rx_end = dws->rx + transfer->len; 434 dws->rx_end = dws->rx + transfer->len;
486 dws->write = dws->tx ? chip->write : null_writer;
487 dws->read = dws->rx ? chip->read : null_reader;
488 dws->cs_change = transfer->cs_change; 435 dws->cs_change = transfer->cs_change;
489 dws->len = dws->cur_transfer->len; 436 dws->len = dws->cur_transfer->len;
490 if (chip != dws->prev_chip) 437 if (chip != dws->prev_chip)
@@ -518,20 +465,8 @@ static void pump_transfers(unsigned long data)
518 465
519 switch (bits) { 466 switch (bits) {
520 case 8: 467 case 8:
521 dws->n_bytes = 1;
522 dws->dma_width = 1;
523 dws->read = (dws->read != null_reader) ?
524 u8_reader : null_reader;
525 dws->write = (dws->write != null_writer) ?
526 u8_writer : null_writer;
527 break;
528 case 16: 468 case 16:
529 dws->n_bytes = 2; 469 dws->n_bytes = dws->dma_width = bits >> 3;
530 dws->dma_width = 2;
531 dws->read = (dws->read != null_reader) ?
532 u16_reader : null_reader;
533 dws->write = (dws->write != null_writer) ?
534 u16_writer : null_writer;
535 break; 470 break;
536 default: 471 default:
537 printk(KERN_ERR "MRST SPI0: unsupported bits:" 472 printk(KERN_ERR "MRST SPI0: unsupported bits:"
@@ -575,7 +510,7 @@ static void pump_transfers(unsigned long data)
575 txint_level = dws->fifo_len / 2; 510 txint_level = dws->fifo_len / 2;
576 txint_level = (templen > txint_level) ? txint_level : templen; 511 txint_level = (templen > txint_level) ? txint_level : templen;
577 512
578 imask |= SPI_INT_TXEI; 513 imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
579 dws->transfer_handler = interrupt_transfer; 514 dws->transfer_handler = interrupt_transfer;
580 } 515 }
581 516
@@ -733,13 +668,9 @@ static int dw_spi_setup(struct spi_device *spi)
733 if (spi->bits_per_word <= 8) { 668 if (spi->bits_per_word <= 8) {
734 chip->n_bytes = 1; 669 chip->n_bytes = 1;
735 chip->dma_width = 1; 670 chip->dma_width = 1;
736 chip->read = u8_reader;
737 chip->write = u8_writer;
738 } else if (spi->bits_per_word <= 16) { 671 } else if (spi->bits_per_word <= 16) {
739 chip->n_bytes = 2; 672 chip->n_bytes = 2;
740 chip->dma_width = 2; 673 chip->dma_width = 2;
741 chip->read = u16_reader;
742 chip->write = u16_writer;
743 } else { 674 } else {
744 /* Never take >16b case for MRST SPIC */ 675 /* Never take >16b case for MRST SPIC */
745 dev_err(&spi->dev, "invalid wordsize\n"); 676 dev_err(&spi->dev, "invalid wordsize\n");
@@ -851,7 +782,6 @@ static void spi_hw_init(struct dw_spi *dws)
851 spi_enable_chip(dws, 0); 782 spi_enable_chip(dws, 0);
852 spi_mask_intr(dws, 0xff); 783 spi_mask_intr(dws, 0xff);
853 spi_enable_chip(dws, 1); 784 spi_enable_chip(dws, 1);
854 flush(dws);
855 785
856 /* 786 /*
857 * Try to detect the FIFO depth if not set by interface driver, 787 * Try to detect the FIFO depth if not set by interface driver,
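The rewrite above replaces the per-word read/write callbacks and their busy-waiting with burst helpers sized from the FIFO state. A worked example of tx_max() with assumed numbers: fifo_len = 16, an 8-bit transfer with 40 words still to send (tx_left = 40), 4 entries already sitting in the TX FIFO (tx_room = 12), and RX trailing TX by 6 entries (rxtx_gap = 6) gives min3(40, 12, 16 - 6) = 10, so at most 10 words are pushed before the RX side is drained again; the rxtx_gap term is what keeps words still in flight through the shift register from overflowing the RX FIFO.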
diff --git a/drivers/spi/dw_spi.h b/drivers/spi/dw_spi.h
index b23e452adaf7..7a5e78d2a5cb 100644
--- a/drivers/spi/dw_spi.h
+++ b/drivers/spi/dw_spi.h
@@ -137,8 +137,6 @@ struct dw_spi {
137 u8 max_bits_per_word; /* maxim is 16b */ 137 u8 max_bits_per_word; /* maxim is 16b */
138 u32 dma_width; 138 u32 dma_width;
139 int cs_change; 139 int cs_change;
140 int (*write)(struct dw_spi *dws);
141 int (*read)(struct dw_spi *dws);
142 irqreturn_t (*transfer_handler)(struct dw_spi *dws); 140 irqreturn_t (*transfer_handler)(struct dw_spi *dws);
143 void (*cs_control)(u32 command); 141 void (*cs_control)(u32 command);
144 142
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 82b9a428c323..2e13a14bba3f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1047,8 +1047,8 @@ static u8 *buf;
1047 * spi_{async,sync}() calls with dma-safe buffers. 1047 * spi_{async,sync}() calls with dma-safe buffers.
1048 */ 1048 */
1049int spi_write_then_read(struct spi_device *spi, 1049int spi_write_then_read(struct spi_device *spi,
1050 const u8 *txbuf, unsigned n_tx, 1050 const void *txbuf, unsigned n_tx,
1051 u8 *rxbuf, unsigned n_rx) 1051 void *rxbuf, unsigned n_rx)
1052{ 1052{
1053 static DEFINE_MUTEX(lock); 1053 static DEFINE_MUTEX(lock);
1054 1054
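Relaxing spi_write_then_read() to void * pointers removes the casts callers needed for anything other than byte buffers. A usage sketch (the 0x05 opcode and read_status() wrapper are illustrative, not from the patch):

static int read_status(struct spi_device *spi, u8 *status)
{
	u8 cmd = 0x05;	/* assumed "read status" command for some SPI device */

	return spi_write_then_read(spi, &cmd, 1, status, 1);
}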
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c
index d5be18b3078c..3cd15f690f16 100644
--- a/drivers/spi/spi_nuc900.c
+++ b/drivers/spi/spi_nuc900.c
@@ -463,7 +463,7 @@ static int __devexit nuc900_spi_remove(struct platform_device *dev)
463 463
464 platform_set_drvdata(dev, NULL); 464 platform_set_drvdata(dev, NULL);
465 465
466 spi_unregister_master(hw->master); 466 spi_bitbang_stop(&hw->bitbang);
467 467
468 clk_disable(hw->clk); 468 clk_disable(hw->clk);
469 clk_put(hw->clk); 469 clk_put(hw->clk);
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 151a95e40653..1a5fcabfd565 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -668,7 +668,7 @@ static int __exit s3c24xx_spi_remove(struct platform_device *dev)
668 668
669 platform_set_drvdata(dev, NULL); 669 platform_set_drvdata(dev, NULL);
670 670
671 spi_unregister_master(hw->master); 671 spi_bitbang_stop(&hw->bitbang);
672 672
673 clk_disable(hw->clk); 673 clk_disable(hw->clk);
674 clk_put(hw->clk); 674 clk_put(hw->clk);
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi_sh.c
index 869a07d375d6..9eedd71ad898 100644
--- a/drivers/spi/spi_sh.c
+++ b/drivers/spi/spi_sh.c
@@ -427,10 +427,10 @@ static int __devexit spi_sh_remove(struct platform_device *pdev)
427{ 427{
428 struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev); 428 struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
429 429
430 spi_unregister_master(ss->master);
430 destroy_workqueue(ss->workqueue); 431 destroy_workqueue(ss->workqueue);
431 free_irq(ss->irq, ss); 432 free_irq(ss->irq, ss);
432 iounmap(ss->addr); 433 iounmap(ss->addr);
433 spi_master_put(ss->master);
434 434
435 return 0; 435 return 0;
436} 436}
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
index 891e5909038c..6c3aa6ecaade 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi_tegra.c
@@ -578,6 +578,7 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
578 master = dev_get_drvdata(&pdev->dev); 578 master = dev_get_drvdata(&pdev->dev);
579 tspi = spi_master_get_devdata(master); 579 tspi = spi_master_get_devdata(master);
580 580
581 spi_unregister_master(master);
581 tegra_dma_free_channel(tspi->rx_dma); 582 tegra_dma_free_channel(tspi->rx_dma);
582 583
583 dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, 584 dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
@@ -586,7 +587,6 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
586 clk_put(tspi->clk); 587 clk_put(tspi->clk);
587 iounmap(tspi->base); 588 iounmap(tspi->base);
588 589
589 spi_master_put(master);
590 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 590 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
591 release_mem_region(r->start, (r->end - r->start) + 1); 591 release_mem_region(r->start, (r->end - r->start) + 1);
592 592
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index c69c6f2c2c5c..4d2c75df886c 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -18,7 +18,6 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mfd/core.h>
22#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
23#include <linux/spi/spi_bitbang.h> 22#include <linux/spi/spi_bitbang.h>
24#include <linux/spi/xilinx_spi.h> 23#include <linux/spi/xilinx_spi.h>
@@ -471,7 +470,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev)
471 struct spi_master *master; 470 struct spi_master *master;
472 u8 i; 471 u8 i;
473 472
474 pdata = mfd_get_data(dev); 473 pdata = dev->dev.platform_data;
475 if (pdata) { 474 if (pdata) {
476 num_cs = pdata->num_chipselect; 475 num_cs = pdata->num_chipselect;
477 little_endian = pdata->little_endian; 476 little_endian = pdata->little_endian;
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index 31d7ba8299e7..77dfb4070c1d 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -587,7 +587,7 @@ ar6k_cfg80211_connect_event(struct ar6_softc *ar, u16 channel,
587 WLAN_STATUS_SUCCESS, GFP_KERNEL); 587 WLAN_STATUS_SUCCESS, GFP_KERNEL);
588 } else { 588 } else {
589 /* inform roam event to cfg80211 */ 589 /* inform roam event to cfg80211 */
590 cfg80211_roamed(ar->arNetDev, bssid, 590 cfg80211_roamed(ar->arNetDev, ibss_channel, bssid,
591 assocReqIe, assocReqLen, 591 assocReqIe, assocReqLen,
592 assocRespIe, assocRespLen, 592 assocRespIe, assocRespLen,
593 GFP_KERNEL); 593 GFP_KERNEL);
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index e3b409bb9847..1827b0bf9201 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -2869,7 +2869,7 @@ wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
2869 wl_update_prof(wl, NULL, &e->addr, WL_PROF_BSSID); 2869 wl_update_prof(wl, NULL, &e->addr, WL_PROF_BSSID);
2870 wl_update_bss_info(wl); 2870 wl_update_bss_info(wl);
2871 2871
2872 cfg80211_roamed(ndev, 2872 cfg80211_roamed(ndev, NULL,
2873 (u8 *)wl_read_prof(wl, WL_PROF_BSSID), 2873 (u8 *)wl_read_prof(wl, WL_PROF_BSSID),
2874 conn_info->req_ie, conn_info->req_ie_len, 2874 conn_info->req_ie, conn_info->req_ie_len,
2875 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); 2875 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
diff --git a/drivers/staging/generic_serial/rio/rioinit.c b/drivers/staging/generic_serial/rio/rioinit.c
index 24a282bb89d4..fb62b383f1de 100644
--- a/drivers/staging/generic_serial/rio/rioinit.c
+++ b/drivers/staging/generic_serial/rio/rioinit.c
@@ -381,7 +381,7 @@ struct rioVersion *RIOVersid(void)
381{ 381{
382 strlcpy(stVersion.version, "RIO driver for linux V1.0", 382 strlcpy(stVersion.version, "RIO driver for linux V1.0",
383 sizeof(stVersion.version)); 383 sizeof(stVersion.version));
384 strlcpy(stVersion.buildDate, __DATE__, 384 strlcpy(stVersion.buildDate, "Aug 15 2010",
385 sizeof(stVersion.buildDate)); 385 sizeof(stVersion.buildDate));
386 386
387 return &stVersion; 387 return &stVersion;
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 76378397b763..fb466f4c92e0 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -695,7 +695,7 @@ void prism2_disconnected(wlandevice_t *wlandev)
695 695
696void prism2_roamed(wlandevice_t *wlandev) 696void prism2_roamed(wlandevice_t *wlandev)
697{ 697{
698 cfg80211_roamed(wlandev->netdev, wlandev->bssid, 698 cfg80211_roamed(wlandev->netdev, NULL, wlandev->bssid,
699 NULL, 0, NULL, 0, GFP_KERNEL); 699 NULL, 0, NULL, 0, GFP_KERNEL);
700} 700}
701 701
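These three staging callers track an API change: cfg80211_roamed() now takes the channel of the new BSS ahead of the BSSID, and drivers that do not track it (brcmfmac, wlan-ng) pass NULL. The shape of the call as inferred from the call sites above (prototype reconstructed, not quoted from the header):

void cfg80211_roamed(struct net_device *dev, struct ieee80211_channel *channel,
		     const u8 *bssid,
		     const u8 *req_ie, size_t req_ie_len,
		     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);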
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache.c
index b8a2b30a1572..77ac2d4d3ef1 100644
--- a/drivers/staging/zcache/zcache.c
+++ b/drivers/staging/zcache/zcache.c
@@ -1181,9 +1181,12 @@ static bool zcache_freeze;
1181/* 1181/*
1182 * zcache shrinker interface (only useful for ephemeral pages, so zbud only) 1182 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
1183 */ 1183 */
1184static int shrink_zcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1184static int shrink_zcache_memory(struct shrinker *shrink,
1185 struct shrink_control *sc)
1185{ 1186{
1186 int ret = -1; 1187 int ret = -1;
1188 int nr = sc->nr_to_scan;
1189 gfp_t gfp_mask = sc->gfp_mask;
1187 1190
1188 if (nr >= 0) { 1191 if (nr >= 0) {
1189 if (!(gfp_mask & __GFP_FS)) 1192 if (!(gfp_mask & __GFP_FS))
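The shrinker callback now receives a struct shrink_control instead of separate nr/gfp_mask arguments, so the old parameters are simply unpacked from sc and the rest of the function is unchanged. A sketch of the resulting callback shape (count_objects()/reclaim_objects() are hypothetical helpers):

static int my_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return -1;			/* cannot reclaim in this context */
	if (!sc->nr_to_scan)
		return count_objects();		/* query-only pass */
	return reclaim_objects(sc->nr_to_scan);
}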
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index bfa05e801823..c0e8f2eeb886 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -4096,8 +4096,7 @@ static int __init cy_init(void)
4096 if (!cy_serial_driver) 4096 if (!cy_serial_driver)
4097 goto err; 4097 goto err;
4098 4098
4099 printk(KERN_INFO "Cyclades driver " CY_VERSION " (built %s %s)\n", 4099 printk(KERN_INFO "Cyclades driver " CY_VERSION "\n");
4100 __DATE__, __TIME__);
4101 4100
4102 /* Initialize the tty_driver structure */ 4101 /* Initialize the tty_driver structure */
4103 4102
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index b1aecc7bb32a..fd347ff34d07 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -61,8 +61,7 @@
61#include <linux/delay.h> 61#include <linux/delay.h>
62 62
63 63
64#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \ 64#define VERSION_STRING DRIVER_DESC " 2.1d"
65 __DATE__ " " __TIME__ ")"
66 65
67/* Macros definitions */ 66/* Macros definitions */
68 67
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index d5bfd41707e7..e0a77540b8ca 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -281,7 +281,7 @@ static void receive_chars(struct m68k_serial *info, unsigned short rx)
281#ifdef CONFIG_MAGIC_SYSRQ 281#ifdef CONFIG_MAGIC_SYSRQ
282 } else if (ch == 0x10) { /* ^P */ 282 } else if (ch == 0x10) { /* ^P */
283 show_state(); 283 show_state();
284 show_free_areas(); 284 show_free_areas(0);
285 show_buffers(); 285 show_buffers();
286/* show_net_buffers(); */ 286/* show_net_buffers(); */
287 return; 287 return;
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index bea5c215460c..84db7321cce8 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -907,9 +907,10 @@ static int m32r_sio_request_port(struct uart_port *port)
907 return ret; 907 return ret;
908} 908}
909 909
910static void m32r_sio_config_port(struct uart_port *port, int flags) 910static void m32r_sio_config_port(struct uart_port *port, int unused)
911{ 911{
912 struct uart_sio_port *up = (struct uart_sio_port *)port; 912 struct uart_sio_port *up = (struct uart_sio_port *)port;
913 unsigned long flags;
913 914
914 spin_lock_irqsave(&up->port.lock, flags); 915 spin_lock_irqsave(&up->port.lock, flags);
915 916
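The fix above stops borrowing config_port()'s int argument as the IRQ-flags word: that parameter carries the port-probe flags, not a scratch variable, and spin_lock_irqsave() requires its own local unsigned long. The standard pattern:

	unsigned long flags;

	spin_lock_irqsave(&up->port.lock, flags);
	/* ... touch hardware registers ... */
	spin_unlock_irqrestore(&up->port.lock, flags);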
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index c63d0d152af6..f2cb7503fcb2 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -15,6 +15,7 @@
15 *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 15 *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */ 16 */
17#include <linux/serial_reg.h> 17#include <linux/serial_reg.h>
18#include <linux/slab.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/pci.h> 20#include <linux/pci.h>
20#include <linux/serial_core.h> 21#include <linux/serial_core.h>
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index 3f2e07011a48..cfb5aa72b196 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -100,6 +100,7 @@ struct twl6030_usb {
100 u8 linkstat; 100 u8 linkstat;
101 u8 asleep; 101 u8 asleep;
102 bool irq_enabled; 102 bool irq_enabled;
103 unsigned long features;
103}; 104};
104 105
105#define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg) 106#define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg)
@@ -204,6 +205,12 @@ static int twl6030_start_srp(struct otg_transceiver *x)
204 205
205static int twl6030_usb_ldo_init(struct twl6030_usb *twl) 206static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
206{ 207{
208 char *regulator_name;
209
210 if (twl->features & TWL6025_SUBCLASS)
211 regulator_name = "ldousb";
212 else
213 regulator_name = "vusb";
207 214
208 /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */ 215 /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
209 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG); 216 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
@@ -214,7 +221,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
214 /* Program MISC2 register and set bit VUSB_IN_VBAT */ 221 /* Program MISC2 register and set bit VUSB_IN_VBAT */
215 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2); 222 twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
216 223
217 twl->usb3v3 = regulator_get(twl->dev, "vusb"); 224 twl->usb3v3 = regulator_get(twl->dev, regulator_name);
218 if (IS_ERR(twl->usb3v3)) 225 if (IS_ERR(twl->usb3v3))
219 return -ENODEV; 226 return -ENODEV;
220 227
@@ -409,6 +416,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
409 twl->dev = &pdev->dev; 416 twl->dev = &pdev->dev;
410 twl->irq1 = platform_get_irq(pdev, 0); 417 twl->irq1 = platform_get_irq(pdev, 0);
411 twl->irq2 = platform_get_irq(pdev, 1); 418 twl->irq2 = platform_get_irq(pdev, 1);
419 twl->features = pdata->features;
412 twl->otg.dev = twl->dev; 420 twl->otg.dev = twl->dev;
413 twl->otg.label = "twl6030"; 421 twl->otg.label = "twl6030";
414 twl->otg.set_host = twl6030_set_host; 422 twl->otg.set_host = twl6030_set_host;
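The probe now records pdata->features so twl6030_usb_ldo_init() can pick the right supply name: "ldousb" on TWL6025-class parts, "vusb" otherwise. Whichever name is chosen, the handle is then driven through the normal regulator API; a generic sketch (error handling elided, not from the patch):

	regulator_enable(twl->usb3v3);
	/* ... USB session active ... */
	regulator_disable(twl->usb3v3);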
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index f9916ca5ca4d..549b960667c8 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1460,6 +1460,14 @@ config FB_S3
1460 ---help--- 1460 ---help---
1461 Driver for graphics boards with S3 Trio / S3 Virge chip. 1461 Driver for graphics boards with S3 Trio / S3 Virge chip.
1462 1462
1463config FB_S3_DDC
1464 bool "DDC for S3 support"
1465 depends on FB_S3
1466 select FB_DDC
1467 default y
1468 help
1469 Say Y here if you want DDC support for your S3 graphics card.
1470
1463config FB_SAVAGE 1471config FB_SAVAGE
1464 tristate "S3 Savage support" 1472 tristate "S3 Savage support"
1465 depends on FB && PCI && EXPERIMENTAL 1473 depends on FB && PCI && EXPERIMENTAL
@@ -1983,6 +1991,18 @@ config FB_SH_MOBILE_HDMI
1983 ---help--- 1991 ---help---
1984 Driver for the on-chip SH-Mobile HDMI controller. 1992 Driver for the on-chip SH-Mobile HDMI controller.
1985 1993
1994config FB_SH_MOBILE_MERAM
1995 tristate "SuperH Mobile MERAM read ahead support for LCDC"
1996 depends on FB_SH_MOBILE_LCDC
1997 default y
1998 ---help---
1999 Enable MERAM support for the SH-Mobile LCD controller.
2000
2001 This will allow for caching of the framebuffer to provide more
2002 reliable access under heavy main memory bus traffic situations.
2003 Up to 4 memory channels can be configured, allowing 4 RGB or
2004 2 YCbCr framebuffers to be configured.
2005
1986config FB_TMIO 2006config FB_TMIO
1987 tristate "Toshiba Mobile IO FrameBuffer support" 2007 tristate "Toshiba Mobile IO FrameBuffer support"
1988 depends on FB && MFD_CORE 2008 depends on FB && MFD_CORE
@@ -2246,29 +2266,43 @@ config FB_METRONOME
2246config FB_MB862XX 2266config FB_MB862XX
2247 tristate "Fujitsu MB862xx GDC support" 2267 tristate "Fujitsu MB862xx GDC support"
2248 depends on FB 2268 depends on FB
2269 depends on PCI || (OF && PPC)
2249 select FB_CFB_FILLRECT 2270 select FB_CFB_FILLRECT
2250 select FB_CFB_COPYAREA 2271 select FB_CFB_COPYAREA
2251 select FB_CFB_IMAGEBLIT 2272 select FB_CFB_IMAGEBLIT
2252 ---help--- 2273 ---help---
2253 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers. 2274 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
2254 2275
2276choice
2277 prompt "GDC variant"
2278 depends on FB_MB862XX
2279
2255config FB_MB862XX_PCI_GDC 2280config FB_MB862XX_PCI_GDC
2256 bool "Carmine/Coral-P(A) GDC" 2281 bool "Carmine/Coral-P(A) GDC"
2257 depends on PCI && FB_MB862XX 2282 depends on PCI
2258 ---help--- 2283 ---help---
2259 This enables framebuffer support for Fujitsu Carmine/Coral-P(A) 2284 This enables framebuffer support for Fujitsu Carmine/Coral-P(A)
2260 PCI graphics controller devices. 2285 PCI graphics controller devices.
2261 2286
2262config FB_MB862XX_LIME 2287config FB_MB862XX_LIME
2263 bool "Lime GDC" 2288 bool "Lime GDC"
2264 depends on FB_MB862XX 2289 depends on OF && PPC
2265 depends on OF && !FB_MB862XX_PCI_GDC
2266 depends on PPC
2267 select FB_FOREIGN_ENDIAN 2290 select FB_FOREIGN_ENDIAN
2268 select FB_LITTLE_ENDIAN 2291 select FB_LITTLE_ENDIAN
2269 ---help--- 2292 ---help---
2270 Framebuffer support for Fujitsu Lime GDC on host CPU bus. 2293 Framebuffer support for Fujitsu Lime GDC on host CPU bus.
2271 2294
2295endchoice
2296
2297config FB_MB862XX_I2C
2298 bool "Support I2C bus on MB862XX GDC"
2299 depends on FB_MB862XX && I2C
2300 default y
2301 help
2302 Selecting this option adds Coral-P(A)/Lime GDC I2C bus adapter
2303 driver to support accessing I2C devices on controller's I2C bus.
2304 These are usually some video decoder chips.
2305
2272config FB_EP93XX 2306config FB_EP93XX
2273 tristate "EP93XX frame buffer support" 2307 tristate "EP93XX frame buffer support"
2274 depends on FB && ARCH_EP93XX 2308 depends on FB && ARCH_EP93XX
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 2ea44b6625fe..8b83129e209c 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_FB_UDL) += udlfb.o
130obj-$(CONFIG_FB_XILINX) += xilinxfb.o 130obj-$(CONFIG_FB_XILINX) += xilinxfb.o
131obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o 131obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
132obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o 132obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o
133obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
133obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o 134obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
134obj-$(CONFIG_FB_OMAP) += omap/ 135obj-$(CONFIG_FB_OMAP) += omap/
135obj-y += omap2/ 136obj-y += omap2/
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index e5d6b56d4447..5ea6596dd824 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -2224,22 +2224,23 @@ static int amifb_ioctl(struct fb_info *info,
2224 * Allocate, Clear and Align a Block of Chip Memory 2224 * Allocate, Clear and Align a Block of Chip Memory
2225 */ 2225 */
2226 2226
2227static u_long unaligned_chipptr = 0; 2227static void *aligned_chipptr;
2228 2228
2229static inline u_long __init chipalloc(u_long size) 2229static inline u_long __init chipalloc(u_long size)
2230{ 2230{
2231 size += PAGE_SIZE-1; 2231 aligned_chipptr = amiga_chip_alloc(size, "amifb [RAM]");
2232 if (!(unaligned_chipptr = (u_long)amiga_chip_alloc(size, 2232 if (!aligned_chipptr) {
2233 "amifb [RAM]"))) 2233 pr_err("amifb: No Chip RAM for frame buffer");
2234 panic("No Chip RAM for frame buffer"); 2234 return 0;
2235 memset((void *)unaligned_chipptr, 0, size); 2235 }
2236 return PAGE_ALIGN(unaligned_chipptr); 2236 memset(aligned_chipptr, 0, size);
2237 return (u_long)aligned_chipptr;
2237} 2238}
2238 2239
2239static inline void chipfree(void) 2240static inline void chipfree(void)
2240{ 2241{
2241 if (unaligned_chipptr) 2242 if (aligned_chipptr)
2242 amiga_chip_free((void *)unaligned_chipptr); 2243 amiga_chip_free(aligned_chipptr);
2243} 2244}
2244 2245
2245 2246
@@ -2295,7 +2296,7 @@ default_chipset:
2295 defmode = amiga_vblank == 50 ? DEFMODE_PAL 2296 defmode = amiga_vblank == 50 ? DEFMODE_PAL
2296 : DEFMODE_NTSC; 2297 : DEFMODE_NTSC;
2297 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT > 2298 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
2298 VIDEOMEMSIZE_ECS_1M) 2299 VIDEOMEMSIZE_ECS_2M)
2299 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M; 2300 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M;
2300 else 2301 else
2301 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M; 2302 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M;
@@ -2312,7 +2313,7 @@ default_chipset:
2312 maxfmode = TAG_FMODE_4; 2313 maxfmode = TAG_FMODE_4;
2313 defmode = DEFMODE_AGA; 2314 defmode = DEFMODE_AGA;
2314 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT > 2315 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
2315 VIDEOMEMSIZE_AGA_1M) 2316 VIDEOMEMSIZE_AGA_2M)
2316 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M; 2317 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M;
2317 else 2318 else
2318 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M; 2319 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M;
@@ -2385,6 +2386,10 @@ default_chipset:
2385 DUMMYSPRITEMEMSIZE+ 2386 DUMMYSPRITEMEMSIZE+
2386 COPINITSIZE+ 2387 COPINITSIZE+
2387 4*COPLISTSIZE); 2388 4*COPLISTSIZE);
2389 if (!chipptr) {
2390 err = -ENOMEM;
2391 goto amifb_error;
2392 }
2388 2393
2389 assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len); 2394 assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len);
2390 assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE); 2395 assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE);
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index c8b520e9a11a..c04b94da81f7 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -16,7 +16,6 @@
16#include <linux/fb.h> 16#include <linux/fb.h>
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/backlight.h> 18#include <linux/backlight.h>
19#include <linux/mfd/core.h>
20#include <linux/mfd/88pm860x.h> 19#include <linux/mfd/88pm860x.h>
21 20
22#define MAX_BRIGHTNESS (0xFF) 21#define MAX_BRIGHTNESS (0xFF)
@@ -168,7 +167,6 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
168 struct pm860x_backlight_pdata *pdata = NULL; 167 struct pm860x_backlight_pdata *pdata = NULL;
169 struct pm860x_backlight_data *data; 168 struct pm860x_backlight_data *data;
170 struct backlight_device *bl; 169 struct backlight_device *bl;
171 struct mfd_cell *cell;
172 struct resource *res; 170 struct resource *res;
173 struct backlight_properties props; 171 struct backlight_properties props;
174 unsigned char value; 172 unsigned char value;
@@ -181,10 +179,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
181 return -EINVAL; 179 return -EINVAL;
182 } 180 }
183 181
184 cell = pdev->dev.platform_data; 182 pdata = pdev->dev.platform_data;
185 if (cell == NULL)
186 return -ENODEV;
187 pdata = cell->mfd_data;
188 if (pdata == NULL) { 183 if (pdata == NULL) {
189 dev_err(&pdev->dev, "platform data isn't assigned to " 184 dev_err(&pdev->dev, "platform data isn't assigned to "
190 "backlight\n"); 185 "backlight\n");
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index af3119707dbf..d1aee730d7d8 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -211,8 +211,12 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
211 const char *buf, size_t count) 211 const char *buf, size_t count)
212{ 212{
213 struct adp5520_bl *data = dev_get_drvdata(dev); 213 struct adp5520_bl *data = dev_get_drvdata(dev);
214 int ret;
215
216 ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
217 if (ret < 0)
218 return ret;
214 219
215 strict_strtoul(buf, 10, &data->cached_daylight_max);
216 return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX); 220 return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX);
217} 221}
218static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show, 222static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
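Propagating the strict_strtoul() return value means a malformed sysfs write such as "echo abc > daylight_max" now returns the parser's error to the writer instead of being silently accepted before the store proceeds.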
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 8b7d47386f39..fcdac872522d 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -899,7 +899,7 @@ static struct fb_ops da8xx_fb_ops = {
899 .fb_blank = cfb_blank, 899 .fb_blank = cfb_blank,
900}; 900};
901 901
902static int __init fb_probe(struct platform_device *device) 902static int __devinit fb_probe(struct platform_device *device)
903{ 903{
904 struct da8xx_lcdc_platform_data *fb_pdata = 904 struct da8xx_lcdc_platform_data *fb_pdata =
905 device->dev.platform_data; 905 device->dev.platform_data;
@@ -1165,7 +1165,7 @@ static int fb_resume(struct platform_device *dev)
1165 1165
1166static struct platform_driver da8xx_fb_driver = { 1166static struct platform_driver da8xx_fb_driver = {
1167 .probe = fb_probe, 1167 .probe = fb_probe,
1168 .remove = fb_remove, 1168 .remove = __devexit_p(fb_remove),
1169 .suspend = fb_suspend, 1169 .suspend = fb_suspend,
1170 .resume = fb_resume, 1170 .resume = fb_resume,
1171 .driver = { 1171 .driver = {
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 4eb38db36e4b..fb205843c2c7 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -242,9 +242,9 @@ static int set_system(const struct dmi_system_id *id)
242 return 0; 242 return 0;
243 } 243 }
244 244
245 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " 245 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
246 "(%dx%d, stride %d)\n", id->ident, 246 "(%dx%d, stride %d)\n", id->ident,
247 (void *)screen_info.lfb_base, screen_info.lfb_width, 247 screen_info.lfb_base, screen_info.lfb_width,
248 screen_info.lfb_height, screen_info.lfb_linelength); 248 screen_info.lfb_height, screen_info.lfb_linelength);
249 249
250 250
diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/mb862xx/Makefile
index d7777714166b..5707ed0e31a7 100644
--- a/drivers/video/mb862xx/Makefile
+++ b/drivers/video/mb862xx/Makefile
@@ -2,4 +2,7 @@
2# Makefile for the MB862xx framebuffer driver 2# Makefile for the MB862xx framebuffer driver
3# 3#
4 4
5obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o mb862xxfb_accel.o 5obj-$(CONFIG_FB_MB862XX) += mb862xxfb.o
6
7mb862xxfb-y := mb862xxfbdrv.o mb862xxfb_accel.o
8mb862xxfb-$(CONFIG_FB_MB862XX_I2C) += mb862xx-i2c.o
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
new file mode 100644
index 000000000000..b953099edd8e
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -0,0 +1,178 @@
1/*
2 * Coral-P(A)/Lime I2C adapter driver
3 *
4 * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/fb.h>
13#include <linux/i2c.h>
14#include <linux/io.h>
15#include <linux/delay.h>
16
17#include "mb862xxfb.h"
18#include "mb862xx_reg.h"
19
20static int mb862xx_i2c_wait_event(struct i2c_adapter *adap)
21{
22 struct mb862xxfb_par *par = adap->algo_data;
23 u32 reg;
24
25 do {
26 udelay(1);
27 reg = inreg(i2c, GC_I2C_BCR);
28 if (reg & (I2C_INT | I2C_BER))
29 break;
30 } while (1);
31
32 return (reg & I2C_BER) ? 0 : 1;
33}
34
35static int mb862xx_i2c_do_address(struct i2c_adapter *adap, int addr)
36{
37 struct mb862xxfb_par *par = adap->algo_data;
38
39 outreg(i2c, GC_I2C_DAR, addr);
40 outreg(i2c, GC_I2C_CCR, I2C_CLOCK_AND_ENABLE);
41 outreg(i2c, GC_I2C_BCR, par->i2c_rs ? I2C_REPEATED_START : I2C_START);
42 if (!mb862xx_i2c_wait_event(adap))
43 return -EIO;
44 par->i2c_rs = !(inreg(i2c, GC_I2C_BSR) & I2C_LRB);
45 return par->i2c_rs;
46}
47
48static int mb862xx_i2c_write_byte(struct i2c_adapter *adap, u8 byte)
49{
50 struct mb862xxfb_par *par = adap->algo_data;
51
52 outreg(i2c, GC_I2C_DAR, byte);
53 outreg(i2c, GC_I2C_BCR, I2C_START);
54 if (!mb862xx_i2c_wait_event(adap))
55 return -EIO;
56 return !(inreg(i2c, GC_I2C_BSR) & I2C_LRB);
57}
58
59static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
60{
61 struct mb862xxfb_par *par = adap->algo_data;
62
63 outreg(i2c, GC_I2C_BCR, I2C_START | (last ? 0 : I2C_ACK));
64 if (!mb862xx_i2c_wait_event(adap))
65 return 0;
66 *byte = inreg(i2c, GC_I2C_DAR);
67 return 1;
68}
69
70void mb862xx_i2c_stop(struct i2c_adapter *adap)
71{
72 struct mb862xxfb_par *par = adap->algo_data;
73
74 outreg(i2c, GC_I2C_BCR, I2C_STOP);
75 outreg(i2c, GC_I2C_CCR, I2C_DISABLE);
76 par->i2c_rs = 0;
77}
78
79static int mb862xx_i2c_read(struct i2c_adapter *adap, struct i2c_msg *m)
80{
81 int i, ret = 0;
82 int last = m->len - 1;
83
84 for (i = 0; i < m->len; i++) {
85 if (!mb862xx_i2c_read_byte(adap, &m->buf[i], i == last)) {
86 ret = -EIO;
87 break;
88 }
89 }
90 return ret;
91}
92
93static int mb862xx_i2c_write(struct i2c_adapter *adap, struct i2c_msg *m)
94{
95 int i, ret = 0;
96
97 for (i = 0; i < m->len; i++) {
98 if (!mb862xx_i2c_write_byte(adap, m->buf[i])) {
99 ret = -EIO;
100 break;
101 }
102 }
103 return ret;
104}
105
106static int mb862xx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
107 int num)
108{
109 struct mb862xxfb_par *par = adap->algo_data;
110 struct i2c_msg *m;
111 int addr;
112 int i = 0, err = 0;
113
114 dev_dbg(par->dev, "%s: %d msgs\n", __func__, num);
115
116 for (i = 0; i < num; i++) {
117 m = &msgs[i];
118 if (!m->len) {
119 dev_dbg(par->dev, "%s: null msgs\n", __func__);
120 continue;
121 }
122 addr = m->addr;
123 if (m->flags & I2C_M_RD)
124 addr |= 1;
125
126 err = mb862xx_i2c_do_address(adap, addr);
127 if (err < 0)
128 break;
129 if (m->flags & I2C_M_RD)
130 err = mb862xx_i2c_read(adap, m);
131 else
132 err = mb862xx_i2c_write(adap, m);
133 }
134
135 if (i)
136 mb862xx_i2c_stop(adap);
137
138 return (err < 0) ? err : i;
139}
140
141static u32 mb862xx_func(struct i2c_adapter *adap)
142{
143 return I2C_FUNC_SMBUS_BYTE_DATA;
144}
145
146static const struct i2c_algorithm mb862xx_algo = {
147 .master_xfer = mb862xx_xfer,
148 .functionality = mb862xx_func,
149};
150
151static struct i2c_adapter mb862xx_i2c_adapter = {
152 .name = "MB862xx I2C adapter",
153 .algo = &mb862xx_algo,
154 .owner = THIS_MODULE,
155};
156
157int mb862xx_i2c_init(struct mb862xxfb_par *par)
158{
159 int ret;
160
161 mb862xx_i2c_adapter.algo_data = par;
162 par->adap = &mb862xx_i2c_adapter;
163
164 ret = i2c_add_adapter(par->adap);
165 if (ret < 0) {
166 dev_err(par->dev, "failed to add %s\n",
167 mb862xx_i2c_adapter.name);
168 }
169 return ret;
170}
171
172void mb862xx_i2c_exit(struct mb862xxfb_par *par)
173{
174 if (par->adap) {
175 i2c_del_adapter(par->adap);
176 par->adap = NULL;
177 }
178}
diff --git a/drivers/video/mb862xx/mb862xx_reg.h b/drivers/video/mb862xx/mb862xx_reg.h
index 2ba65e118500..9df48b8edc94 100644
--- a/drivers/video/mb862xx/mb862xx_reg.h
+++ b/drivers/video/mb862xx/mb862xx_reg.h
@@ -5,11 +5,8 @@
5#ifndef _MB862XX_REG_H 5#ifndef _MB862XX_REG_H
6#define _MB862XX_REG_H 6#define _MB862XX_REG_H
7 7
8#ifdef MB862XX_MMIO_BOTTOM
9#define MB862XX_MMIO_BASE 0x03fc0000
10#else
11#define MB862XX_MMIO_BASE 0x01fc0000 8#define MB862XX_MMIO_BASE 0x01fc0000
12#endif 9#define MB862XX_MMIO_HIGH_BASE 0x03fc0000
13#define MB862XX_I2C_BASE 0x0000c000 10#define MB862XX_I2C_BASE 0x0000c000
14#define MB862XX_DISP_BASE 0x00010000 11#define MB862XX_DISP_BASE 0x00010000
15#define MB862XX_CAP_BASE 0x00018000 12#define MB862XX_CAP_BASE 0x00018000
@@ -23,6 +20,7 @@
23#define GC_IMASK 0x00000024 20#define GC_IMASK 0x00000024
24#define GC_SRST 0x0000002c 21#define GC_SRST 0x0000002c
25#define GC_CCF 0x00000038 22#define GC_CCF 0x00000038
23#define GC_RSW 0x0000005c
26#define GC_CID 0x000000f0 24#define GC_CID 0x000000f0
27#define GC_REVISION 0x00000084 25#define GC_REVISION 0x00000084
28 26
@@ -53,10 +51,16 @@
53#define GC_L0OA0 0x00000024 51#define GC_L0OA0 0x00000024
54#define GC_L0DA0 0x00000028 52#define GC_L0DA0 0x00000028
55#define GC_L0DY_L0DX 0x0000002c 53#define GC_L0DY_L0DX 0x0000002c
54#define GC_L1M 0x00000030
55#define GC_L1DA 0x00000034
56#define GC_DCM1 0x00000100 56#define GC_DCM1 0x00000100
57#define GC_L0EM 0x00000110 57#define GC_L0EM 0x00000110
58#define GC_L0WY_L0WX 0x00000114 58#define GC_L0WY_L0WX 0x00000114
59#define GC_L0WH_L0WW 0x00000118 59#define GC_L0WH_L0WW 0x00000118
60#define GC_L1EM 0x00000120
61#define GC_L1WY_L1WX 0x00000124
62#define GC_L1WH_L1WW 0x00000128
63#define GC_DLS 0x00000180
60#define GC_DCM2 0x00000104 64#define GC_DCM2 0x00000104
61#define GC_DCM3 0x00000108 65#define GC_DCM3 0x00000108
62#define GC_CPM_CUTC 0x000000a0 66#define GC_CPM_CUTC 0x000000a0
@@ -68,6 +72,11 @@
68 72
69#define GC_CPM_CEN0 0x00100000 73#define GC_CPM_CEN0 0x00100000
70#define GC_CPM_CEN1 0x00200000 74#define GC_CPM_CEN1 0x00200000
75#define GC_DCM1_DEN 0x80000000
76#define GC_DCM1_L1E 0x00020000
77#define GC_L1M_16 0x80000000
78#define GC_L1M_YC 0x40000000
79#define GC_L1M_CS 0x20000000
71 80
72#define GC_DCM01_ESY 0x00000004 81#define GC_DCM01_ESY 0x00000004
73#define GC_DCM01_SC 0x00003f00 82#define GC_DCM01_SC 0x00003f00
@@ -79,9 +88,50 @@
79#define GC_L0M_L0C_16 0x80000000 88#define GC_L0M_L0C_16 0x80000000
80#define GC_L0EM_L0EC_24 0x40000000 89#define GC_L0EM_L0EC_24 0x40000000
81#define GC_L0M_L0W_UNIT 64 90#define GC_L0M_L0W_UNIT 64
91#define GC_L1EM_DM 0x02000000
82 92
83#define GC_DISP_REFCLK_400 400 93#define GC_DISP_REFCLK_400 400
84 94
95/* I2C */
96#define GC_I2C_BSR 0x00000000 /* BSR */
97#define GC_I2C_BCR 0x00000004 /* BCR */
98#define GC_I2C_CCR 0x00000008 /* CCR */
99#define GC_I2C_ADR 0x0000000C /* ADR */
100#define GC_I2C_DAR 0x00000010 /* DAR */
101
102#define I2C_DISABLE 0x00000000
103#define I2C_STOP 0x00000000
104#define I2C_START 0x00000010
105#define I2C_REPEATED_START 0x00000030
106#define I2C_CLOCK_AND_ENABLE 0x0000003f
107#define I2C_READY 0x01
108#define I2C_INT 0x01
109#define I2C_INTE 0x02
110#define I2C_ACK 0x08
111#define I2C_BER 0x80
112#define I2C_BEIE 0x40
113#define I2C_TRX 0x80
114#define I2C_LRB 0x10
115
116/* Capture registers and bits */
117#define GC_CAP_VCM 0x00000000
118#define GC_CAP_CSC 0x00000004
119#define GC_CAP_VCS 0x00000008
120#define GC_CAP_CBM 0x00000010
121#define GC_CAP_CBOA 0x00000014
122#define GC_CAP_CBLA 0x00000018
123#define GC_CAP_IMG_START 0x0000001C
124#define GC_CAP_IMG_END 0x00000020
125#define GC_CAP_CMSS 0x00000048
126#define GC_CAP_CMDS 0x0000004C
127
128#define GC_VCM_VIE 0x80000000
129#define GC_VCM_CM 0x03000000
130#define GC_VCM_VS_PAL 0x00000002
131#define GC_CBM_OO 0x80000000
132#define GC_CBM_HRV 0x00000010
133#define GC_CBM_CBST 0x00000001
134
85/* Carmine specific */ 135/* Carmine specific */
86#define MB86297_DRAW_BASE 0x00020000 136#define MB86297_DRAW_BASE 0x00020000
87#define MB86297_DISP0_BASE 0x00100000 137#define MB86297_DISP0_BASE 0x00100000
diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/mb862xx/mb862xxfb.h
index d7e7cb76bbf2..8550630c1e01 100644
--- a/drivers/video/mb862xx/mb862xxfb.h
+++ b/drivers/video/mb862xx/mb862xxfb.h
@@ -1,6 +1,26 @@
1#ifndef __MB862XX_H__ 1#ifndef __MB862XX_H__
2#define __MB862XX_H__ 2#define __MB862XX_H__
3 3
4struct mb862xx_l1_cfg {
5 unsigned short sx;
6 unsigned short sy;
7 unsigned short sw;
8 unsigned short sh;
9 unsigned short dx;
10 unsigned short dy;
11 unsigned short dw;
12 unsigned short dh;
13 int mirror;
14};
15
16#define MB862XX_BASE 'M'
17#define MB862XX_L1_GET_CFG _IOR(MB862XX_BASE, 0, struct mb862xx_l1_cfg*)
18#define MB862XX_L1_SET_CFG _IOW(MB862XX_BASE, 1, struct mb862xx_l1_cfg*)
19#define MB862XX_L1_ENABLE _IOW(MB862XX_BASE, 2, int)
20#define MB862XX_L1_CAP_CTL _IOW(MB862XX_BASE, 3, int)
21
22#ifdef __KERNEL__
23
4#define PCI_VENDOR_ID_FUJITSU_LIMITED 0x10cf 24#define PCI_VENDOR_ID_FUJITSU_LIMITED 0x10cf
5#define PCI_DEVICE_ID_FUJITSU_CORALP 0x2019 25#define PCI_DEVICE_ID_FUJITSU_CORALP 0x2019
6#define PCI_DEVICE_ID_FUJITSU_CORALPA 0x201e 26#define PCI_DEVICE_ID_FUJITSU_CORALPA 0x201e
@@ -38,6 +58,8 @@ struct mb862xxfb_par {
38 void __iomem *mmio_base; /* remapped registers */ 58 void __iomem *mmio_base; /* remapped registers */
39 size_t mapped_vram; /* length of remapped vram */ 59 size_t mapped_vram; /* length of remapped vram */
40 size_t mmio_len; /* length of register region */ 60 size_t mmio_len; /* length of register region */
61 unsigned long cap_buf; /* capture buffers offset */
62 size_t cap_len; /* length of capture buffers */
41 63
42 void __iomem *host; /* relocatable reg. bases */ 64 void __iomem *host; /* relocatable reg. bases */
43 void __iomem *i2c; 65 void __iomem *i2c;
@@ -57,11 +79,23 @@ struct mb862xxfb_par {
57 unsigned int refclk; /* disp. reference clock */ 79 unsigned int refclk; /* disp. reference clock */
58 struct mb862xx_gc_mode *gc_mode; /* GDC mode init data */ 80 struct mb862xx_gc_mode *gc_mode; /* GDC mode init data */
59 int pre_init; /* don't init display if 1 */ 81 int pre_init; /* don't init display if 1 */
82 struct i2c_adapter *adap; /* GDC I2C bus adapter */
83 int i2c_rs;
84
85 struct mb862xx_l1_cfg l1_cfg;
86 int l1_stride;
60 87
61 u32 pseudo_palette[16]; 88 u32 pseudo_palette[16];
62}; 89};
63 90
64extern void mb862xxfb_init_accel(struct fb_info *info, int xres); 91extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
92#ifdef CONFIG_FB_MB862XX_I2C
93extern int mb862xx_i2c_init(struct mb862xxfb_par *par);
94extern void mb862xx_i2c_exit(struct mb862xxfb_par *par);
95#else
96static inline int mb862xx_i2c_init(struct mb862xxfb_par *par) { return 0; }
97static inline void mb862xx_i2c_exit(struct mb862xxfb_par *par) { }
98#endif
65 99
66#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC) 100#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC)
67#error "Select Lime GDC or CoralP/Carmine support, but not both together" 101#error "Select Lime GDC or CoralP/Carmine support, but not both together"
@@ -82,4 +116,6 @@ extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
82 116
83#define pack(a, b) (((a) << 16) | (b)) 117#define pack(a, b) (((a) << 16) | (b))
84 118
119#endif /* __KERNEL__ */
120
85#endif 121#endif
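
Editor's note: the four MB862XX_L1_* ioctls added to this header are the new userspace interface to the GDC's L1 (video capture) layer. Below is a hypothetical userspace sketch of how they might be driven; the /dev/fb0 node, the locally copied header and the chosen window sizes are assumptions for illustration, not part of the patch.

/* Hypothetical example, not from the patch: query the default L1 capture
 * window, shrink it to a quarter of the screen area and enable capture.
 * Assumes the Coral-P framebuffer is /dev/fb0 and that mb862xxfb.h has
 * been copied into the include path; error handling is minimal. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "mb862xxfb.h"	/* struct mb862xx_l1_cfg, MB862XX_L1_* ioctls */

int main(void)
{
	struct mb862xx_l1_cfg cfg;
	int on = 1;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;

	/* read the default 720x576 window set up by the driver */
	if (ioctl(fd, MB862XX_L1_GET_CFG, &cfg) < 0)
		return 1;

	/* display it downscaled to half width and half height */
	cfg.dw = cfg.sw / 2;
	cfg.dh = cfg.sh / 2;
	if (ioctl(fd, MB862XX_L1_SET_CFG, &cfg) < 0)
		return 1;

	/* enable the L1 layer and start capturing */
	if (ioctl(fd, MB862XX_L1_ENABLE, &on) < 0)
		return 1;

	close(fd);
	return 0;
}
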
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index c76e663a6cd4..f70bd63b0187 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/fb.h> 17#include <linux/fb.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/uaccess.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21#include <linux/pci.h> 22#include <linux/pci.h>
@@ -27,7 +28,7 @@
27 28
28#define NR_PALETTE 256 29#define NR_PALETTE 256
29#define MB862XX_MEM_SIZE 0x1000000 30#define MB862XX_MEM_SIZE 0x1000000
30#define CORALP_MEM_SIZE 0x4000000 31#define CORALP_MEM_SIZE 0x2000000
31#define CARMINE_MEM_SIZE 0x8000000 32#define CARMINE_MEM_SIZE 0x8000000
32#define DRV_NAME "mb862xxfb" 33#define DRV_NAME "mb862xxfb"
33 34
@@ -309,6 +310,97 @@ static int mb862xxfb_blank(int mode, struct fb_info *fbi)
309 return 0; 310 return 0;
310} 311}
311 312
313static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
314 unsigned long arg)
315{
316 struct mb862xxfb_par *par = fbi->par;
317 struct mb862xx_l1_cfg *l1_cfg = &par->l1_cfg;
318 void __user *argp = (void __user *)arg;
319 int *enable;
320 u32 l1em = 0;
321
322 switch (cmd) {
323 case MB862XX_L1_GET_CFG:
324 if (copy_to_user(argp, l1_cfg, sizeof(*l1_cfg)))
325 return -EFAULT;
326 break;
327 case MB862XX_L1_SET_CFG:
328 if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg)))
329 return -EFAULT;
330 if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) {
331 /* downscaling */
332 outreg(cap, GC_CAP_CSC,
333 pack((l1_cfg->sh << 11) / l1_cfg->dh,
334 (l1_cfg->sw << 11) / l1_cfg->dw));
335 l1em = inreg(disp, GC_L1EM);
336 l1em &= ~GC_L1EM_DM;
337 } else if ((l1_cfg->sw <= l1_cfg->dw) &&
338 (l1_cfg->sh <= l1_cfg->dh)) {
339 /* upscaling */
340 outreg(cap, GC_CAP_CSC,
341 pack((l1_cfg->sh << 11) / l1_cfg->dh,
342 (l1_cfg->sw << 11) / l1_cfg->dw));
343 outreg(cap, GC_CAP_CMSS,
344 pack(l1_cfg->sw >> 1, l1_cfg->sh));
345 outreg(cap, GC_CAP_CMDS,
346 pack(l1_cfg->dw >> 1, l1_cfg->dh));
347 l1em = inreg(disp, GC_L1EM);
348 l1em |= GC_L1EM_DM;
349 }
350
351 if (l1_cfg->mirror) {
352 outreg(cap, GC_CAP_CBM,
353 inreg(cap, GC_CAP_CBM) | GC_CBM_HRV);
354 l1em |= l1_cfg->dw * 2 - 8;
355 } else {
356 outreg(cap, GC_CAP_CBM,
357 inreg(cap, GC_CAP_CBM) & ~GC_CBM_HRV);
358 l1em &= 0xffff0000;
359 }
360 outreg(disp, GC_L1EM, l1em);
361 break;
362 case MB862XX_L1_ENABLE:
363 enable = (int *)arg;
364 if (*enable) {
365 outreg(disp, GC_L1DA, par->cap_buf);
366 outreg(cap, GC_CAP_IMG_START,
367 pack(l1_cfg->sy >> 1, l1_cfg->sx));
368 outreg(cap, GC_CAP_IMG_END,
369 pack(l1_cfg->sh, l1_cfg->sw));
370 outreg(disp, GC_L1M, GC_L1M_16 | GC_L1M_YC | GC_L1M_CS |
371 (par->l1_stride << 16));
372 outreg(disp, GC_L1WY_L1WX,
373 pack(l1_cfg->dy, l1_cfg->dx));
374 outreg(disp, GC_L1WH_L1WW,
375 pack(l1_cfg->dh - 1, l1_cfg->dw));
376 outreg(disp, GC_DLS, 1);
377 outreg(cap, GC_CAP_VCM,
378 GC_VCM_VIE | GC_VCM_CM | GC_VCM_VS_PAL);
379 outreg(disp, GC_DCM1, inreg(disp, GC_DCM1) |
380 GC_DCM1_DEN | GC_DCM1_L1E);
381 } else {
382 outreg(cap, GC_CAP_VCM,
383 inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE);
384 outreg(disp, GC_DCM1,
385 inreg(disp, GC_DCM1) & ~GC_DCM1_L1E);
386 }
387 break;
388 case MB862XX_L1_CAP_CTL:
389 enable = (int *)arg;
390 if (*enable) {
391 outreg(cap, GC_CAP_VCM,
392 inreg(cap, GC_CAP_VCM) | GC_VCM_VIE);
393 } else {
394 outreg(cap, GC_CAP_VCM,
395 inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE);
396 }
397 break;
398 default:
399 return -EINVAL;
400 }
401 return 0;
402}
403
312/* framebuffer ops */ 404/* framebuffer ops */
313static struct fb_ops mb862xxfb_ops = { 405static struct fb_ops mb862xxfb_ops = {
314 .owner = THIS_MODULE, 406 .owner = THIS_MODULE,
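
Editor's note: the GC_CAP_CSC value written in the MB862XX_L1_SET_CFG case above packs two fixed-point scale ratios with 11 fractional bits, i.e. (src << 11) / dst, so 0x800 means 1:1 and larger values shrink the capture window onto the screen. A stand-alone sketch of that arithmetic (illustration only, not driver code):

/* Illustrates the 11-bit fixed-point ratios written to GC_CAP_CSC:
 * 0x800 (1 << 11) is a 1:1 ratio, 0x1000 halves the window. */
#include <stdio.h>

static unsigned int csc_ratio(unsigned int src, unsigned int dst)
{
	return (src << 11) / dst;	/* same formula as the ioctl handler */
}

int main(void)
{
	/* PAL capture window scaled down onto a 360x288 destination */
	printf("h: 0x%03x v: 0x%03x\n",
	       csc_ratio(720, 360),	/* 0x1000, i.e. 2:1 horizontally */
	       csc_ratio(576, 288));	/* 0x1000, i.e. 2:1 vertically */
	return 0;
}
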
@@ -320,6 +412,7 @@ static struct fb_ops mb862xxfb_ops = {
320 .fb_fillrect = cfb_fillrect, 412 .fb_fillrect = cfb_fillrect,
321 .fb_copyarea = cfb_copyarea, 413 .fb_copyarea = cfb_copyarea,
322 .fb_imageblit = cfb_imageblit, 414 .fb_imageblit = cfb_imageblit,
415 .fb_ioctl = mb862xxfb_ioctl,
323}; 416};
324 417
325/* initialize fb_info data */ 418/* initialize fb_info data */
@@ -328,6 +421,7 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
328 struct mb862xxfb_par *par = fbi->par; 421 struct mb862xxfb_par *par = fbi->par;
329 struct mb862xx_gc_mode *mode = par->gc_mode; 422 struct mb862xx_gc_mode *mode = par->gc_mode;
330 unsigned long reg; 423 unsigned long reg;
424 int stride;
331 425
332 fbi->fbops = &mb862xxfb_ops; 426 fbi->fbops = &mb862xxfb_ops;
333 fbi->pseudo_palette = par->pseudo_palette; 427 fbi->pseudo_palette = par->pseudo_palette;
@@ -336,7 +430,6 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
336 430
337 strcpy(fbi->fix.id, DRV_NAME); 431 strcpy(fbi->fix.id, DRV_NAME);
338 fbi->fix.smem_start = (unsigned long)par->fb_base_phys; 432 fbi->fix.smem_start = (unsigned long)par->fb_base_phys;
339 fbi->fix.smem_len = par->mapped_vram;
340 fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys; 433 fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys;
341 fbi->fix.mmio_len = par->mmio_len; 434 fbi->fix.mmio_len = par->mmio_len;
342 fbi->fix.accel = FB_ACCEL_NONE; 435 fbi->fix.accel = FB_ACCEL_NONE;
@@ -420,6 +513,28 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
420 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; 513 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
421 fbi->fix.line_length = (fbi->var.xres_virtual * 514 fbi->fix.line_length = (fbi->var.xres_virtual *
422 fbi->var.bits_per_pixel) / 8; 515 fbi->var.bits_per_pixel) / 8;
516 fbi->fix.smem_len = fbi->fix.line_length * fbi->var.yres_virtual;
517
518 /*
519 * reserve space for capture buffers and two cursors
520 * at the end of vram: 720x576 * 2 * 2.2 + 64x64 * 16.
521 */
522 par->cap_buf = par->mapped_vram - 0x1bd800 - 0x10000;
523 par->cap_len = 0x1bd800;
524 par->l1_cfg.sx = 0;
525 par->l1_cfg.sy = 0;
526 par->l1_cfg.sw = 720;
527 par->l1_cfg.sh = 576;
528 par->l1_cfg.dx = 0;
529 par->l1_cfg.dy = 0;
530 par->l1_cfg.dw = 720;
531 par->l1_cfg.dh = 576;
532 stride = par->l1_cfg.sw * (fbi->var.bits_per_pixel / 8);
533 par->l1_stride = stride / 64 + ((stride % 64) ? 1 : 0);
534 outreg(cap, GC_CAP_CBM, GC_CBM_OO | GC_CBM_CBST |
535 (par->l1_stride << 16));
536 outreg(cap, GC_CAP_CBOA, par->cap_buf);
537 outreg(cap, GC_CAP_CBLA, par->cap_buf + par->cap_len);
423 return 0; 538 return 0;
424} 539}
425 540
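
Editor's note: the constants in mb862xxfb_init_fbinfo() above follow from the in-code comment: 720x576 capture pixels times 2 times 2.2 bytes comes to 0x1bd800, the 64x64 * 16 cursor area comes to 0x10000, and the L1 stride is the capture line pitch rounded up to 64-byte units. A small stand-alone check of that arithmetic, assuming a 16 bpp capture source (illustration only, not driver code):

/* Reproduces the reserved-area arithmetic used above. */
#include <stdio.h>

int main(void)
{
	unsigned long cap_len = 720 * 576 * 2 * 11 / 5;	/* * 2 * 2.2 = 0x1bd800 */
	unsigned long cursors = 64 * 64 * 16;		/* = 0x10000 */
	unsigned int stride = 720 * 2;			/* 16 bpp, bytes per line */
	unsigned int l1_stride = stride / 64 + (stride % 64 ? 1 : 0);

	printf("cap_len=0x%lx cursors=0x%lx l1_stride=%u\n",
	       cap_len, cursors, l1_stride);	/* 0x1bd800, 0x10000, 23 */
	return 0;
}
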
@@ -742,22 +857,38 @@ static int coralp_init(struct mb862xxfb_par *par)
742 857
743 par->refclk = GC_DISP_REFCLK_400; 858 par->refclk = GC_DISP_REFCLK_400;
744 859
860 if (par->mapped_vram >= 0x2000000) {
861 /* relocate gdc registers space */
862 writel(1, par->fb_base + MB862XX_MMIO_BASE + GC_RSW);
863 udelay(1); /* wait at least 20 bus cycles */
864 }
865
745 ver = inreg(host, GC_CID); 866 ver = inreg(host, GC_CID);
746 cn = (ver & GC_CID_CNAME_MSK) >> 8; 867 cn = (ver & GC_CID_CNAME_MSK) >> 8;
747 ver = ver & GC_CID_VERSION_MSK; 868 ver = ver & GC_CID_VERSION_MSK;
748 if (cn == 3) { 869 if (cn == 3) {
870 unsigned long reg;
871
749 dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",\ 872 dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",\
750 (ver == 6) ? "P" : (ver == 8) ? "PA" : "?", 873 (ver == 6) ? "P" : (ver == 8) ? "PA" : "?",
751 par->pdev->revision); 874 par->pdev->revision);
752 outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133); 875 reg = inreg(disp, GC_DCM1);
753 udelay(200); 876 if (reg & GC_DCM01_DEN && reg & GC_DCM01_L0E)
754 outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL); 877 par->pre_init = 1;
755 udelay(10); 878
879 if (!par->pre_init) {
880 outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133);
881 udelay(200);
882 outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL);
883 udelay(10);
884 }
756 /* Clear interrupt status */ 885 /* Clear interrupt status */
757 outreg(host, GC_IST, 0); 886 outreg(host, GC_IST, 0);
758 } else { 887 } else {
759 return -ENODEV; 888 return -ENODEV;
760 } 889 }
890
891 mb862xx_i2c_init(par);
761 return 0; 892 return 0;
762} 893}
763 894
@@ -899,7 +1030,13 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
899 case PCI_DEVICE_ID_FUJITSU_CORALPA: 1030 case PCI_DEVICE_ID_FUJITSU_CORALPA:
900 par->fb_base_phys = pci_resource_start(par->pdev, 0); 1031 par->fb_base_phys = pci_resource_start(par->pdev, 0);
901 par->mapped_vram = CORALP_MEM_SIZE; 1032 par->mapped_vram = CORALP_MEM_SIZE;
902 par->mmio_base_phys = par->fb_base_phys + MB862XX_MMIO_BASE; 1033 if (par->mapped_vram >= 0x2000000) {
1034 par->mmio_base_phys = par->fb_base_phys +
1035 MB862XX_MMIO_HIGH_BASE;
1036 } else {
1037 par->mmio_base_phys = par->fb_base_phys +
1038 MB862XX_MMIO_BASE;
1039 }
903 par->mmio_len = MB862XX_MMIO_SIZE; 1040 par->mmio_len = MB862XX_MMIO_SIZE;
904 par->type = BT_CORALP; 1041 par->type = BT_CORALP;
905 break; 1042 break;
@@ -1009,6 +1146,8 @@ static void __devexit mb862xx_pci_remove(struct pci_dev *pdev)
1009 outreg(host, GC_IMASK, 0); 1146 outreg(host, GC_IMASK, 0);
1010 } 1147 }
1011 1148
1149 mb862xx_i2c_exit(par);
1150
1012 device_remove_file(&pdev->dev, &dev_attr_dispregs); 1151 device_remove_file(&pdev->dev, &dev_attr_dispregs);
1013 1152
1014 pci_set_drvdata(pdev, NULL); 1153 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
index 49226a1b909e..25db55696e14 100644
--- a/drivers/video/omap/Makefile
+++ b/drivers/video/omap/Makefile
@@ -30,7 +30,6 @@ objs-y$(CONFIG_MACH_OMAP_APOLLON) += lcd_apollon.o
30objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o 30objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o
31objs-y$(CONFIG_MACH_OMAP_3430SDP) += lcd_2430sdp.o 31objs-y$(CONFIG_MACH_OMAP_3430SDP) += lcd_2430sdp.o
32objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp.o 32objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp.o
33objs-y$(CONFIG_MACH_OMAP2EVM) += lcd_omap2evm.o
34objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o 33objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o
35objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o 34objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o
36objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o 35objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 529483467abf..0ccd7adf47bb 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -922,14 +922,14 @@ static int get_dss_clocks(void)
922 return PTR_ERR(dispc.dss_ick); 922 return PTR_ERR(dispc.dss_ick);
923 } 923 }
924 924
925 dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck"); 925 dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "fck");
926 if (IS_ERR(dispc.dss1_fck)) { 926 if (IS_ERR(dispc.dss1_fck)) {
927 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 927 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
928 clk_put(dispc.dss_ick); 928 clk_put(dispc.dss_ick);
929 return PTR_ERR(dispc.dss1_fck); 929 return PTR_ERR(dispc.dss1_fck);
930 } 930 }
931 931
932 dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_fck"); 932 dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_clk");
933 if (IS_ERR(dispc.dss_54m_fck)) { 933 if (IS_ERR(dispc.dss_54m_fck)) {
934 dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); 934 dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
935 clk_put(dispc.dss_ick); 935 clk_put(dispc.dss_ick);
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
deleted file mode 100644
index 7e7a65c08452..000000000000
--- a/drivers/video/omap/lcd_omap2evm.c
+++ /dev/null
@@ -1,192 +0,0 @@
1/*
2 * LCD panel support for the MISTRAL OMAP2EVM board
3 *
4 * Author: Arun C <arunedarath@mistralsolutions.com>
5 *
6 * Derived from drivers/video/omap/lcd_omap3evm.c
7 * Derived from drivers/video/omap/lcd-apollon.c
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/gpio.h>
27#include <linux/i2c/twl.h>
28
29#include <plat/mux.h>
30#include <asm/mach-types.h>
31
32#include "omapfb.h"
33
34#define LCD_PANEL_ENABLE_GPIO 154
35#define LCD_PANEL_LR 128
36#define LCD_PANEL_UD 129
37#define LCD_PANEL_INI 152
38#define LCD_PANEL_QVGA 148
39#define LCD_PANEL_RESB 153
40
41#define TWL_LED_LEDEN 0x00
42#define TWL_PWMA_PWMAON 0x00
43#define TWL_PWMA_PWMAOFF 0x01
44
45static unsigned int bklight_level;
46
47static int omap2evm_panel_init(struct lcd_panel *panel,
48 struct omapfb_device *fbdev)
49{
50 gpio_request(LCD_PANEL_ENABLE_GPIO, "LCD enable");
51 gpio_request(LCD_PANEL_LR, "LCD lr");
52 gpio_request(LCD_PANEL_UD, "LCD ud");
53 gpio_request(LCD_PANEL_INI, "LCD ini");
54 gpio_request(LCD_PANEL_QVGA, "LCD qvga");
55 gpio_request(LCD_PANEL_RESB, "LCD resb");
56
57 gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 1);
58 gpio_direction_output(LCD_PANEL_RESB, 1);
59 gpio_direction_output(LCD_PANEL_INI, 1);
60 gpio_direction_output(LCD_PANEL_QVGA, 0);
61 gpio_direction_output(LCD_PANEL_LR, 1);
62 gpio_direction_output(LCD_PANEL_UD, 1);
63
64 twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
65 twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
66 twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
67 bklight_level = 100;
68
69 return 0;
70}
71
72static void omap2evm_panel_cleanup(struct lcd_panel *panel)
73{
74 gpio_free(LCD_PANEL_RESB);
75 gpio_free(LCD_PANEL_QVGA);
76 gpio_free(LCD_PANEL_INI);
77 gpio_free(LCD_PANEL_UD);
78 gpio_free(LCD_PANEL_LR);
79 gpio_free(LCD_PANEL_ENABLE_GPIO);
80}
81
82static int omap2evm_panel_enable(struct lcd_panel *panel)
83{
84 gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0);
85 return 0;
86}
87
88static void omap2evm_panel_disable(struct lcd_panel *panel)
89{
90 gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1);
91}
92
93static unsigned long omap2evm_panel_get_caps(struct lcd_panel *panel)
94{
95 return 0;
96}
97
98static int omap2evm_bklight_setlevel(struct lcd_panel *panel,
99 unsigned int level)
100{
101 u8 c;
102 if ((level >= 0) && (level <= 100)) {
103 c = (125 * (100 - level)) / 100 + 2;
104 twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
105 bklight_level = level;
106 }
107 return 0;
108}
109
110static unsigned int omap2evm_bklight_getlevel(struct lcd_panel *panel)
111{
112 return bklight_level;
113}
114
115static unsigned int omap2evm_bklight_getmaxlevel(struct lcd_panel *panel)
116{
117 return 100;
118}
119
120struct lcd_panel omap2evm_panel = {
121 .name = "omap2evm",
122 .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
123 OMAP_LCDC_INV_HSYNC,
124
125 .bpp = 16,
126 .data_lines = 18,
127 .x_res = 480,
128 .y_res = 640,
129 .hsw = 3,
130 .hfp = 0,
131 .hbp = 28,
132 .vsw = 2,
133 .vfp = 1,
134 .vbp = 0,
135
136 .pixel_clock = 20000,
137
138 .init = omap2evm_panel_init,
139 .cleanup = omap2evm_panel_cleanup,
140 .enable = omap2evm_panel_enable,
141 .disable = omap2evm_panel_disable,
142 .get_caps = omap2evm_panel_get_caps,
143 .set_bklight_level = omap2evm_bklight_setlevel,
144 .get_bklight_level = omap2evm_bklight_getlevel,
145 .get_bklight_max = omap2evm_bklight_getmaxlevel,
146};
147
148static int omap2evm_panel_probe(struct platform_device *pdev)
149{
150 omapfb_register_panel(&omap2evm_panel);
151 return 0;
152}
153
154static int omap2evm_panel_remove(struct platform_device *pdev)
155{
156 return 0;
157}
158
159static int omap2evm_panel_suspend(struct platform_device *pdev,
160 pm_message_t mesg)
161{
162 return 0;
163}
164
165static int omap2evm_panel_resume(struct platform_device *pdev)
166{
167 return 0;
168}
169
170struct platform_driver omap2evm_panel_driver = {
171 .probe = omap2evm_panel_probe,
172 .remove = omap2evm_panel_remove,
173 .suspend = omap2evm_panel_suspend,
174 .resume = omap2evm_panel_resume,
175 .driver = {
176 .name = "omap2evm_lcd",
177 .owner = THIS_MODULE,
178 },
179};
180
181static int __init omap2evm_panel_drv_init(void)
182{
183 return platform_driver_register(&omap2evm_panel_driver);
184}
185
186static void __exit omap2evm_panel_drv_exit(void)
187{
188 platform_driver_unregister(&omap2evm_panel_driver);
189}
190
191module_init(omap2evm_panel_drv_init);
192module_exit(omap2evm_panel_drv_exit);
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index e264efd0278f..b3ddd743d8a6 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -90,7 +90,7 @@ static void omapdss_release(struct device *dev)
90 90
91/* dummy device for clocks */ 91/* dummy device for clocks */
92static struct platform_device omapdss_device = { 92static struct platform_device omapdss_device = {
93 .name = "omapdss", 93 .name = "omapdss_dss",
94 .id = -1, 94 .id = -1,
95 .dev = { 95 .dev = {
96 .release = omapdss_release, 96 .release = omapdss_release,
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index eada9f12efc7..0c6981f1a4a3 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -90,7 +90,7 @@ static int rfbi_get_clocks(void)
90 return PTR_ERR(rfbi.dss_ick); 90 return PTR_ERR(rfbi.dss_ick);
91 } 91 }
92 92
93 rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "dss1_fck"); 93 rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "fck");
94 if (IS_ERR(rfbi.dss1_fck)) { 94 if (IS_ERR(rfbi.dss1_fck)) {
95 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); 95 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
96 clk_put(rfbi.dss_ick); 96 clk_put(rfbi.dss_ick);
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
index d853d05dad31..5ddef129f798 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/omap2/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_OMAP2_VRAM) += vram.o 1obj-$(CONFIG_OMAP2_VRAM) += vram.o
2obj-$(CONFIG_OMAP2_VRFB) += vrfb.o 2obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
3 3
4obj-y += dss/ 4obj-$(CONFIG_OMAP2_DSS) += dss/
5obj-y += omapfb/ 5obj-$(CONFIG_FB_OMAP2) += omapfb/
6obj-y += displays/ 6obj-y += displays/
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index d18ad6b2372a..609a28073178 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -3,6 +3,7 @@ menu "OMAP2/3 Display Device Drivers"
3 3
4config PANEL_GENERIC_DPI 4config PANEL_GENERIC_DPI
5 tristate "Generic DPI Panel" 5 tristate "Generic DPI Panel"
6 depends on OMAP2_DSS_DPI
6 help 7 help
7 Generic DPI panel driver. 8 Generic DPI panel driver.
8 Supports DVI output for Beagle and OMAP3 SDP. 9 Supports DVI output for Beagle and OMAP3 SDP.
@@ -11,20 +12,20 @@ config PANEL_GENERIC_DPI
11 12
12config PANEL_LGPHILIPS_LB035Q02 13config PANEL_LGPHILIPS_LB035Q02
13 tristate "LG.Philips LB035Q02 LCD Panel" 14 tristate "LG.Philips LB035Q02 LCD Panel"
14 depends on OMAP2_DSS && SPI 15 depends on OMAP2_DSS_DPI && SPI
15 help 16 help
16 LCD Panel used on the Gumstix Overo Palo35 17 LCD Panel used on the Gumstix Overo Palo35
17 18
18config PANEL_SHARP_LS037V7DW01 19config PANEL_SHARP_LS037V7DW01
19 tristate "Sharp LS037V7DW01 LCD Panel" 20 tristate "Sharp LS037V7DW01 LCD Panel"
20 depends on OMAP2_DSS 21 depends on OMAP2_DSS_DPI
21 select BACKLIGHT_CLASS_DEVICE 22 select BACKLIGHT_CLASS_DEVICE
22 help 23 help
23 LCD Panel used in TI's SDP3430 and EVM boards 24 LCD Panel used in TI's SDP3430 and EVM boards
24 25
25config PANEL_NEC_NL8048HL11_01B 26config PANEL_NEC_NL8048HL11_01B
26 tristate "NEC NL8048HL11-01B Panel" 27 tristate "NEC NL8048HL11-01B Panel"
27 depends on OMAP2_DSS 28 depends on OMAP2_DSS_DPI
28 help 29 help
29 This NEC NL8048HL11-01B panel is TFT LCD 30 This NEC NL8048HL11-01B panel is TFT LCD
30 used in the Zoom2/3/3630 sdp boards. 31 used in the Zoom2/3/3630 sdp boards.
@@ -37,7 +38,7 @@ config PANEL_TAAL
37 38
38config PANEL_TPO_TD043MTEA1 39config PANEL_TPO_TD043MTEA1
39 tristate "TPO TD043MTEA1 LCD Panel" 40 tristate "TPO TD043MTEA1 LCD Panel"
40 depends on OMAP2_DSS && SPI 41 depends on OMAP2_DSS_DPI && SPI
41 help 42 help
42 LCD Panel used in OMAP3 Pandora 43 LCD Panel used in OMAP3 Pandora
43 44
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index 7e04c921aa2a..dbd59b8e5b36 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -30,7 +30,7 @@
30#include <linux/backlight.h> 30#include <linux/backlight.h>
31#include <linux/fb.h> 31#include <linux/fb.h>
32 32
33#include <plat/display.h> 33#include <video/omapdss.h>
34 34
35#define MIPID_CMD_READ_DISP_ID 0x04 35#define MIPID_CMD_READ_DISP_ID 0x04
36#define MIPID_CMD_READ_RED 0x06 36#define MIPID_CMD_READ_RED 0x06
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 4a9b9ff59467..9c90f75653fb 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -33,8 +33,9 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <video/omapdss.h>
36 37
37#include <plat/panel-generic-dpi.h> 38#include <video/omap-panel-generic-dpi.h>
38 39
39struct panel_config { 40struct panel_config {
40 struct omap_video_timings timings; 41 struct omap_video_timings timings;
@@ -181,6 +182,56 @@ static struct panel_config generic_dpi_panels[] = {
181 .power_off_delay = 0, 182 .power_off_delay = 0,
182 .name = "samsung_lte430wq_f0c", 183 .name = "samsung_lte430wq_f0c",
183 }, 184 },
185
186 /* Seiko 70WVW1TZ3Z3 */
187 {
188 {
189 .x_res = 800,
190 .y_res = 480,
191
192 .pixel_clock = 33000,
193
194 .hsw = 128,
195 .hfp = 10,
196 .hbp = 10,
197
198 .vsw = 2,
199 .vfp = 4,
200 .vbp = 11,
201 },
202 .acbi = 0x0,
203 .acb = 0x0,
204 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
205 OMAP_DSS_LCD_IHS,
206 .power_on_delay = 0,
207 .power_off_delay = 0,
208 .name = "seiko_70wvw1tz3",
209 },
210
211 /* Powertip PH480272T */
212 {
213 {
214 .x_res = 480,
215 .y_res = 272,
216
217 .pixel_clock = 9000,
218
219 .hsw = 40,
220 .hfp = 2,
221 .hbp = 2,
222
223 .vsw = 10,
224 .vfp = 2,
225 .vbp = 2,
226 },
227 .acbi = 0x0,
228 .acb = 0x0,
229 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
230 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
231 .power_on_delay = 0,
232 .power_off_delay = 0,
233 .name = "powertip_ph480272t",
234 },
184}; 235};
185 236
186struct panel_drv_data { 237struct panel_drv_data {
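
Editor's note: the two panel_config entries added above can be sanity-checked against the expected refresh rate, since pixel_clock (in kHz) divided by the total line length times the total frame length gives roughly 70 Hz for the Seiko 70WVW1TZ3Z3 and roughly 60 Hz for the Powertip PH480272T. A stand-alone sketch of that check (illustration only, not driver code):

/* Refresh-rate check for the new generic DPI panel timings. */
#include <stdio.h>

static double refresh(unsigned pck_khz, unsigned x, unsigned hsw, unsigned hfp,
		      unsigned hbp, unsigned y, unsigned vsw, unsigned vfp,
		      unsigned vbp)
{
	return pck_khz * 1000.0 / ((x + hsw + hfp + hbp) * (y + vsw + vfp + vbp));
}

int main(void)
{
	printf("seiko:    %.1f Hz\n", refresh(33000, 800, 128, 10, 10, 480, 2, 4, 11));
	printf("powertip: %.1f Hz\n", refresh(9000, 480, 40, 2, 2, 272, 10, 2, 2));
	return 0;
}
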
@@ -285,7 +336,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
285 return 0; 336 return 0;
286} 337}
287 338
288static void generic_dpi_panel_remove(struct omap_dss_device *dssdev) 339static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
289{ 340{
290 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); 341 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
291 342
@@ -358,7 +409,7 @@ static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
358 409
359static struct omap_dss_driver dpi_driver = { 410static struct omap_dss_driver dpi_driver = {
360 .probe = generic_dpi_panel_probe, 411 .probe = generic_dpi_panel_probe,
361 .remove = generic_dpi_panel_remove, 412 .remove = __exit_p(generic_dpi_panel_remove),
362 413
363 .enable = generic_dpi_panel_enable, 414 .enable = generic_dpi_panel_enable,
364 .disable = generic_dpi_panel_disable, 415 .disable = generic_dpi_panel_disable,
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
index 271324db2436..e0eb35be303e 100644
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -21,7 +21,7 @@
21#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23 23
24#include <plat/display.h> 24#include <video/omapdss.h>
25 25
26struct lb035q02_data { 26struct lb035q02_data {
27 struct mutex lock; 27 struct mutex lock;
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index 925e0fadff54..2ba9d0ca187c 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -22,7 +22,7 @@
22#include <linux/backlight.h> 22#include <linux/backlight.h>
23#include <linux/fb.h> 23#include <linux/fb.h>
24 24
25#include <plat/display.h> 25#include <video/omapdss.h>
26 26
27#define LCD_XRES 800 27#define LCD_XRES 800
28#define LCD_YRES 480 28#define LCD_YRES 480
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index d2b35d2df2a6..ba38b3ad17d6 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -25,7 +25,7 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#include <plat/display.h> 28#include <video/omapdss.h>
29 29
30struct sharp_data { 30struct sharp_data {
31 struct backlight_device *bl; 31 struct backlight_device *bl;
@@ -120,7 +120,7 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
120 return 0; 120 return 0;
121} 121}
122 122
123static void sharp_ls_panel_remove(struct omap_dss_device *dssdev) 123static void __exit sharp_ls_panel_remove(struct omap_dss_device *dssdev)
124{ 124{
125 struct sharp_data *sd = dev_get_drvdata(&dssdev->dev); 125 struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
126 struct backlight_device *bl = sd->bl; 126 struct backlight_device *bl = sd->bl;
@@ -205,7 +205,7 @@ static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
205 205
206static struct omap_dss_driver sharp_ls_driver = { 206static struct omap_dss_driver sharp_ls_driver = {
207 .probe = sharp_ls_panel_probe, 207 .probe = sharp_ls_panel_probe,
208 .remove = sharp_ls_panel_remove, 208 .remove = __exit_p(sharp_ls_panel_remove),
209 209
210 .enable = sharp_ls_panel_enable, 210 .enable = sharp_ls_panel_enable,
211 .disable = sharp_ls_panel_disable, 211 .disable = sharp_ls_panel_disable,
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index adc9900458e1..fdd5d4ae437d 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -33,8 +33,8 @@
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35 35
36#include <plat/display.h> 36#include <video/omapdss.h>
37#include <plat/nokia-dsi-panel.h> 37#include <video/omap-panel-nokia-dsi.h>
38 38
39/* DSI Virtual channel. Hardcoded for now. */ 39/* DSI Virtual channel. Hardcoded for now. */
40#define TCH 0 40#define TCH 0
@@ -63,12 +63,12 @@
63#define DCS_GET_ID2 0xdb 63#define DCS_GET_ID2 0xdb
64#define DCS_GET_ID3 0xdc 64#define DCS_GET_ID3 0xdc
65 65
66#define TAAL_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
67
68static irqreturn_t taal_te_isr(int irq, void *data); 66static irqreturn_t taal_te_isr(int irq, void *data);
69static void taal_te_timeout_work_callback(struct work_struct *work); 67static void taal_te_timeout_work_callback(struct work_struct *work);
70static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable); 68static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
71 69
70static int taal_panel_reset(struct omap_dss_device *dssdev);
71
72struct panel_regulator { 72struct panel_regulator {
73 struct regulator *regulator; 73 struct regulator *regulator;
74 const char *name; 74 const char *name;
@@ -229,8 +229,14 @@ struct taal_data {
229 229
230 bool intro_printed; 230 bool intro_printed;
231 231
232 struct workqueue_struct *esd_wq; 232 struct workqueue_struct *workqueue;
233
233 struct delayed_work esd_work; 234 struct delayed_work esd_work;
235 unsigned esd_interval;
236
237 bool ulps_enabled;
238 unsigned ulps_timeout;
239 struct delayed_work ulps_work;
234 240
235 struct panel_config *panel_config; 241 struct panel_config *panel_config;
236}; 242};
@@ -242,6 +248,7 @@ static inline struct nokia_dsi_panel_data
242} 248}
243 249
244static void taal_esd_work(struct work_struct *work); 250static void taal_esd_work(struct work_struct *work);
251static void taal_ulps_work(struct work_struct *work);
245 252
246static void hw_guard_start(struct taal_data *td, int guard_msec) 253static void hw_guard_start(struct taal_data *td, int guard_msec)
247{ 254{
@@ -264,7 +271,7 @@ static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data)
264 int r; 271 int r;
265 u8 buf[1]; 272 u8 buf[1];
266 273
267 r = dsi_vc_dcs_read(td->channel, dcs_cmd, buf, 1); 274 r = dsi_vc_dcs_read(td->dssdev, td->channel, dcs_cmd, buf, 1);
268 275
269 if (r < 0) 276 if (r < 0)
270 return r; 277 return r;
@@ -276,7 +283,7 @@ static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data)
276 283
277static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd) 284static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd)
278{ 285{
279 return dsi_vc_dcs_write(td->channel, &dcs_cmd, 1); 286 return dsi_vc_dcs_write(td->dssdev, td->channel, &dcs_cmd, 1);
280} 287}
281 288
282static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param) 289static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param)
@@ -284,7 +291,7 @@ static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param)
284 u8 buf[2]; 291 u8 buf[2];
285 buf[0] = dcs_cmd; 292 buf[0] = dcs_cmd;
286 buf[1] = param; 293 buf[1] = param;
287 return dsi_vc_dcs_write(td->channel, buf, 2); 294 return dsi_vc_dcs_write(td->dssdev, td->channel, buf, 2);
288} 295}
289 296
290static int taal_sleep_in(struct taal_data *td) 297static int taal_sleep_in(struct taal_data *td)
@@ -296,7 +303,7 @@ static int taal_sleep_in(struct taal_data *td)
296 hw_guard_wait(td); 303 hw_guard_wait(td);
297 304
298 cmd = DCS_SLEEP_IN; 305 cmd = DCS_SLEEP_IN;
299 r = dsi_vc_dcs_write_nosync(td->channel, &cmd, 1); 306 r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1);
300 if (r) 307 if (r)
301 return r; 308 return r;
302 309
@@ -402,7 +409,7 @@ static int taal_set_update_window(struct taal_data *td,
402 buf[3] = (x2 >> 8) & 0xff; 409 buf[3] = (x2 >> 8) & 0xff;
403 buf[4] = (x2 >> 0) & 0xff; 410 buf[4] = (x2 >> 0) & 0xff;
404 411
405 r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf)); 412 r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf));
406 if (r) 413 if (r)
407 return r; 414 return r;
408 415
@@ -412,15 +419,132 @@ static int taal_set_update_window(struct taal_data *td,
412 buf[3] = (y2 >> 8) & 0xff; 419 buf[3] = (y2 >> 8) & 0xff;
413 buf[4] = (y2 >> 0) & 0xff; 420 buf[4] = (y2 >> 0) & 0xff;
414 421
415 r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf)); 422 r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf));
416 if (r) 423 if (r)
417 return r; 424 return r;
418 425
419 dsi_vc_send_bta_sync(td->channel); 426 dsi_vc_send_bta_sync(td->dssdev, td->channel);
420 427
421 return r; 428 return r;
422} 429}
423 430
431static void taal_queue_esd_work(struct omap_dss_device *dssdev)
432{
433 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
434
435 if (td->esd_interval > 0)
436 queue_delayed_work(td->workqueue, &td->esd_work,
437 msecs_to_jiffies(td->esd_interval));
438}
439
440static void taal_cancel_esd_work(struct omap_dss_device *dssdev)
441{
442 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
443
444 cancel_delayed_work(&td->esd_work);
445}
446
447static void taal_queue_ulps_work(struct omap_dss_device *dssdev)
448{
449 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
450
451 if (td->ulps_timeout > 0)
452 queue_delayed_work(td->workqueue, &td->ulps_work,
453 msecs_to_jiffies(td->ulps_timeout));
454}
455
456static void taal_cancel_ulps_work(struct omap_dss_device *dssdev)
457{
458 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
459
460 cancel_delayed_work(&td->ulps_work);
461}
462
463static int taal_enter_ulps(struct omap_dss_device *dssdev)
464{
465 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
466 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
467 int r;
468
469 if (td->ulps_enabled)
470 return 0;
471
472 taal_cancel_ulps_work(dssdev);
473
474 r = _taal_enable_te(dssdev, false);
475 if (r)
476 goto err;
477
478 disable_irq(gpio_to_irq(panel_data->ext_te_gpio));
479
480 omapdss_dsi_display_disable(dssdev, false, true);
481
482 td->ulps_enabled = true;
483
484 return 0;
485
486err:
487 dev_err(&dssdev->dev, "enter ULPS failed");
488 taal_panel_reset(dssdev);
489
490 td->ulps_enabled = false;
491
492 taal_queue_ulps_work(dssdev);
493
494 return r;
495}
496
497static int taal_exit_ulps(struct omap_dss_device *dssdev)
498{
499 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
500 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
501 int r;
502
503 if (!td->ulps_enabled)
504 return 0;
505
506 r = omapdss_dsi_display_enable(dssdev);
507 if (r)
508 goto err;
509
510 omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
511
512 r = _taal_enable_te(dssdev, true);
513 if (r)
514 goto err;
515
516 enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
517
518 taal_queue_ulps_work(dssdev);
519
520 td->ulps_enabled = false;
521
522 return 0;
523
524err:
525 dev_err(&dssdev->dev, "exit ULPS failed");
526 r = taal_panel_reset(dssdev);
527
528 enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
529 td->ulps_enabled = false;
530
531 taal_queue_ulps_work(dssdev);
532
533 return r;
534}
535
536static int taal_wake_up(struct omap_dss_device *dssdev)
537{
538 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
539
540 if (td->ulps_enabled)
541 return taal_exit_ulps(dssdev);
542
543 taal_cancel_ulps_work(dssdev);
544 taal_queue_ulps_work(dssdev);
545 return 0;
546}
547
424static int taal_bl_update_status(struct backlight_device *dev) 548static int taal_bl_update_status(struct backlight_device *dev)
425{ 549{
426 struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev); 550 struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
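
Editor's note: taal_wake_up() above is what turns the new ULPS support into a single pattern that the rest of this patch repeats in the sysfs, update, TE, rotate and mirror paths: take the DSI bus lock, wake the link (leaving ULPS and re-arming the inactivity timer), do the DCS traffic, release the lock. A schematic sketch of that pattern, using the types and helpers from this patch with a placeholder for the actual DCS I/O (not a compilable unit on its own):

/* Schematic only: do_dcs_io() stands in for the real DCS read/write. */
static int taal_do_locked(struct omap_dss_device *dssdev,
			  int (*do_dcs_io)(struct taal_data *td))
{
	struct taal_data *td = dev_get_drvdata(&dssdev->dev);
	int r;

	mutex_lock(&td->lock);
	dsi_bus_lock(dssdev);

	r = taal_wake_up(dssdev);	/* exit ULPS, restart the ULPS timer */
	if (!r && td->enabled)
		r = do_dcs_io(td);	/* e.g. taal_dcs_write_1(td, DCS_WRITE_CABC, i) */

	dsi_bus_unlock(dssdev);
	mutex_unlock(&td->lock);
	return r;
}
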
@@ -441,9 +565,13 @@ static int taal_bl_update_status(struct backlight_device *dev)
441 565
442 if (td->use_dsi_bl) { 566 if (td->use_dsi_bl) {
443 if (td->enabled) { 567 if (td->enabled) {
444 dsi_bus_lock(); 568 dsi_bus_lock(dssdev);
445 r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level); 569
446 dsi_bus_unlock(); 570 r = taal_wake_up(dssdev);
571 if (!r)
572 r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level);
573
574 dsi_bus_unlock(dssdev);
447 } else { 575 } else {
448 r = 0; 576 r = 0;
449 } 577 }
@@ -504,9 +632,13 @@ static ssize_t taal_num_errors_show(struct device *dev,
504 mutex_lock(&td->lock); 632 mutex_lock(&td->lock);
505 633
506 if (td->enabled) { 634 if (td->enabled) {
507 dsi_bus_lock(); 635 dsi_bus_lock(dssdev);
508 r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors); 636
509 dsi_bus_unlock(); 637 r = taal_wake_up(dssdev);
638 if (!r)
639 r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors);
640
641 dsi_bus_unlock(dssdev);
510 } else { 642 } else {
511 r = -ENODEV; 643 r = -ENODEV;
512 } 644 }
@@ -530,9 +662,13 @@ static ssize_t taal_hw_revision_show(struct device *dev,
530 mutex_lock(&td->lock); 662 mutex_lock(&td->lock);
531 663
532 if (td->enabled) { 664 if (td->enabled) {
533 dsi_bus_lock(); 665 dsi_bus_lock(dssdev);
534 r = taal_get_id(td, &id1, &id2, &id3); 666
535 dsi_bus_unlock(); 667 r = taal_wake_up(dssdev);
668 if (!r)
669 r = taal_get_id(td, &id1, &id2, &id3);
670
671 dsi_bus_unlock(dssdev);
536 } else { 672 } else {
537 r = -ENODEV; 673 r = -ENODEV;
538 } 674 }
@@ -579,6 +715,7 @@ static ssize_t store_cabc_mode(struct device *dev,
579 struct omap_dss_device *dssdev = to_dss_device(dev); 715 struct omap_dss_device *dssdev = to_dss_device(dev);
580 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 716 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
581 int i; 717 int i;
718 int r;
582 719
583 for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { 720 for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
584 if (sysfs_streq(cabc_modes[i], buf)) 721 if (sysfs_streq(cabc_modes[i], buf))
@@ -591,10 +728,19 @@ static ssize_t store_cabc_mode(struct device *dev,
591 mutex_lock(&td->lock); 728 mutex_lock(&td->lock);
592 729
593 if (td->enabled) { 730 if (td->enabled) {
594 dsi_bus_lock(); 731 dsi_bus_lock(dssdev);
595 if (!td->cabc_broken) 732
596 taal_dcs_write_1(td, DCS_WRITE_CABC, i); 733 if (!td->cabc_broken) {
597 dsi_bus_unlock(); 734 r = taal_wake_up(dssdev);
735 if (r)
736 goto err;
737
738 r = taal_dcs_write_1(td, DCS_WRITE_CABC, i);
739 if (r)
740 goto err;
741 }
742
743 dsi_bus_unlock(dssdev);
598 } 744 }
599 745
600 td->cabc_mode = i; 746 td->cabc_mode = i;
@@ -602,6 +748,10 @@ static ssize_t store_cabc_mode(struct device *dev,
602 mutex_unlock(&td->lock); 748 mutex_unlock(&td->lock);
603 749
604 return count; 750 return count;
751err:
752 dsi_bus_unlock(dssdev);
753 mutex_unlock(&td->lock);
754 return r;
605} 755}
606 756
607static ssize_t show_cabc_available_modes(struct device *dev, 757static ssize_t show_cabc_available_modes(struct device *dev,
@@ -620,18 +770,161 @@ static ssize_t show_cabc_available_modes(struct device *dev,
620 return len < PAGE_SIZE ? len : PAGE_SIZE - 1; 770 return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
621} 771}
622 772
773static ssize_t taal_store_esd_interval(struct device *dev,
774 struct device_attribute *attr,
775 const char *buf, size_t count)
776{
777 struct omap_dss_device *dssdev = to_dss_device(dev);
778 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
779
780 unsigned long t;
781 int r;
782
783 r = strict_strtoul(buf, 10, &t);
784 if (r)
785 return r;
786
787 mutex_lock(&td->lock);
788 taal_cancel_esd_work(dssdev);
789 td->esd_interval = t;
790 if (td->enabled)
791 taal_queue_esd_work(dssdev);
792 mutex_unlock(&td->lock);
793
794 return count;
795}
796
797static ssize_t taal_show_esd_interval(struct device *dev,
798 struct device_attribute *attr,
799 char *buf)
800{
801 struct omap_dss_device *dssdev = to_dss_device(dev);
802 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
803 unsigned t;
804
805 mutex_lock(&td->lock);
806 t = td->esd_interval;
807 mutex_unlock(&td->lock);
808
809 return snprintf(buf, PAGE_SIZE, "%u\n", t);
810}
811
812static ssize_t taal_store_ulps(struct device *dev,
813 struct device_attribute *attr,
814 const char *buf, size_t count)
815{
816 struct omap_dss_device *dssdev = to_dss_device(dev);
817 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
818 unsigned long t;
819 int r;
820
821 r = strict_strtoul(buf, 10, &t);
822 if (r)
823 return r;
824
825 mutex_lock(&td->lock);
826
827 if (td->enabled) {
828 dsi_bus_lock(dssdev);
829
830 if (t)
831 r = taal_enter_ulps(dssdev);
832 else
833 r = taal_wake_up(dssdev);
834
835 dsi_bus_unlock(dssdev);
836 }
837
838 mutex_unlock(&td->lock);
839
840 if (r)
841 return r;
842
843 return count;
844}
845
846static ssize_t taal_show_ulps(struct device *dev,
847 struct device_attribute *attr,
848 char *buf)
849{
850 struct omap_dss_device *dssdev = to_dss_device(dev);
851 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
852 unsigned t;
853
854 mutex_lock(&td->lock);
855 t = td->ulps_enabled;
856 mutex_unlock(&td->lock);
857
858 return snprintf(buf, PAGE_SIZE, "%u\n", t);
859}
860
861static ssize_t taal_store_ulps_timeout(struct device *dev,
862 struct device_attribute *attr,
863 const char *buf, size_t count)
864{
865 struct omap_dss_device *dssdev = to_dss_device(dev);
866 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
867 unsigned long t;
868 int r;
869
870 r = strict_strtoul(buf, 10, &t);
871 if (r)
872 return r;
873
874 mutex_lock(&td->lock);
875 td->ulps_timeout = t;
876
877 if (td->enabled) {
878 /* taal_wake_up will restart the timer */
879 dsi_bus_lock(dssdev);
880 r = taal_wake_up(dssdev);
881 dsi_bus_unlock(dssdev);
882 }
883
884 mutex_unlock(&td->lock);
885
886 if (r)
887 return r;
888
889 return count;
890}
891
892static ssize_t taal_show_ulps_timeout(struct device *dev,
893 struct device_attribute *attr,
894 char *buf)
895{
896 struct omap_dss_device *dssdev = to_dss_device(dev);
897 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
898 unsigned t;
899
900 mutex_lock(&td->lock);
901 t = td->ulps_timeout;
902 mutex_unlock(&td->lock);
903
904 return snprintf(buf, PAGE_SIZE, "%u\n", t);
905}
906
623static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL); 907static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL);
624static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL); 908static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL);
625static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, 909static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
626 show_cabc_mode, store_cabc_mode); 910 show_cabc_mode, store_cabc_mode);
627static DEVICE_ATTR(cabc_available_modes, S_IRUGO, 911static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
628 show_cabc_available_modes, NULL); 912 show_cabc_available_modes, NULL);
913static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR,
914 taal_show_esd_interval, taal_store_esd_interval);
915static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
916 taal_show_ulps, taal_store_ulps);
917static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
918 taal_show_ulps_timeout, taal_store_ulps_timeout);
629 919
630static struct attribute *taal_attrs[] = { 920static struct attribute *taal_attrs[] = {
631 &dev_attr_num_dsi_errors.attr, 921 &dev_attr_num_dsi_errors.attr,
632 &dev_attr_hw_revision.attr, 922 &dev_attr_hw_revision.attr,
633 &dev_attr_cabc_mode.attr, 923 &dev_attr_cabc_mode.attr,
634 &dev_attr_cabc_available_modes.attr, 924 &dev_attr_cabc_available_modes.attr,
925 &dev_attr_esd_interval.attr,
926 &dev_attr_ulps.attr,
927 &dev_attr_ulps_timeout.attr,
635 NULL, 928 NULL,
636}; 929};
637 930
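
Editor's note: the three attributes registered above (esd_interval, ulps, ulps_timeout) make the ESD poll period and the ULPS inactivity timeout tunable at run time, both in milliseconds as implied by the msecs_to_jiffies() calls. A hypothetical userspace sketch of tuning them follows; the sysfs path depends on how the omapdss display device is named on a given board, so it is an assumption for illustration only.

/* Hypothetical example: poll for ESD every 5 s, enter ULPS after 250 ms. */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/devices/platform/omapdss/display0"; /* assumed path */
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/esd_interval", base);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "5000\n");
		fclose(f);
	}

	snprintf(path, sizeof(path), "%s/ulps_timeout", base);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "250\n");
		fclose(f);
	}
	return 0;
}
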
@@ -700,6 +993,9 @@ static int taal_probe(struct omap_dss_device *dssdev)
700 } 993 }
701 td->dssdev = dssdev; 994 td->dssdev = dssdev;
702 td->panel_config = panel_config; 995 td->panel_config = panel_config;
996 td->esd_interval = panel_data->esd_interval;
997 td->ulps_enabled = false;
998 td->ulps_timeout = panel_data->ulps_timeout;
703 999
704 mutex_init(&td->lock); 1000 mutex_init(&td->lock);
705 1001
@@ -710,13 +1006,14 @@ static int taal_probe(struct omap_dss_device *dssdev)
710 if (r) 1006 if (r)
711 goto err_reg; 1007 goto err_reg;
712 1008
713 td->esd_wq = create_singlethread_workqueue("taal_esd"); 1009 td->workqueue = create_singlethread_workqueue("taal_esd");
714 if (td->esd_wq == NULL) { 1010 if (td->workqueue == NULL) {
715 dev_err(&dssdev->dev, "can't create ESD workqueue\n"); 1011 dev_err(&dssdev->dev, "can't create ESD workqueue\n");
716 r = -ENOMEM; 1012 r = -ENOMEM;
717 goto err_wq; 1013 goto err_wq;
718 } 1014 }
719 INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work); 1015 INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
1016 INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
720 1017
721 dev_set_drvdata(&dssdev->dev, td); 1018 dev_set_drvdata(&dssdev->dev, td);
722 1019
@@ -734,8 +1031,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
734 props.max_brightness = 127; 1031 props.max_brightness = 127;
735 1032
736 props.type = BACKLIGHT_RAW; 1033 props.type = BACKLIGHT_RAW;
737 bldev = backlight_device_register("taal", &dssdev->dev, dssdev, 1034 bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev,
738 &taal_bl_ops, &props); 1035 dssdev, &taal_bl_ops, &props);
739 if (IS_ERR(bldev)) { 1036 if (IS_ERR(bldev)) {
740 r = PTR_ERR(bldev); 1037 r = PTR_ERR(bldev);
741 goto err_bl; 1038 goto err_bl;
@@ -810,7 +1107,7 @@ err_irq:
810err_gpio: 1107err_gpio:
811 backlight_device_unregister(bldev); 1108 backlight_device_unregister(bldev);
812err_bl: 1109err_bl:
813 destroy_workqueue(td->esd_wq); 1110 destroy_workqueue(td->workqueue);
814err_wq: 1111err_wq:
815 free_regulators(panel_config->regulators, panel_config->num_regulators); 1112 free_regulators(panel_config->regulators, panel_config->num_regulators);
816err_reg: 1113err_reg:
@@ -819,7 +1116,7 @@ err:
819 return r; 1116 return r;
820} 1117}
821 1118
822static void taal_remove(struct omap_dss_device *dssdev) 1119static void __exit taal_remove(struct omap_dss_device *dssdev)
823{ 1120{
824 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1121 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
825 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); 1122 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
@@ -841,8 +1138,9 @@ static void taal_remove(struct omap_dss_device *dssdev)
841 taal_bl_update_status(bldev); 1138 taal_bl_update_status(bldev);
842 backlight_device_unregister(bldev); 1139 backlight_device_unregister(bldev);
843 1140
844 cancel_delayed_work(&td->esd_work); 1141 taal_cancel_ulps_work(dssdev);
845 destroy_workqueue(td->esd_wq); 1142 taal_cancel_esd_work(dssdev);
1143 destroy_workqueue(td->workqueue);
846 1144
847 /* reset, to be sure that the panel is in a valid state */ 1145 /* reset, to be sure that the panel is in a valid state */
848 taal_hw_reset(dssdev); 1146 taal_hw_reset(dssdev);
@@ -867,7 +1165,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
867 1165
868 taal_hw_reset(dssdev); 1166 taal_hw_reset(dssdev);
869 1167
870 omapdss_dsi_vc_enable_hs(td->channel, false); 1168 omapdss_dsi_vc_enable_hs(dssdev, td->channel, false);
871 1169
872 r = taal_sleep_out(td); 1170 r = taal_sleep_out(td);
873 if (r) 1171 if (r)
@@ -924,7 +1222,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
924 td->intro_printed = true; 1222 td->intro_printed = true;
925 } 1223 }
926 1224
927 omapdss_dsi_vc_enable_hs(td->channel, true); 1225 omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
928 1226
929 return 0; 1227 return 0;
930err: 1228err:
@@ -932,7 +1230,7 @@ err:
932 1230
933 taal_hw_reset(dssdev); 1231 taal_hw_reset(dssdev);
934 1232
935 omapdss_dsi_display_disable(dssdev); 1233 omapdss_dsi_display_disable(dssdev, true, false);
936err0: 1234err0:
937 return r; 1235 return r;
938} 1236}
@@ -955,15 +1253,23 @@ static void taal_power_off(struct omap_dss_device *dssdev)
955 taal_hw_reset(dssdev); 1253 taal_hw_reset(dssdev);
956 } 1254 }
957 1255
958 omapdss_dsi_display_disable(dssdev); 1256 omapdss_dsi_display_disable(dssdev, true, false);
959 1257
960 td->enabled = 0; 1258 td->enabled = 0;
961} 1259}
962 1260
1261static int taal_panel_reset(struct omap_dss_device *dssdev)
1262{
1263 dev_err(&dssdev->dev, "performing LCD reset\n");
1264
1265 taal_power_off(dssdev);
1266 taal_hw_reset(dssdev);
1267 return taal_power_on(dssdev);
1268}
1269
963static int taal_enable(struct omap_dss_device *dssdev) 1270static int taal_enable(struct omap_dss_device *dssdev)
964{ 1271{
965 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1272 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
966 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
967 int r; 1273 int r;
968 1274
969 dev_dbg(&dssdev->dev, "enable\n"); 1275 dev_dbg(&dssdev->dev, "enable\n");
@@ -975,18 +1281,16 @@ static int taal_enable(struct omap_dss_device *dssdev)
975 goto err; 1281 goto err;
976 } 1282 }
977 1283
978 dsi_bus_lock(); 1284 dsi_bus_lock(dssdev);
979 1285
980 r = taal_power_on(dssdev); 1286 r = taal_power_on(dssdev);
981 1287
982 dsi_bus_unlock(); 1288 dsi_bus_unlock(dssdev);
983 1289
984 if (r) 1290 if (r)
985 goto err; 1291 goto err;
986 1292
987 if (panel_data->use_esd_check) 1293 taal_queue_esd_work(dssdev);
988 queue_delayed_work(td->esd_wq, &td->esd_work,
989 TAAL_ESD_CHECK_PERIOD);
990 1294
991 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 1295 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
992 1296
@@ -1007,14 +1311,17 @@ static void taal_disable(struct omap_dss_device *dssdev)
1007 1311
1008 mutex_lock(&td->lock); 1312 mutex_lock(&td->lock);
1009 1313
1010 cancel_delayed_work(&td->esd_work); 1314 taal_cancel_ulps_work(dssdev);
1315 taal_cancel_esd_work(dssdev);
1011 1316
1012 dsi_bus_lock(); 1317 dsi_bus_lock(dssdev);
1013 1318
1014 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 1319 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
1320 taal_wake_up(dssdev);
1015 taal_power_off(dssdev); 1321 taal_power_off(dssdev);
1322 }
1016 1323
1017 dsi_bus_unlock(); 1324 dsi_bus_unlock(dssdev);
1018 1325
1019 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 1326 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
1020 1327
@@ -1035,13 +1342,16 @@ static int taal_suspend(struct omap_dss_device *dssdev)
1035 goto err; 1342 goto err;
1036 } 1343 }
1037 1344
1038 cancel_delayed_work(&td->esd_work); 1345 taal_cancel_ulps_work(dssdev);
1346 taal_cancel_esd_work(dssdev);
1039 1347
1040 dsi_bus_lock(); 1348 dsi_bus_lock(dssdev);
1041 1349
1042 taal_power_off(dssdev); 1350 r = taal_wake_up(dssdev);
1351 if (!r)
1352 taal_power_off(dssdev);
1043 1353
1044 dsi_bus_unlock(); 1354 dsi_bus_unlock(dssdev);
1045 1355
1046 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; 1356 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
1047 1357
@@ -1056,7 +1366,6 @@ err:
1056static int taal_resume(struct omap_dss_device *dssdev) 1366static int taal_resume(struct omap_dss_device *dssdev)
1057{ 1367{
1058 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1368 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
1059 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
1060 int r; 1369 int r;
1061 1370
1062 dev_dbg(&dssdev->dev, "resume\n"); 1371 dev_dbg(&dssdev->dev, "resume\n");
@@ -1068,19 +1377,17 @@ static int taal_resume(struct omap_dss_device *dssdev)
1068 goto err; 1377 goto err;
1069 } 1378 }
1070 1379
1071 dsi_bus_lock(); 1380 dsi_bus_lock(dssdev);
1072 1381
1073 r = taal_power_on(dssdev); 1382 r = taal_power_on(dssdev);
1074 1383
1075 dsi_bus_unlock(); 1384 dsi_bus_unlock(dssdev);
1076 1385
1077 if (r) { 1386 if (r) {
1078 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 1387 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
1079 } else { 1388 } else {
1080 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 1389 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
1081 if (panel_data->use_esd_check) 1390 taal_queue_esd_work(dssdev);
1082 queue_delayed_work(td->esd_wq, &td->esd_work,
1083 TAAL_ESD_CHECK_PERIOD);
1084 } 1391 }
1085 1392
1086 mutex_unlock(&td->lock); 1393 mutex_unlock(&td->lock);
@@ -1095,7 +1402,7 @@ static void taal_framedone_cb(int err, void *data)
1095{ 1402{
1096 struct omap_dss_device *dssdev = data; 1403 struct omap_dss_device *dssdev = data;
1097 dev_dbg(&dssdev->dev, "framedone, err %d\n", err); 1404 dev_dbg(&dssdev->dev, "framedone, err %d\n", err);
1098 dsi_bus_unlock(); 1405 dsi_bus_unlock(dssdev);
1099} 1406}
1100 1407
1101static irqreturn_t taal_te_isr(int irq, void *data) 1408static irqreturn_t taal_te_isr(int irq, void *data)
@@ -1123,7 +1430,7 @@ static irqreturn_t taal_te_isr(int irq, void *data)
1123 return IRQ_HANDLED; 1430 return IRQ_HANDLED;
1124err: 1431err:
1125 dev_err(&dssdev->dev, "start update failed\n"); 1432 dev_err(&dssdev->dev, "start update failed\n");
1126 dsi_bus_unlock(); 1433 dsi_bus_unlock(dssdev);
1127 return IRQ_HANDLED; 1434 return IRQ_HANDLED;
1128} 1435}
1129 1436
@@ -1136,7 +1443,7 @@ static void taal_te_timeout_work_callback(struct work_struct *work)
1136 dev_err(&dssdev->dev, "TE not received for 250ms!\n"); 1443 dev_err(&dssdev->dev, "TE not received for 250ms!\n");
1137 1444
1138 atomic_set(&td->do_update, 0); 1445 atomic_set(&td->do_update, 0);
1139 dsi_bus_unlock(); 1446 dsi_bus_unlock(dssdev);
1140} 1447}
1141 1448
1142static int taal_update(struct omap_dss_device *dssdev, 1449static int taal_update(struct omap_dss_device *dssdev,
@@ -1149,7 +1456,11 @@ static int taal_update(struct omap_dss_device *dssdev,
1149 dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); 1456 dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
1150 1457
1151 mutex_lock(&td->lock); 1458 mutex_lock(&td->lock);
1152 dsi_bus_lock(); 1459 dsi_bus_lock(dssdev);
1460
1461 r = taal_wake_up(dssdev);
1462 if (r)
1463 goto err;
1153 1464
1154 if (!td->enabled) { 1465 if (!td->enabled) {
1155 r = 0; 1466 r = 0;
@@ -1184,7 +1495,7 @@ static int taal_update(struct omap_dss_device *dssdev,
1184 mutex_unlock(&td->lock); 1495 mutex_unlock(&td->lock);
1185 return 0; 1496 return 0;
1186err: 1497err:
1187 dsi_bus_unlock(); 1498 dsi_bus_unlock(dssdev);
1188 mutex_unlock(&td->lock); 1499 mutex_unlock(&td->lock);
1189 return r; 1500 return r;
1190} 1501}
@@ -1196,8 +1507,8 @@ static int taal_sync(struct omap_dss_device *dssdev)
1196 dev_dbg(&dssdev->dev, "sync\n"); 1507 dev_dbg(&dssdev->dev, "sync\n");
1197 1508
1198 mutex_lock(&td->lock); 1509 mutex_lock(&td->lock);
1199 dsi_bus_lock(); 1510 dsi_bus_lock(dssdev);
1200 dsi_bus_unlock(); 1511 dsi_bus_unlock(dssdev);
1201 mutex_unlock(&td->lock); 1512 mutex_unlock(&td->lock);
1202 1513
1203 dev_dbg(&dssdev->dev, "sync done\n"); 1514 dev_dbg(&dssdev->dev, "sync done\n");
@@ -1235,9 +1546,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
1235 if (td->te_enabled == enable) 1546 if (td->te_enabled == enable)
1236 goto end; 1547 goto end;
1237 1548
1238 dsi_bus_lock(); 1549 dsi_bus_lock(dssdev);
1239 1550
1240 if (td->enabled) { 1551 if (td->enabled) {
1552 r = taal_wake_up(dssdev);
1553 if (r)
1554 goto err;
1555
1241 r = _taal_enable_te(dssdev, enable); 1556 r = _taal_enable_te(dssdev, enable);
1242 if (r) 1557 if (r)
1243 goto err; 1558 goto err;
@@ -1245,13 +1560,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
1245 1560
1246 td->te_enabled = enable; 1561 td->te_enabled = enable;
1247 1562
1248 dsi_bus_unlock(); 1563 dsi_bus_unlock(dssdev);
1249end: 1564end:
1250 mutex_unlock(&td->lock); 1565 mutex_unlock(&td->lock);
1251 1566
1252 return 0; 1567 return 0;
1253err: 1568err:
1254 dsi_bus_unlock(); 1569 dsi_bus_unlock(dssdev);
1255 mutex_unlock(&td->lock); 1570 mutex_unlock(&td->lock);
1256 1571
1257 return r; 1572 return r;
@@ -1281,9 +1596,13 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
1281 if (td->rotate == rotate) 1596 if (td->rotate == rotate)
1282 goto end; 1597 goto end;
1283 1598
1284 dsi_bus_lock(); 1599 dsi_bus_lock(dssdev);
1285 1600
1286 if (td->enabled) { 1601 if (td->enabled) {
1602 r = taal_wake_up(dssdev);
1603 if (r)
1604 goto err;
1605
1287 r = taal_set_addr_mode(td, rotate, td->mirror); 1606 r = taal_set_addr_mode(td, rotate, td->mirror);
1288 if (r) 1607 if (r)
1289 goto err; 1608 goto err;
@@ -1291,12 +1610,12 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
1291 1610
1292 td->rotate = rotate; 1611 td->rotate = rotate;
1293 1612
1294 dsi_bus_unlock(); 1613 dsi_bus_unlock(dssdev);
1295end: 1614end:
1296 mutex_unlock(&td->lock); 1615 mutex_unlock(&td->lock);
1297 return 0; 1616 return 0;
1298err: 1617err:
1299 dsi_bus_unlock(); 1618 dsi_bus_unlock(dssdev);
1300 mutex_unlock(&td->lock); 1619 mutex_unlock(&td->lock);
1301 return r; 1620 return r;
1302} 1621}
@@ -1325,8 +1644,12 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
1325 if (td->mirror == enable) 1644 if (td->mirror == enable)
1326 goto end; 1645 goto end;
1327 1646
1328 dsi_bus_lock(); 1647 dsi_bus_lock(dssdev);
1329 if (td->enabled) { 1648 if (td->enabled) {
1649 r = taal_wake_up(dssdev);
1650 if (r)
1651 goto err;
1652
1330 r = taal_set_addr_mode(td, td->rotate, enable); 1653 r = taal_set_addr_mode(td, td->rotate, enable);
1331 if (r) 1654 if (r)
1332 goto err; 1655 goto err;
@@ -1334,12 +1657,12 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
1334 1657
1335 td->mirror = enable; 1658 td->mirror = enable;
1336 1659
1337 dsi_bus_unlock(); 1660 dsi_bus_unlock(dssdev);
1338end: 1661end:
1339 mutex_unlock(&td->lock); 1662 mutex_unlock(&td->lock);
1340 return 0; 1663 return 0;
1341err: 1664err:
1342 dsi_bus_unlock(); 1665 dsi_bus_unlock(dssdev);
1343 mutex_unlock(&td->lock); 1666 mutex_unlock(&td->lock);
1344 return r; 1667 return r;
1345} 1668}
@@ -1369,7 +1692,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
1369 goto err1; 1692 goto err1;
1370 } 1693 }
1371 1694
1372 dsi_bus_lock(); 1695 dsi_bus_lock(dssdev);
1696
1697 r = taal_wake_up(dssdev);
1698 if (r)
1699 goto err2;
1373 1700
1374 r = taal_dcs_read_1(td, DCS_GET_ID1, &id1); 1701 r = taal_dcs_read_1(td, DCS_GET_ID1, &id1);
1375 if (r) 1702 if (r)
@@ -1381,11 +1708,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
1381 if (r) 1708 if (r)
1382 goto err2; 1709 goto err2;
1383 1710
1384 dsi_bus_unlock(); 1711 dsi_bus_unlock(dssdev);
1385 mutex_unlock(&td->lock); 1712 mutex_unlock(&td->lock);
1386 return 0; 1713 return 0;
1387err2: 1714err2:
1388 dsi_bus_unlock(); 1715 dsi_bus_unlock(dssdev);
1389err1: 1716err1:
1390 mutex_unlock(&td->lock); 1717 mutex_unlock(&td->lock);
1391 return r; 1718 return r;
@@ -1415,7 +1742,11 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1415 dssdev->panel.timings.x_res * 1742 dssdev->panel.timings.x_res *
1416 dssdev->panel.timings.y_res * 3); 1743 dssdev->panel.timings.y_res * 3);
1417 1744
1418 dsi_bus_lock(); 1745 dsi_bus_lock(dssdev);
1746
1747 r = taal_wake_up(dssdev);
1748 if (r)
1749 goto err2;
1419 1750
1420 /* plen 1 or 2 goes into short packet. until checksum error is fixed, 1751 /* plen 1 or 2 goes into short packet. until checksum error is fixed,
1421 * use short packets. plen 32 works, but bigger packets seem to cause 1752 * use short packets. plen 32 works, but bigger packets seem to cause
@@ -1427,7 +1758,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1427 1758
1428 taal_set_update_window(td, x, y, w, h); 1759 taal_set_update_window(td, x, y, w, h);
1429 1760
1430 r = dsi_vc_set_max_rx_packet_size(td->channel, plen); 1761 r = dsi_vc_set_max_rx_packet_size(dssdev, td->channel, plen);
1431 if (r) 1762 if (r)
1432 goto err2; 1763 goto err2;
1433 1764
@@ -1435,7 +1766,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1435 u8 dcs_cmd = first ? 0x2e : 0x3e; 1766 u8 dcs_cmd = first ? 0x2e : 0x3e;
1436 first = 0; 1767 first = 0;
1437 1768
1438 r = dsi_vc_dcs_read(td->channel, dcs_cmd, 1769 r = dsi_vc_dcs_read(dssdev, td->channel, dcs_cmd,
1439 buf + buf_used, size - buf_used); 1770 buf + buf_used, size - buf_used);
1440 1771
1441 if (r < 0) { 1772 if (r < 0) {
@@ -1461,14 +1792,35 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1461 r = buf_used; 1792 r = buf_used;
1462 1793
1463err3: 1794err3:
1464 dsi_vc_set_max_rx_packet_size(td->channel, 1); 1795 dsi_vc_set_max_rx_packet_size(dssdev, td->channel, 1);
1465err2: 1796err2:
1466 dsi_bus_unlock(); 1797 dsi_bus_unlock(dssdev);
1467err1: 1798err1:
1468 mutex_unlock(&td->lock); 1799 mutex_unlock(&td->lock);
1469 return r; 1800 return r;
1470} 1801}
1471 1802
1803static void taal_ulps_work(struct work_struct *work)
1804{
1805 struct taal_data *td = container_of(work, struct taal_data,
1806 ulps_work.work);
1807 struct omap_dss_device *dssdev = td->dssdev;
1808
1809 mutex_lock(&td->lock);
1810
1811 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !td->enabled) {
1812 mutex_unlock(&td->lock);
1813 return;
1814 }
1815
1816 dsi_bus_lock(dssdev);
1817
1818 taal_enter_ulps(dssdev);
1819
1820 dsi_bus_unlock(dssdev);
1821 mutex_unlock(&td->lock);
1822}
1823
1472static void taal_esd_work(struct work_struct *work) 1824static void taal_esd_work(struct work_struct *work)
1473{ 1825{
1474 struct taal_data *td = container_of(work, struct taal_data, 1826 struct taal_data *td = container_of(work, struct taal_data,
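The taal_ulps_work() handler added above only performs the ULPS entry; the hunks shown here do not include the code that arms the delayed work. As a rough sketch, assuming the driver re-queues the work after a configurable idle timeout, the scheduling side could look like the following. taal_queue_ulps_work(), taal_cancel_ulps_work() and td->ulps_timeout are assumed names, not taken from the patch text above; the workqueue calls themselves are the standard kernel API already used in this file.

static void taal_queue_ulps_work(struct omap_dss_device *dssdev)
{
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);

        /* re-arm the ULPS entry timer; 0 means "never enter ULPS" */
        if (td->ulps_timeout > 0)
                queue_delayed_work(td->esd_wq, &td->ulps_work,
                                msecs_to_jiffies(td->ulps_timeout));
}

static void taal_cancel_ulps_work(struct omap_dss_device *dssdev)
{
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);

        cancel_delayed_work(&td->ulps_work);
}

In this sketch the existing td->esd_wq workqueue is reused; whether the driver actually does that is not visible in this excerpt.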
@@ -1485,7 +1837,13 @@ static void taal_esd_work(struct work_struct *work)
1485 return; 1837 return;
1486 } 1838 }
1487 1839
1488 dsi_bus_lock(); 1840 dsi_bus_lock(dssdev);
1841
1842 r = taal_wake_up(dssdev);
1843 if (r) {
1844 dev_err(&dssdev->dev, "failed to exit ULPS\n");
1845 goto err;
1846 }
1489 1847
1490 r = taal_dcs_read_1(td, DCS_RDDSDR, &state1); 1848 r = taal_dcs_read_1(td, DCS_RDDSDR, &state1);
1491 if (r) { 1849 if (r) {
@@ -1521,22 +1879,20 @@ static void taal_esd_work(struct work_struct *work)
1521 goto err; 1879 goto err;
1522 } 1880 }
1523 1881
1524 dsi_bus_unlock(); 1882 dsi_bus_unlock(dssdev);
1525 1883
1526 queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD); 1884 taal_queue_esd_work(dssdev);
1527 1885
1528 mutex_unlock(&td->lock); 1886 mutex_unlock(&td->lock);
1529 return; 1887 return;
1530err: 1888err:
1531 dev_err(&dssdev->dev, "performing LCD reset\n"); 1889 dev_err(&dssdev->dev, "performing LCD reset\n");
1532 1890
1533 taal_power_off(dssdev); 1891 taal_panel_reset(dssdev);
1534 taal_hw_reset(dssdev);
1535 taal_power_on(dssdev);
1536 1892
1537 dsi_bus_unlock(); 1893 dsi_bus_unlock(dssdev);
1538 1894
1539 queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD); 1895 taal_queue_esd_work(dssdev);
1540 1896
1541 mutex_unlock(&td->lock); 1897 mutex_unlock(&td->lock);
1542} 1898}
@@ -1557,7 +1913,7 @@ static enum omap_dss_update_mode taal_get_update_mode(
1557 1913
1558static struct omap_dss_driver taal_driver = { 1914static struct omap_dss_driver taal_driver = {
1559 .probe = taal_probe, 1915 .probe = taal_probe,
1560 .remove = taal_remove, 1916 .remove = __exit_p(taal_remove),
1561 1917
1562 .enable = taal_enable, 1918 .enable = taal_enable,
1563 .disable = taal_disable, 1919 .disable = taal_disable,
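Taken together, the panel-taal.c hunks above converge on one locking pattern for every externally callable panel operation: take the driver mutex, take the now per-device DSI bus lock, bring the panel out of ULPS with taal_wake_up(), and only then issue DCS traffic. A condensed sketch of that pattern follows; do_dcs_operation() is a placeholder for the body of the particular operation.

/* Minimal sketch of the pattern the patch applies to taal_update,
 * taal_enable_te, taal_rotate, taal_mirror, taal_run_test,
 * taal_memory_read and taal_esd_work.
 */
static int taal_do_locked_op(struct omap_dss_device *dssdev)
{
        struct taal_data *td = dev_get_drvdata(&dssdev->dev);
        int r;

        mutex_lock(&td->lock);
        dsi_bus_lock(dssdev);                   /* bus lock is now per-device */

        if (td->enabled) {
                r = taal_wake_up(dssdev);       /* leave ULPS before any DCS I/O */
                if (r)
                        goto err;

                r = do_dcs_operation(dssdev);   /* placeholder for the real work */
                if (r)
                        goto err;
        }

        dsi_bus_unlock(dssdev);
        mutex_unlock(&td->lock);
        return 0;
err:
        dsi_bus_unlock(dssdev);
        mutex_unlock(&td->lock);
        return r;
}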
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index dbe9d43b4850..2462b9ec6662 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -17,7 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19 19
20#include <plat/display.h> 20#include <video/omapdss.h>
21 21
22#define TPO_R02_MODE(x) ((x) & 7) 22#define TPO_R02_MODE(x) ((x) & 7)
23#define TPO_R02_MODE_800x480 7 23#define TPO_R02_MODE_800x480 7
@@ -144,13 +144,15 @@ static ssize_t tpo_td043_vmirror_store(struct device *dev,
144 struct device_attribute *attr, const char *buf, size_t count) 144 struct device_attribute *attr, const char *buf, size_t count)
145{ 145{
146 struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); 146 struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
147 long val; 147 int val;
148 int ret; 148 int ret;
149 149
150 ret = strict_strtol(buf, 0, &val); 150 ret = kstrtoint(buf, 0, &val);
151 if (ret < 0) 151 if (ret < 0)
152 return ret; 152 return ret;
153 153
154 val = !!val;
155
154 ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val); 156 ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val);
155 if (ret < 0) 157 if (ret < 0)
156 return ret; 158 return ret;
@@ -175,7 +177,7 @@ static ssize_t tpo_td043_mode_store(struct device *dev,
175 long val; 177 long val;
176 int ret; 178 int ret;
177 179
178 ret = strict_strtol(buf, 0, &val); 180 ret = kstrtol(buf, 0, &val);
179 if (ret != 0 || val & ~7) 181 if (ret != 0 || val & ~7)
180 return -EINVAL; 182 return -EINVAL;
181 183
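The panel-tpo-td043mtea1.c hunks above replace the deprecated strict_strtol() with kstrtoint()/kstrtol() and clamp the mirror value to 0/1 with val = !!val. For reference, a minimal sysfs store callback following the same parsing pattern; my_device and my_device_set_flag() are placeholders.

static ssize_t flag_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct my_device *md = dev_get_drvdata(dev);
        int val;
        int ret;

        ret = kstrtoint(buf, 0, &val);  /* base 0: accepts decimal, hex or octal */
        if (ret < 0)
                return ret;

        val = !!val;                    /* normalize to a boolean 0/1 */

        ret = my_device_set_flag(md, val);
        if (ret < 0)
                return ret;

        return count;
}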
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index bfc5da0e9700..6b3e2da11419 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -80,7 +80,7 @@ config OMAP2_DSS_SDI
80 80
81config OMAP2_DSS_DSI 81config OMAP2_DSS_DSI
82 bool "DSI support" 82 bool "DSI support"
83 depends on ARCH_OMAP3 83 depends on ARCH_OMAP3 || ARCH_OMAP4
84 default n 84 default n
85 help 85 help
86 MIPI DSI (Display Serial Interface) support. 86 MIPI DSI (Display Serial Interface) support.
@@ -90,14 +90,6 @@ config OMAP2_DSS_DSI
90 90
 91 See http://www.mipi.org/ for DSI specifications. 91 See http://www.mipi.org/ for DSI specifications.
92 92
93config OMAP2_DSS_USE_DSI_PLL
94 bool "Use DSI PLL for PCLK (EXPERIMENTAL)"
95 default n
96 depends on OMAP2_DSS_DSI
97 help
98 Use DSI PLL to generate pixel clock. Currently only for DPI output.
99 DSI PLL can be used to generate higher and more precise pixel clocks.
100
101config OMAP2_DSS_FAKE_VSYNC 93config OMAP2_DSS_FAKE_VSYNC
102 bool "Fake VSYNC irq from manual update displays" 94 bool "Fake VSYNC irq from manual update displays"
103 default n 95 default n
@@ -125,4 +117,27 @@ config OMAP2_DSS_MIN_FCK_PER_PCK
125 Max FCK is 173MHz, so this doesn't work if your PCK 117 Max FCK is 173MHz, so this doesn't work if your PCK
126 is very high. 118 is very high.
127 119
120config OMAP2_DSS_SLEEP_BEFORE_RESET
121 bool "Sleep 50ms before DSS reset"
122 default y
123 help
124 For some unknown reason we may get SYNC_LOST errors from the display
125 subsystem at initialization time if we don't sleep before resetting
126 the DSS. See the source (dss.c) for more comments.
127
 128 However, 50ms is quite a long time to sleep, and with some
129 configurations the SYNC_LOST may never happen, so the sleep can
130 be disabled here.
131
132config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
133 bool "Sleep 20ms after VENC reset"
134 default y
135 help
 136 There is a 20ms sleep after the VENC reset which seems to make the
 137 reset work reliably. The reason for the bug is unclear, and it's also unclear
138 on what platforms this happens.
139
140 This option enables the sleep, and is enabled by default. You can
 141 disable the sleep if doing so doesn't cause problems on your platform.
142
128endif 143endif
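Both new options are plain bool symbols, so the workarounds they guard can be compiled out entirely. A sketch of how dss.c and venc.c might consume them is shown below; the function names are placeholders, since only the Kconfig side is visible in this hunk.

#include <linux/delay.h>

/* Sketch only: typical consumption of a bool workaround option like the
 * ones above. dss_reset_workaround()/venc_reset_workaround() are
 * placeholder names, not functions from this patch.
 */
static void dss_reset_workaround(void)
{
#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
        /* avoid the spurious SYNC_LOST errors seen at init on some setups */
        msleep(50);
#endif
        /* ... perform the DSS soft reset here ... */
}

static void venc_reset_workaround(void)
{
        /* ... perform the VENC reset here ... */
#ifdef CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET
        msleep(20);
#endif
}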
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 1aa2ed1e786e..3da426719dd6 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -33,7 +33,7 @@
33#include <linux/device.h> 33#include <linux/device.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35 35
36#include <plat/display.h> 36#include <video/omapdss.h>
37 37
38#include "dss.h" 38#include "dss.h"
39#include "dss_features.h" 39#include "dss_features.h"
@@ -54,6 +54,9 @@ unsigned int dss_debug;
54module_param_named(debug, dss_debug, bool, 0644); 54module_param_named(debug, dss_debug, bool, 0644);
55#endif 55#endif
56 56
57static int omap_dss_register_device(struct omap_dss_device *);
58static void omap_dss_unregister_device(struct omap_dss_device *);
59
57/* REGULATORS */ 60/* REGULATORS */
58 61
59struct regulator *dss_get_vdds_dsi(void) 62struct regulator *dss_get_vdds_dsi(void)
@@ -124,8 +127,7 @@ static int dss_initialize_debugfs(void)
124#endif 127#endif
125 128
126#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS) 129#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
127 debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir, 130 dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
128 &dsi_dump_irqs, &dss_debug_fops);
129#endif 131#endif
130 132
131 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, 133 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
@@ -137,8 +139,7 @@ static int dss_initialize_debugfs(void)
137 &rfbi_dump_regs, &dss_debug_fops); 139 &rfbi_dump_regs, &dss_debug_fops);
138#endif 140#endif
139#ifdef CONFIG_OMAP2_DSS_DSI 141#ifdef CONFIG_OMAP2_DSS_DSI
140 debugfs_create_file("dsi", S_IRUGO, dss_debugfs_dir, 142 dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
141 &dsi_dump_regs, &dss_debug_fops);
142#endif 143#endif
143#ifdef CONFIG_OMAP2_DSS_VENC 144#ifdef CONFIG_OMAP2_DSS_VENC
144 debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir, 145 debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
@@ -480,7 +481,7 @@ static void omap_dss_dev_release(struct device *dev)
480 reset_device(dev, 0); 481 reset_device(dev, 0);
481} 482}
482 483
483int omap_dss_register_device(struct omap_dss_device *dssdev) 484static int omap_dss_register_device(struct omap_dss_device *dssdev)
484{ 485{
485 static int dev_num; 486 static int dev_num;
486 487
@@ -494,7 +495,7 @@ int omap_dss_register_device(struct omap_dss_device *dssdev)
494 return device_register(&dssdev->dev); 495 return device_register(&dssdev->dev);
495} 496}
496 497
497void omap_dss_unregister_device(struct omap_dss_device *dssdev) 498static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
498{ 499{
499 device_unregister(&dssdev->dev); 500 device_unregister(&dssdev->dev);
500} 501}
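The debugfs hunks above move DSI file creation out of core.c and behind dsi_create_debugfs_files_irq()/dsi_create_debugfs_files_reg(), so core.c no longer needs the dsi_dump_irqs/dsi_dump_regs symbols. Assuming the helpers simply wrap debugfs_create_file() around the existing dump callbacks (the dsi.c side of the patch is not shown in this excerpt), they would look roughly like this:

/* Assumed shape of the new dsi.c helpers, reusing the file names and dump
 * callbacks from the core.c lines removed above; not copied from the patch.
 */
void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
                const struct file_operations *debug_fops)
{
        debugfs_create_file("dsi_irq", S_IRUGO, debugfs_dir,
                        &dsi_dump_irqs, debug_fops);
}

void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
                const struct file_operations *debug_fops)
{
        debugfs_create_file("dsi", S_IRUGO, debugfs_dir,
                        &dsi_dump_regs, debug_fops);
}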
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 7804779c9da1..7a9a2e7d9685 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -37,99 +37,15 @@
37#include <plat/sram.h> 37#include <plat/sram.h>
38#include <plat/clock.h> 38#include <plat/clock.h>
39 39
40#include <plat/display.h> 40#include <video/omapdss.h>
41 41
42#include "dss.h" 42#include "dss.h"
43#include "dss_features.h" 43#include "dss_features.h"
44#include "dispc.h"
44 45
45/* DISPC */ 46/* DISPC */
46#define DISPC_SZ_REGS SZ_4K 47#define DISPC_SZ_REGS SZ_4K
47 48
48struct dispc_reg { u16 idx; };
49
50#define DISPC_REG(idx) ((const struct dispc_reg) { idx })
51
52/*
53 * DISPC common registers and
54 * DISPC channel registers , ch = 0 for LCD, ch = 1 for
55 * DIGIT, and ch = 2 for LCD2
56 */
57#define DISPC_REVISION DISPC_REG(0x0000)
58#define DISPC_SYSCONFIG DISPC_REG(0x0010)
59#define DISPC_SYSSTATUS DISPC_REG(0x0014)
60#define DISPC_IRQSTATUS DISPC_REG(0x0018)
61#define DISPC_IRQENABLE DISPC_REG(0x001C)
62#define DISPC_CONTROL DISPC_REG(0x0040)
63#define DISPC_CONTROL2 DISPC_REG(0x0238)
64#define DISPC_CONFIG DISPC_REG(0x0044)
65#define DISPC_CONFIG2 DISPC_REG(0x0620)
66#define DISPC_CAPABLE DISPC_REG(0x0048)
67#define DISPC_DEFAULT_COLOR(ch) DISPC_REG(ch == 0 ? 0x004C : \
68 (ch == 1 ? 0x0050 : 0x03AC))
69#define DISPC_TRANS_COLOR(ch) DISPC_REG(ch == 0 ? 0x0054 : \
70 (ch == 1 ? 0x0058 : 0x03B0))
71#define DISPC_LINE_STATUS DISPC_REG(0x005C)
72#define DISPC_LINE_NUMBER DISPC_REG(0x0060)
73#define DISPC_TIMING_H(ch) DISPC_REG(ch != 2 ? 0x0064 : 0x0400)
74#define DISPC_TIMING_V(ch) DISPC_REG(ch != 2 ? 0x0068 : 0x0404)
75#define DISPC_POL_FREQ(ch) DISPC_REG(ch != 2 ? 0x006C : 0x0408)
76#define DISPC_DIVISORo(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
77#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
78#define DISPC_SIZE_DIG DISPC_REG(0x0078)
79#define DISPC_SIZE_LCD(ch) DISPC_REG(ch != 2 ? 0x007C : 0x03CC)
80
81/* DISPC GFX plane */
82#define DISPC_GFX_BA0 DISPC_REG(0x0080)
83#define DISPC_GFX_BA1 DISPC_REG(0x0084)
84#define DISPC_GFX_POSITION DISPC_REG(0x0088)
85#define DISPC_GFX_SIZE DISPC_REG(0x008C)
86#define DISPC_GFX_ATTRIBUTES DISPC_REG(0x00A0)
87#define DISPC_GFX_FIFO_THRESHOLD DISPC_REG(0x00A4)
88#define DISPC_GFX_FIFO_SIZE_STATUS DISPC_REG(0x00A8)
89#define DISPC_GFX_ROW_INC DISPC_REG(0x00AC)
90#define DISPC_GFX_PIXEL_INC DISPC_REG(0x00B0)
91#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
92#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8)
93
94#define DISPC_DATA_CYCLE1(ch) DISPC_REG(ch != 2 ? 0x01D4 : 0x03C0)
95#define DISPC_DATA_CYCLE2(ch) DISPC_REG(ch != 2 ? 0x01D8 : 0x03C4)
96#define DISPC_DATA_CYCLE3(ch) DISPC_REG(ch != 2 ? 0x01DC : 0x03C8)
97#define DISPC_CPR_COEF_R(ch) DISPC_REG(ch != 2 ? 0x0220 : 0x03BC)
98#define DISPC_CPR_COEF_G(ch) DISPC_REG(ch != 2 ? 0x0224 : 0x03B8)
99#define DISPC_CPR_COEF_B(ch) DISPC_REG(ch != 2 ? 0x0228 : 0x03B4)
100
101#define DISPC_GFX_PRELOAD DISPC_REG(0x022C)
102
103/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */
104#define DISPC_VID_REG(n, idx) DISPC_REG(0x00BC + (n)*0x90 + idx)
105
106#define DISPC_VID_BA0(n) DISPC_VID_REG(n, 0x0000)
107#define DISPC_VID_BA1(n) DISPC_VID_REG(n, 0x0004)
108#define DISPC_VID_POSITION(n) DISPC_VID_REG(n, 0x0008)
109#define DISPC_VID_SIZE(n) DISPC_VID_REG(n, 0x000C)
110#define DISPC_VID_ATTRIBUTES(n) DISPC_VID_REG(n, 0x0010)
111#define DISPC_VID_FIFO_THRESHOLD(n) DISPC_VID_REG(n, 0x0014)
112#define DISPC_VID_FIFO_SIZE_STATUS(n) DISPC_VID_REG(n, 0x0018)
113#define DISPC_VID_ROW_INC(n) DISPC_VID_REG(n, 0x001C)
114#define DISPC_VID_PIXEL_INC(n) DISPC_VID_REG(n, 0x0020)
115#define DISPC_VID_FIR(n) DISPC_VID_REG(n, 0x0024)
116#define DISPC_VID_PICTURE_SIZE(n) DISPC_VID_REG(n, 0x0028)
117#define DISPC_VID_ACCU0(n) DISPC_VID_REG(n, 0x002C)
118#define DISPC_VID_ACCU1(n) DISPC_VID_REG(n, 0x0030)
119
120/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
121#define DISPC_VID_FIR_COEF_H(n, i) DISPC_REG(0x00F0 + (n)*0x90 + (i)*0x8)
122/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
123#define DISPC_VID_FIR_COEF_HV(n, i) DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8)
124/* coef index i = {0, 1, 2, 3, 4} */
125#define DISPC_VID_CONV_COEF(n, i) DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4)
126/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
127#define DISPC_VID_FIR_COEF_V(n, i) DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4)
128
129#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
130
131#define DISPC_DIVISOR DISPC_REG(0x0804)
132
133#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \ 49#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
134 DISPC_IRQ_OCP_ERR | \ 50 DISPC_IRQ_OCP_ERR | \
135 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \ 51 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
@@ -167,10 +83,6 @@ struct dispc_v_coef {
167#define REG_FLD_MOD(idx, val, start, end) \ 83#define REG_FLD_MOD(idx, val, start, end) \
168 dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end)) 84 dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
169 85
170static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
171 DISPC_VID_ATTRIBUTES(0),
172 DISPC_VID_ATTRIBUTES(1) };
173
174struct dispc_irq_stats { 86struct dispc_irq_stats {
175 unsigned long last_reset; 87 unsigned long last_reset;
176 unsigned irq_count; 88 unsigned irq_count;
@@ -198,25 +110,38 @@ static struct {
198#endif 110#endif
199} dispc; 111} dispc;
200 112
113enum omap_color_component {
114 /* used for all color formats for OMAP3 and earlier
115 * and for RGB and Y color component on OMAP4
116 */
117 DISPC_COLOR_COMPONENT_RGB_Y = 1 << 0,
118 /* used for UV component for
119 * OMAP_DSS_COLOR_YUV2, OMAP_DSS_COLOR_UYVY, OMAP_DSS_COLOR_NV12
120 * color formats on OMAP4
121 */
122 DISPC_COLOR_COMPONENT_UV = 1 << 1,
123};
124
201static void _omap_dispc_set_irqs(void); 125static void _omap_dispc_set_irqs(void);
202 126
203static inline void dispc_write_reg(const struct dispc_reg idx, u32 val) 127static inline void dispc_write_reg(const u16 idx, u32 val)
204{ 128{
205 __raw_writel(val, dispc.base + idx.idx); 129 __raw_writel(val, dispc.base + idx);
206} 130}
207 131
208static inline u32 dispc_read_reg(const struct dispc_reg idx) 132static inline u32 dispc_read_reg(const u16 idx)
209{ 133{
210 return __raw_readl(dispc.base + idx.idx); 134 return __raw_readl(dispc.base + idx);
211} 135}
212 136
213#define SR(reg) \ 137#define SR(reg) \
214 dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg) 138 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
215#define RR(reg) \ 139#define RR(reg) \
216 dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)]) 140 dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
217 141
218void dispc_save_context(void) 142void dispc_save_context(void)
219{ 143{
144 int i;
220 if (cpu_is_omap24xx()) 145 if (cpu_is_omap24xx())
221 return; 146 return;
222 147
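The hunk above drops the struct dispc_reg wrapper in favour of plain u16 offsets, and the following hunks replace the fixed GFX/VID register arrays with DISPC_OVL_*(plane) macros taken from the new dispc.h. Reconstructing from the offsets removed above (GFX registers at 0x0080 + offset, VIDn registers at 0x00BC + n*0x90 + offset), those macros presumably have roughly this shape; dispc.h itself is not part of this excerpt, so treat this as an assumption:

/* Assumed reconstruction of the per-plane macros in the new dispc.h,
 * based on the GFX/VID offsets removed above; not copied from the patch.
 */
#define DISPC_OVL_BASE(n)       ((n) == OMAP_DSS_GFX ? 0x0080 : \
                                 0x00BC + ((n) - OMAP_DSS_VIDEO1) * 0x90)

#define DISPC_OVL_BA0(n)        (DISPC_OVL_BASE(n) + 0x0000)
#define DISPC_OVL_BA1(n)        (DISPC_OVL_BASE(n) + 0x0004)
#define DISPC_OVL_POSITION(n)   (DISPC_OVL_BASE(n) + 0x0008)
#define DISPC_OVL_ATTRIBUTES(n) (DISPC_OVL_BASE(n) + \
                                 ((n) == OMAP_DSS_GFX ? 0x0020 : 0x0010))

With the register index reduced to a plain u16, the SR()/RR() context save/restore macros above can index dispc.ctx[] directly by offset, and short loops over the eight FIR coefficient registers replace the long unrolled SR/RR lists in the hunks that follow.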
@@ -224,157 +149,153 @@ void dispc_save_context(void)
224 SR(IRQENABLE); 149 SR(IRQENABLE);
225 SR(CONTROL); 150 SR(CONTROL);
226 SR(CONFIG); 151 SR(CONFIG);
227 SR(DEFAULT_COLOR(0)); 152 SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
228 SR(DEFAULT_COLOR(1)); 153 SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
229 SR(TRANS_COLOR(0)); 154 SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
230 SR(TRANS_COLOR(1)); 155 SR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
231 SR(LINE_NUMBER); 156 SR(LINE_NUMBER);
232 SR(TIMING_H(0)); 157 SR(TIMING_H(OMAP_DSS_CHANNEL_LCD));
233 SR(TIMING_V(0)); 158 SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
234 SR(POL_FREQ(0)); 159 SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
235 SR(DIVISORo(0)); 160 SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
236 SR(GLOBAL_ALPHA); 161 SR(GLOBAL_ALPHA);
237 SR(SIZE_DIG); 162 SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
238 SR(SIZE_LCD(0)); 163 SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
239 if (dss_has_feature(FEAT_MGR_LCD2)) { 164 if (dss_has_feature(FEAT_MGR_LCD2)) {
240 SR(CONTROL2); 165 SR(CONTROL2);
241 SR(DEFAULT_COLOR(2)); 166 SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
242 SR(TRANS_COLOR(2)); 167 SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
243 SR(SIZE_LCD(2)); 168 SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
244 SR(TIMING_H(2)); 169 SR(TIMING_H(OMAP_DSS_CHANNEL_LCD2));
245 SR(TIMING_V(2)); 170 SR(TIMING_V(OMAP_DSS_CHANNEL_LCD2));
246 SR(POL_FREQ(2)); 171 SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
247 SR(DIVISORo(2)); 172 SR(DIVISORo(OMAP_DSS_CHANNEL_LCD2));
248 SR(CONFIG2); 173 SR(CONFIG2);
249 } 174 }
250 175
251 SR(GFX_BA0); 176 SR(OVL_BA0(OMAP_DSS_GFX));
252 SR(GFX_BA1); 177 SR(OVL_BA1(OMAP_DSS_GFX));
253 SR(GFX_POSITION); 178 SR(OVL_POSITION(OMAP_DSS_GFX));
254 SR(GFX_SIZE); 179 SR(OVL_SIZE(OMAP_DSS_GFX));
255 SR(GFX_ATTRIBUTES); 180 SR(OVL_ATTRIBUTES(OMAP_DSS_GFX));
256 SR(GFX_FIFO_THRESHOLD); 181 SR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
257 SR(GFX_ROW_INC); 182 SR(OVL_ROW_INC(OMAP_DSS_GFX));
258 SR(GFX_PIXEL_INC); 183 SR(OVL_PIXEL_INC(OMAP_DSS_GFX));
259 SR(GFX_WINDOW_SKIP); 184 SR(OVL_WINDOW_SKIP(OMAP_DSS_GFX));
260 SR(GFX_TABLE_BA); 185 SR(OVL_TABLE_BA(OMAP_DSS_GFX));
261 186
262 SR(DATA_CYCLE1(0)); 187 SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
263 SR(DATA_CYCLE2(0)); 188 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
264 SR(DATA_CYCLE3(0)); 189 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
265 190
266 SR(CPR_COEF_R(0)); 191 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
267 SR(CPR_COEF_G(0)); 192 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
268 SR(CPR_COEF_B(0)); 193 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
269 if (dss_has_feature(FEAT_MGR_LCD2)) { 194 if (dss_has_feature(FEAT_MGR_LCD2)) {
270 SR(CPR_COEF_B(2)); 195 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
271 SR(CPR_COEF_G(2)); 196 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
272 SR(CPR_COEF_R(2)); 197 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
273 198
274 SR(DATA_CYCLE1(2)); 199 SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
275 SR(DATA_CYCLE2(2)); 200 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
276 SR(DATA_CYCLE3(2)); 201 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
277 } 202 }
278 203
279 SR(GFX_PRELOAD); 204 SR(OVL_PRELOAD(OMAP_DSS_GFX));
280 205
281 /* VID1 */ 206 /* VID1 */
282 SR(VID_BA0(0)); 207 SR(OVL_BA0(OMAP_DSS_VIDEO1));
283 SR(VID_BA1(0)); 208 SR(OVL_BA1(OMAP_DSS_VIDEO1));
284 SR(VID_POSITION(0)); 209 SR(OVL_POSITION(OMAP_DSS_VIDEO1));
285 SR(VID_SIZE(0)); 210 SR(OVL_SIZE(OMAP_DSS_VIDEO1));
286 SR(VID_ATTRIBUTES(0)); 211 SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
287 SR(VID_FIFO_THRESHOLD(0)); 212 SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
288 SR(VID_ROW_INC(0)); 213 SR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
289 SR(VID_PIXEL_INC(0)); 214 SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
290 SR(VID_FIR(0)); 215 SR(OVL_FIR(OMAP_DSS_VIDEO1));
291 SR(VID_PICTURE_SIZE(0)); 216 SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
292 SR(VID_ACCU0(0)); 217 SR(OVL_ACCU0(OMAP_DSS_VIDEO1));
293 SR(VID_ACCU1(0)); 218 SR(OVL_ACCU1(OMAP_DSS_VIDEO1));
294 219
295 SR(VID_FIR_COEF_H(0, 0)); 220 for (i = 0; i < 8; i++)
296 SR(VID_FIR_COEF_H(0, 1)); 221 SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
297 SR(VID_FIR_COEF_H(0, 2)); 222
298 SR(VID_FIR_COEF_H(0, 3)); 223 for (i = 0; i < 8; i++)
299 SR(VID_FIR_COEF_H(0, 4)); 224 SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
300 SR(VID_FIR_COEF_H(0, 5)); 225
301 SR(VID_FIR_COEF_H(0, 6)); 226 for (i = 0; i < 5; i++)
302 SR(VID_FIR_COEF_H(0, 7)); 227 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
303 228
304 SR(VID_FIR_COEF_HV(0, 0)); 229 for (i = 0; i < 8; i++)
305 SR(VID_FIR_COEF_HV(0, 1)); 230 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
306 SR(VID_FIR_COEF_HV(0, 2)); 231
307 SR(VID_FIR_COEF_HV(0, 3)); 232 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
308 SR(VID_FIR_COEF_HV(0, 4)); 233 SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
309 SR(VID_FIR_COEF_HV(0, 5)); 234 SR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
310 SR(VID_FIR_COEF_HV(0, 6)); 235 SR(OVL_FIR2(OMAP_DSS_VIDEO1));
311 SR(VID_FIR_COEF_HV(0, 7)); 236 SR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
312 237 SR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
313 SR(VID_CONV_COEF(0, 0)); 238
314 SR(VID_CONV_COEF(0, 1)); 239 for (i = 0; i < 8; i++)
315 SR(VID_CONV_COEF(0, 2)); 240 SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
316 SR(VID_CONV_COEF(0, 3)); 241
317 SR(VID_CONV_COEF(0, 4)); 242 for (i = 0; i < 8; i++)
318 243 SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
319 SR(VID_FIR_COEF_V(0, 0)); 244
320 SR(VID_FIR_COEF_V(0, 1)); 245 for (i = 0; i < 8; i++)
321 SR(VID_FIR_COEF_V(0, 2)); 246 SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
322 SR(VID_FIR_COEF_V(0, 3)); 247 }
323 SR(VID_FIR_COEF_V(0, 4)); 248 if (dss_has_feature(FEAT_ATTR2))
324 SR(VID_FIR_COEF_V(0, 5)); 249 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
325 SR(VID_FIR_COEF_V(0, 6)); 250
326 SR(VID_FIR_COEF_V(0, 7)); 251 SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
327
328 SR(VID_PRELOAD(0));
329 252
330 /* VID2 */ 253 /* VID2 */
331 SR(VID_BA0(1)); 254 SR(OVL_BA0(OMAP_DSS_VIDEO2));
332 SR(VID_BA1(1)); 255 SR(OVL_BA1(OMAP_DSS_VIDEO2));
333 SR(VID_POSITION(1)); 256 SR(OVL_POSITION(OMAP_DSS_VIDEO2));
334 SR(VID_SIZE(1)); 257 SR(OVL_SIZE(OMAP_DSS_VIDEO2));
335 SR(VID_ATTRIBUTES(1)); 258 SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
336 SR(VID_FIFO_THRESHOLD(1)); 259 SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
337 SR(VID_ROW_INC(1)); 260 SR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
338 SR(VID_PIXEL_INC(1)); 261 SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
339 SR(VID_FIR(1)); 262 SR(OVL_FIR(OMAP_DSS_VIDEO2));
340 SR(VID_PICTURE_SIZE(1)); 263 SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
341 SR(VID_ACCU0(1)); 264 SR(OVL_ACCU0(OMAP_DSS_VIDEO2));
342 SR(VID_ACCU1(1)); 265 SR(OVL_ACCU1(OMAP_DSS_VIDEO2));
343 266
344 SR(VID_FIR_COEF_H(1, 0)); 267 for (i = 0; i < 8; i++)
345 SR(VID_FIR_COEF_H(1, 1)); 268 SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
346 SR(VID_FIR_COEF_H(1, 2)); 269
347 SR(VID_FIR_COEF_H(1, 3)); 270 for (i = 0; i < 8; i++)
348 SR(VID_FIR_COEF_H(1, 4)); 271 SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
349 SR(VID_FIR_COEF_H(1, 5)); 272
350 SR(VID_FIR_COEF_H(1, 6)); 273 for (i = 0; i < 5; i++)
351 SR(VID_FIR_COEF_H(1, 7)); 274 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
352 275
353 SR(VID_FIR_COEF_HV(1, 0)); 276 for (i = 0; i < 8; i++)
354 SR(VID_FIR_COEF_HV(1, 1)); 277 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
355 SR(VID_FIR_COEF_HV(1, 2)); 278
356 SR(VID_FIR_COEF_HV(1, 3)); 279 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
357 SR(VID_FIR_COEF_HV(1, 4)); 280 SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
358 SR(VID_FIR_COEF_HV(1, 5)); 281 SR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
359 SR(VID_FIR_COEF_HV(1, 6)); 282 SR(OVL_FIR2(OMAP_DSS_VIDEO2));
360 SR(VID_FIR_COEF_HV(1, 7)); 283 SR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
361 284 SR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
362 SR(VID_CONV_COEF(1, 0)); 285
363 SR(VID_CONV_COEF(1, 1)); 286 for (i = 0; i < 8; i++)
364 SR(VID_CONV_COEF(1, 2)); 287 SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
365 SR(VID_CONV_COEF(1, 3)); 288
366 SR(VID_CONV_COEF(1, 4)); 289 for (i = 0; i < 8; i++)
367 290 SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
368 SR(VID_FIR_COEF_V(1, 0)); 291
369 SR(VID_FIR_COEF_V(1, 1)); 292 for (i = 0; i < 8; i++)
370 SR(VID_FIR_COEF_V(1, 2)); 293 SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
371 SR(VID_FIR_COEF_V(1, 3)); 294 }
372 SR(VID_FIR_COEF_V(1, 4)); 295 if (dss_has_feature(FEAT_ATTR2))
373 SR(VID_FIR_COEF_V(1, 5)); 296 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
374 SR(VID_FIR_COEF_V(1, 6)); 297
375 SR(VID_FIR_COEF_V(1, 7)); 298 SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
376
377 SR(VID_PRELOAD(1));
378 299
379 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 300 if (dss_has_feature(FEAT_CORE_CLK_DIV))
380 SR(DIVISOR); 301 SR(DIVISOR);
@@ -382,160 +303,158 @@ void dispc_save_context(void)
382 303
383void dispc_restore_context(void) 304void dispc_restore_context(void)
384{ 305{
306 int i;
385 RR(SYSCONFIG); 307 RR(SYSCONFIG);
386 /*RR(IRQENABLE);*/ 308 /*RR(IRQENABLE);*/
387 /*RR(CONTROL);*/ 309 /*RR(CONTROL);*/
388 RR(CONFIG); 310 RR(CONFIG);
389 RR(DEFAULT_COLOR(0)); 311 RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
390 RR(DEFAULT_COLOR(1)); 312 RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
391 RR(TRANS_COLOR(0)); 313 RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
392 RR(TRANS_COLOR(1)); 314 RR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
393 RR(LINE_NUMBER); 315 RR(LINE_NUMBER);
394 RR(TIMING_H(0)); 316 RR(TIMING_H(OMAP_DSS_CHANNEL_LCD));
395 RR(TIMING_V(0)); 317 RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
396 RR(POL_FREQ(0)); 318 RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
397 RR(DIVISORo(0)); 319 RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
398 RR(GLOBAL_ALPHA); 320 RR(GLOBAL_ALPHA);
399 RR(SIZE_DIG); 321 RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
400 RR(SIZE_LCD(0)); 322 RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
401 if (dss_has_feature(FEAT_MGR_LCD2)) { 323 if (dss_has_feature(FEAT_MGR_LCD2)) {
402 RR(DEFAULT_COLOR(2)); 324 RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
403 RR(TRANS_COLOR(2)); 325 RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
404 RR(SIZE_LCD(2)); 326 RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
405 RR(TIMING_H(2)); 327 RR(TIMING_H(OMAP_DSS_CHANNEL_LCD2));
406 RR(TIMING_V(2)); 328 RR(TIMING_V(OMAP_DSS_CHANNEL_LCD2));
407 RR(POL_FREQ(2)); 329 RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
408 RR(DIVISORo(2)); 330 RR(DIVISORo(OMAP_DSS_CHANNEL_LCD2));
409 RR(CONFIG2); 331 RR(CONFIG2);
410 } 332 }
411 333
412 RR(GFX_BA0); 334 RR(OVL_BA0(OMAP_DSS_GFX));
413 RR(GFX_BA1); 335 RR(OVL_BA1(OMAP_DSS_GFX));
414 RR(GFX_POSITION); 336 RR(OVL_POSITION(OMAP_DSS_GFX));
415 RR(GFX_SIZE); 337 RR(OVL_SIZE(OMAP_DSS_GFX));
416 RR(GFX_ATTRIBUTES); 338 RR(OVL_ATTRIBUTES(OMAP_DSS_GFX));
417 RR(GFX_FIFO_THRESHOLD); 339 RR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
418 RR(GFX_ROW_INC); 340 RR(OVL_ROW_INC(OMAP_DSS_GFX));
419 RR(GFX_PIXEL_INC); 341 RR(OVL_PIXEL_INC(OMAP_DSS_GFX));
420 RR(GFX_WINDOW_SKIP); 342 RR(OVL_WINDOW_SKIP(OMAP_DSS_GFX));
421 RR(GFX_TABLE_BA); 343 RR(OVL_TABLE_BA(OMAP_DSS_GFX));
422 344
423 RR(DATA_CYCLE1(0)); 345
424 RR(DATA_CYCLE2(0)); 346 RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
425 RR(DATA_CYCLE3(0)); 347 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
426 348 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
427 RR(CPR_COEF_R(0)); 349
428 RR(CPR_COEF_G(0)); 350 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
429 RR(CPR_COEF_B(0)); 351 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
352 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
430 if (dss_has_feature(FEAT_MGR_LCD2)) { 353 if (dss_has_feature(FEAT_MGR_LCD2)) {
431 RR(DATA_CYCLE1(2)); 354 RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
432 RR(DATA_CYCLE2(2)); 355 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
433 RR(DATA_CYCLE3(2)); 356 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
434 357
435 RR(CPR_COEF_B(2)); 358 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
436 RR(CPR_COEF_G(2)); 359 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
437 RR(CPR_COEF_R(2)); 360 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
438 } 361 }
439 362
440 RR(GFX_PRELOAD); 363 RR(OVL_PRELOAD(OMAP_DSS_GFX));
441 364
442 /* VID1 */ 365 /* VID1 */
443 RR(VID_BA0(0)); 366 RR(OVL_BA0(OMAP_DSS_VIDEO1));
444 RR(VID_BA1(0)); 367 RR(OVL_BA1(OMAP_DSS_VIDEO1));
445 RR(VID_POSITION(0)); 368 RR(OVL_POSITION(OMAP_DSS_VIDEO1));
446 RR(VID_SIZE(0)); 369 RR(OVL_SIZE(OMAP_DSS_VIDEO1));
447 RR(VID_ATTRIBUTES(0)); 370 RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
448 RR(VID_FIFO_THRESHOLD(0)); 371 RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
449 RR(VID_ROW_INC(0)); 372 RR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
450 RR(VID_PIXEL_INC(0)); 373 RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
451 RR(VID_FIR(0)); 374 RR(OVL_FIR(OMAP_DSS_VIDEO1));
452 RR(VID_PICTURE_SIZE(0)); 375 RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
453 RR(VID_ACCU0(0)); 376 RR(OVL_ACCU0(OMAP_DSS_VIDEO1));
454 RR(VID_ACCU1(0)); 377 RR(OVL_ACCU1(OMAP_DSS_VIDEO1));
455 378
456 RR(VID_FIR_COEF_H(0, 0)); 379 for (i = 0; i < 8; i++)
457 RR(VID_FIR_COEF_H(0, 1)); 380 RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
458 RR(VID_FIR_COEF_H(0, 2)); 381
459 RR(VID_FIR_COEF_H(0, 3)); 382 for (i = 0; i < 8; i++)
460 RR(VID_FIR_COEF_H(0, 4)); 383 RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
461 RR(VID_FIR_COEF_H(0, 5)); 384
462 RR(VID_FIR_COEF_H(0, 6)); 385 for (i = 0; i < 5; i++)
463 RR(VID_FIR_COEF_H(0, 7)); 386 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
464 387
465 RR(VID_FIR_COEF_HV(0, 0)); 388 for (i = 0; i < 8; i++)
466 RR(VID_FIR_COEF_HV(0, 1)); 389 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
467 RR(VID_FIR_COEF_HV(0, 2)); 390
468 RR(VID_FIR_COEF_HV(0, 3)); 391 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
469 RR(VID_FIR_COEF_HV(0, 4)); 392 RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
470 RR(VID_FIR_COEF_HV(0, 5)); 393 RR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
471 RR(VID_FIR_COEF_HV(0, 6)); 394 RR(OVL_FIR2(OMAP_DSS_VIDEO1));
472 RR(VID_FIR_COEF_HV(0, 7)); 395 RR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
473 396 RR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
474 RR(VID_CONV_COEF(0, 0)); 397
475 RR(VID_CONV_COEF(0, 1)); 398 for (i = 0; i < 8; i++)
476 RR(VID_CONV_COEF(0, 2)); 399 RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
477 RR(VID_CONV_COEF(0, 3)); 400
478 RR(VID_CONV_COEF(0, 4)); 401 for (i = 0; i < 8; i++)
479 402 RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
480 RR(VID_FIR_COEF_V(0, 0)); 403
481 RR(VID_FIR_COEF_V(0, 1)); 404 for (i = 0; i < 8; i++)
482 RR(VID_FIR_COEF_V(0, 2)); 405 RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
483 RR(VID_FIR_COEF_V(0, 3)); 406 }
484 RR(VID_FIR_COEF_V(0, 4)); 407 if (dss_has_feature(FEAT_ATTR2))
485 RR(VID_FIR_COEF_V(0, 5)); 408 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
486 RR(VID_FIR_COEF_V(0, 6)); 409
487 RR(VID_FIR_COEF_V(0, 7)); 410 RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
488
489 RR(VID_PRELOAD(0));
490 411
491 /* VID2 */ 412 /* VID2 */
492 RR(VID_BA0(1)); 413 RR(OVL_BA0(OMAP_DSS_VIDEO2));
493 RR(VID_BA1(1)); 414 RR(OVL_BA1(OMAP_DSS_VIDEO2));
494 RR(VID_POSITION(1)); 415 RR(OVL_POSITION(OMAP_DSS_VIDEO2));
495 RR(VID_SIZE(1)); 416 RR(OVL_SIZE(OMAP_DSS_VIDEO2));
496 RR(VID_ATTRIBUTES(1)); 417 RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
497 RR(VID_FIFO_THRESHOLD(1)); 418 RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
498 RR(VID_ROW_INC(1)); 419 RR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
499 RR(VID_PIXEL_INC(1)); 420 RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
500 RR(VID_FIR(1)); 421 RR(OVL_FIR(OMAP_DSS_VIDEO2));
501 RR(VID_PICTURE_SIZE(1)); 422 RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
502 RR(VID_ACCU0(1)); 423 RR(OVL_ACCU0(OMAP_DSS_VIDEO2));
503 RR(VID_ACCU1(1)); 424 RR(OVL_ACCU1(OMAP_DSS_VIDEO2));
504 425
505 RR(VID_FIR_COEF_H(1, 0)); 426 for (i = 0; i < 8; i++)
506 RR(VID_FIR_COEF_H(1, 1)); 427 RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
507 RR(VID_FIR_COEF_H(1, 2)); 428
508 RR(VID_FIR_COEF_H(1, 3)); 429 for (i = 0; i < 8; i++)
509 RR(VID_FIR_COEF_H(1, 4)); 430 RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
510 RR(VID_FIR_COEF_H(1, 5)); 431
511 RR(VID_FIR_COEF_H(1, 6)); 432 for (i = 0; i < 5; i++)
512 RR(VID_FIR_COEF_H(1, 7)); 433 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
513 434
514 RR(VID_FIR_COEF_HV(1, 0)); 435 for (i = 0; i < 8; i++)
515 RR(VID_FIR_COEF_HV(1, 1)); 436 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
516 RR(VID_FIR_COEF_HV(1, 2)); 437
517 RR(VID_FIR_COEF_HV(1, 3)); 438 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
518 RR(VID_FIR_COEF_HV(1, 4)); 439 RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
519 RR(VID_FIR_COEF_HV(1, 5)); 440 RR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
520 RR(VID_FIR_COEF_HV(1, 6)); 441 RR(OVL_FIR2(OMAP_DSS_VIDEO2));
521 RR(VID_FIR_COEF_HV(1, 7)); 442 RR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
522 443 RR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
523 RR(VID_CONV_COEF(1, 0)); 444
524 RR(VID_CONV_COEF(1, 1)); 445 for (i = 0; i < 8; i++)
525 RR(VID_CONV_COEF(1, 2)); 446 RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
526 RR(VID_CONV_COEF(1, 3)); 447
527 RR(VID_CONV_COEF(1, 4)); 448 for (i = 0; i < 8; i++)
528 449 RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
529 RR(VID_FIR_COEF_V(1, 0)); 450
530 RR(VID_FIR_COEF_V(1, 1)); 451 for (i = 0; i < 8; i++)
531 RR(VID_FIR_COEF_V(1, 2)); 452 RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
532 RR(VID_FIR_COEF_V(1, 3)); 453 }
533 RR(VID_FIR_COEF_V(1, 4)); 454 if (dss_has_feature(FEAT_ATTR2))
534 RR(VID_FIR_COEF_V(1, 5)); 455 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
535 RR(VID_FIR_COEF_V(1, 6)); 456
536 RR(VID_FIR_COEF_V(1, 7)); 457 RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
537
538 RR(VID_PRELOAD(1));
539 458
540 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 459 if (dss_has_feature(FEAT_CORE_CLK_DIV))
541 RR(DIVISOR); 460 RR(DIVISOR);
@@ -632,27 +551,43 @@ end:
632 551
633static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) 552static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
634{ 553{
554 dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
555}
556
557static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
558{
559 dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value);
560}
561
562static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value)
563{
564 dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value);
565}
566
567static void _dispc_write_firh2_reg(enum omap_plane plane, int reg, u32 value)
568{
635 BUG_ON(plane == OMAP_DSS_GFX); 569 BUG_ON(plane == OMAP_DSS_GFX);
636 570
637 dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value); 571 dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value);
638} 572}
639 573
640static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value) 574static void _dispc_write_firhv2_reg(enum omap_plane plane, int reg, u32 value)
641{ 575{
642 BUG_ON(plane == OMAP_DSS_GFX); 576 BUG_ON(plane == OMAP_DSS_GFX);
643 577
644 dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value); 578 dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
645} 579}
646 580
647static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value) 581static void _dispc_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
648{ 582{
649 BUG_ON(plane == OMAP_DSS_GFX); 583 BUG_ON(plane == OMAP_DSS_GFX);
650 584
651 dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value); 585 dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
652} 586}
653 587
654static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup, 588static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
655 int vscaleup, int five_taps) 589 int vscaleup, int five_taps,
590 enum omap_color_component color_comp)
656{ 591{
657 /* Coefficients for horizontal up-sampling */ 592 /* Coefficients for horizontal up-sampling */
658 static const struct dispc_h_coef coef_hup[8] = { 593 static const struct dispc_h_coef coef_hup[8] = {
@@ -750,8 +685,14 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
750 | FLD_VAL(v_coef[i].vc1, 23, 16) 685 | FLD_VAL(v_coef[i].vc1, 23, 16)
751 | FLD_VAL(v_coef[i].vc2, 31, 24); 686 | FLD_VAL(v_coef[i].vc2, 31, 24);
752 687
753 _dispc_write_firh_reg(plane, i, h); 688 if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
754 _dispc_write_firhv_reg(plane, i, hv); 689 _dispc_write_firh_reg(plane, i, h);
690 _dispc_write_firhv_reg(plane, i, hv);
691 } else {
692 _dispc_write_firh2_reg(plane, i, h);
693 _dispc_write_firhv2_reg(plane, i, hv);
694 }
695
755 } 696 }
756 697
757 if (five_taps) { 698 if (five_taps) {
@@ -759,7 +700,10 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
759 u32 v; 700 u32 v;
760 v = FLD_VAL(v_coef[i].vc00, 7, 0) 701 v = FLD_VAL(v_coef[i].vc00, 7, 0)
761 | FLD_VAL(v_coef[i].vc22, 15, 8); 702 | FLD_VAL(v_coef[i].vc22, 15, 8);
762 _dispc_write_firv_reg(plane, i, v); 703 if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
704 _dispc_write_firv_reg(plane, i, v);
705 else
706 _dispc_write_firv2_reg(plane, i, v);
763 } 707 }
764 } 708 }
765} 709}
@@ -779,72 +723,83 @@ static void _dispc_setup_color_conv_coef(void)
779 723
780 ct = &ctbl_bt601_5; 724 ct = &ctbl_bt601_5;
781 725
782 dispc_write_reg(DISPC_VID_CONV_COEF(0, 0), CVAL(ct->rcr, ct->ry)); 726 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0),
783 dispc_write_reg(DISPC_VID_CONV_COEF(0, 1), CVAL(ct->gy, ct->rcb)); 727 CVAL(ct->rcr, ct->ry));
784 dispc_write_reg(DISPC_VID_CONV_COEF(0, 2), CVAL(ct->gcb, ct->gcr)); 728 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1),
785 dispc_write_reg(DISPC_VID_CONV_COEF(0, 3), CVAL(ct->bcr, ct->by)); 729 CVAL(ct->gy, ct->rcb));
786 dispc_write_reg(DISPC_VID_CONV_COEF(0, 4), CVAL(0, ct->bcb)); 730 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2),
787 731 CVAL(ct->gcb, ct->gcr));
788 dispc_write_reg(DISPC_VID_CONV_COEF(1, 0), CVAL(ct->rcr, ct->ry)); 732 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3),
789 dispc_write_reg(DISPC_VID_CONV_COEF(1, 1), CVAL(ct->gy, ct->rcb)); 733 CVAL(ct->bcr, ct->by));
790 dispc_write_reg(DISPC_VID_CONV_COEF(1, 2), CVAL(ct->gcb, ct->gcr)); 734 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4),
791 dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by)); 735 CVAL(0, ct->bcb));
792 dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0, ct->bcb)); 736
737 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0),
738 CVAL(ct->rcr, ct->ry));
739 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1),
740 CVAL(ct->gy, ct->rcb));
741 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2),
742 CVAL(ct->gcb, ct->gcr));
743 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3),
744 CVAL(ct->bcr, ct->by));
745 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4),
746 CVAL(0, ct->bcb));
793 747
794#undef CVAL 748#undef CVAL
795 749
796 REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11); 750 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1),
797 REG_FLD_MOD(DISPC_VID_ATTRIBUTES(1), ct->full_range, 11, 11); 751 ct->full_range, 11, 11);
752 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2),
753 ct->full_range, 11, 11);
798} 754}
799 755
800 756
801static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr) 757static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr)
802{ 758{
803 const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0, 759 dispc_write_reg(DISPC_OVL_BA0(plane), paddr);
804 DISPC_VID_BA0(0),
805 DISPC_VID_BA0(1) };
806
807 dispc_write_reg(ba0_reg[plane], paddr);
808} 760}
809 761
810static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr) 762static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr)
811{ 763{
812 const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1, 764 dispc_write_reg(DISPC_OVL_BA1(plane), paddr);
813 DISPC_VID_BA1(0), 765}
814 DISPC_VID_BA1(1) };
815 766
816 dispc_write_reg(ba1_reg[plane], paddr); 767static void _dispc_set_plane_ba0_uv(enum omap_plane plane, u32 paddr)
768{
769 dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr);
817} 770}
818 771
819static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y) 772static void _dispc_set_plane_ba1_uv(enum omap_plane plane, u32 paddr)
820{ 773{
821 const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION, 774 dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr);
822 DISPC_VID_POSITION(0), 775}
823 DISPC_VID_POSITION(1) };
824 776
777static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y)
778{
825 u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0); 779 u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
826 dispc_write_reg(pos_reg[plane], val); 780
781 dispc_write_reg(DISPC_OVL_POSITION(plane), val);
827} 782}
828 783
829static void _dispc_set_pic_size(enum omap_plane plane, int width, int height) 784static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
830{ 785{
831 const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE,
832 DISPC_VID_PICTURE_SIZE(0),
833 DISPC_VID_PICTURE_SIZE(1) };
834 u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 786 u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
835 dispc_write_reg(siz_reg[plane], val); 787
788 if (plane == OMAP_DSS_GFX)
789 dispc_write_reg(DISPC_OVL_SIZE(plane), val);
790 else
791 dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
836} 792}
837 793
838static void _dispc_set_vid_size(enum omap_plane plane, int width, int height) 794static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
839{ 795{
840 u32 val; 796 u32 val;
841 const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0),
842 DISPC_VID_SIZE(1) };
843 797
844 BUG_ON(plane == OMAP_DSS_GFX); 798 BUG_ON(plane == OMAP_DSS_GFX);
845 799
846 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 800 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
847 dispc_write_reg(vsi_reg[plane-1], val); 801
802 dispc_write_reg(DISPC_OVL_SIZE(plane), val);
848} 803}
849 804
850static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable) 805static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
@@ -856,7 +811,7 @@ static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
856 plane == OMAP_DSS_VIDEO1) 811 plane == OMAP_DSS_VIDEO1)
857 return; 812 return;
858 813
859 REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 28, 28); 814 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
860} 815}
861 816
862static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha) 817static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
@@ -876,61 +831,93 @@ static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
876 831
877static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc) 832static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
878{ 833{
879 const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC, 834 dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc);
880 DISPC_VID_PIXEL_INC(0),
881 DISPC_VID_PIXEL_INC(1) };
882
883 dispc_write_reg(ri_reg[plane], inc);
884} 835}
885 836
886static void _dispc_set_row_inc(enum omap_plane plane, s32 inc) 837static void _dispc_set_row_inc(enum omap_plane plane, s32 inc)
887{ 838{
888 const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC, 839 dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc);
889 DISPC_VID_ROW_INC(0),
890 DISPC_VID_ROW_INC(1) };
891
892 dispc_write_reg(ri_reg[plane], inc);
893} 840}
894 841
895static void _dispc_set_color_mode(enum omap_plane plane, 842static void _dispc_set_color_mode(enum omap_plane plane,
896 enum omap_color_mode color_mode) 843 enum omap_color_mode color_mode)
897{ 844{
898 u32 m = 0; 845 u32 m = 0;
899 846 if (plane != OMAP_DSS_GFX) {
900 switch (color_mode) { 847 switch (color_mode) {
901 case OMAP_DSS_COLOR_CLUT1: 848 case OMAP_DSS_COLOR_NV12:
902 m = 0x0; break; 849 m = 0x0; break;
903 case OMAP_DSS_COLOR_CLUT2: 850 case OMAP_DSS_COLOR_RGB12U:
904 m = 0x1; break; 851 m = 0x1; break;
905 case OMAP_DSS_COLOR_CLUT4: 852 case OMAP_DSS_COLOR_RGBA16:
906 m = 0x2; break; 853 m = 0x2; break;
907 case OMAP_DSS_COLOR_CLUT8: 854 case OMAP_DSS_COLOR_RGBX16:
908 m = 0x3; break; 855 m = 0x4; break;
909 case OMAP_DSS_COLOR_RGB12U: 856 case OMAP_DSS_COLOR_ARGB16:
910 m = 0x4; break; 857 m = 0x5; break;
911 case OMAP_DSS_COLOR_ARGB16: 858 case OMAP_DSS_COLOR_RGB16:
912 m = 0x5; break; 859 m = 0x6; break;
913 case OMAP_DSS_COLOR_RGB16: 860 case OMAP_DSS_COLOR_ARGB16_1555:
914 m = 0x6; break; 861 m = 0x7; break;
915 case OMAP_DSS_COLOR_RGB24U: 862 case OMAP_DSS_COLOR_RGB24U:
916 m = 0x8; break; 863 m = 0x8; break;
917 case OMAP_DSS_COLOR_RGB24P: 864 case OMAP_DSS_COLOR_RGB24P:
918 m = 0x9; break; 865 m = 0x9; break;
919 case OMAP_DSS_COLOR_YUV2: 866 case OMAP_DSS_COLOR_YUV2:
920 m = 0xa; break; 867 m = 0xa; break;
921 case OMAP_DSS_COLOR_UYVY: 868 case OMAP_DSS_COLOR_UYVY:
922 m = 0xb; break; 869 m = 0xb; break;
923 case OMAP_DSS_COLOR_ARGB32: 870 case OMAP_DSS_COLOR_ARGB32:
924 m = 0xc; break; 871 m = 0xc; break;
925 case OMAP_DSS_COLOR_RGBA32: 872 case OMAP_DSS_COLOR_RGBA32:
926 m = 0xd; break; 873 m = 0xd; break;
927 case OMAP_DSS_COLOR_RGBX32: 874 case OMAP_DSS_COLOR_RGBX32:
928 m = 0xe; break; 875 m = 0xe; break;
929 default: 876 case OMAP_DSS_COLOR_XRGB16_1555:
930 BUG(); break; 877 m = 0xf; break;
878 default:
879 BUG(); break;
880 }
881 } else {
882 switch (color_mode) {
883 case OMAP_DSS_COLOR_CLUT1:
884 m = 0x0; break;
885 case OMAP_DSS_COLOR_CLUT2:
886 m = 0x1; break;
887 case OMAP_DSS_COLOR_CLUT4:
888 m = 0x2; break;
889 case OMAP_DSS_COLOR_CLUT8:
890 m = 0x3; break;
891 case OMAP_DSS_COLOR_RGB12U:
892 m = 0x4; break;
893 case OMAP_DSS_COLOR_ARGB16:
894 m = 0x5; break;
895 case OMAP_DSS_COLOR_RGB16:
896 m = 0x6; break;
897 case OMAP_DSS_COLOR_ARGB16_1555:
898 m = 0x7; break;
899 case OMAP_DSS_COLOR_RGB24U:
900 m = 0x8; break;
901 case OMAP_DSS_COLOR_RGB24P:
902 m = 0x9; break;
903 case OMAP_DSS_COLOR_YUV2:
904 m = 0xa; break;
905 case OMAP_DSS_COLOR_UYVY:
906 m = 0xb; break;
907 case OMAP_DSS_COLOR_ARGB32:
908 m = 0xc; break;
909 case OMAP_DSS_COLOR_RGBA32:
910 m = 0xd; break;
911 case OMAP_DSS_COLOR_RGBX32:
912 m = 0xe; break;
913 case OMAP_DSS_COLOR_XRGB16_1555:
914 m = 0xf; break;
915 default:
916 BUG(); break;
917 }
931 } 918 }
932 919
933 REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1); 920 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
934} 921}
935 922
936static void _dispc_set_channel_out(enum omap_plane plane, 923static void _dispc_set_channel_out(enum omap_plane plane,
@@ -953,7 +940,7 @@ static void _dispc_set_channel_out(enum omap_plane plane,
953 return; 940 return;
954 } 941 }
955 942
956 val = dispc_read_reg(dispc_reg_att[plane]); 943 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
957 if (dss_has_feature(FEAT_MGR_LCD2)) { 944 if (dss_has_feature(FEAT_MGR_LCD2)) {
958 switch (channel) { 945 switch (channel) {
959 case OMAP_DSS_CHANNEL_LCD: 946 case OMAP_DSS_CHANNEL_LCD:
@@ -977,7 +964,7 @@ static void _dispc_set_channel_out(enum omap_plane plane,
977 } else { 964 } else {
978 val = FLD_MOD(val, channel, shift, shift); 965 val = FLD_MOD(val, channel, shift, shift);
979 } 966 }
980 dispc_write_reg(dispc_reg_att[plane], val); 967 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
981} 968}
982 969
983void dispc_set_burst_size(enum omap_plane plane, 970void dispc_set_burst_size(enum omap_plane plane,
@@ -1001,9 +988,9 @@ void dispc_set_burst_size(enum omap_plane plane,
1001 return; 988 return;
1002 } 989 }
1003 990
1004 val = dispc_read_reg(dispc_reg_att[plane]); 991 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
1005 val = FLD_MOD(val, burst_size, shift+1, shift); 992 val = FLD_MOD(val, burst_size, shift+1, shift);
1006 dispc_write_reg(dispc_reg_att[plane], val); 993 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
1007 994
1008 enable_clocks(0); 995 enable_clocks(0);
1009} 996}
@@ -1028,9 +1015,9 @@ static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
1028 1015
1029 BUG_ON(plane == OMAP_DSS_GFX); 1016 BUG_ON(plane == OMAP_DSS_GFX);
1030 1017
1031 val = dispc_read_reg(dispc_reg_att[plane]); 1018 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
1032 val = FLD_MOD(val, enable, 9, 9); 1019 val = FLD_MOD(val, enable, 9, 9);
1033 dispc_write_reg(dispc_reg_att[plane], val); 1020 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
1034} 1021}
1035 1022
1036void dispc_enable_replication(enum omap_plane plane, bool enable) 1023void dispc_enable_replication(enum omap_plane plane, bool enable)
@@ -1043,7 +1030,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
1043 bit = 10; 1030 bit = 10;
1044 1031
1045 enable_clocks(1); 1032 enable_clocks(1);
1046 REG_FLD_MOD(dispc_reg_att[plane], enable, bit, bit); 1033 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
1047 enable_clocks(0); 1034 enable_clocks(0);
1048} 1035}
1049 1036
@@ -1053,7 +1040,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
1053 BUG_ON((width > (1 << 11)) || (height > (1 << 11))); 1040 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
1054 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 1041 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
1055 enable_clocks(1); 1042 enable_clocks(1);
1056 dispc_write_reg(DISPC_SIZE_LCD(channel), val); 1043 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
1057 enable_clocks(0); 1044 enable_clocks(0);
1058} 1045}
1059 1046
@@ -1063,15 +1050,12 @@ void dispc_set_digit_size(u16 width, u16 height)
1063 BUG_ON((width > (1 << 11)) || (height > (1 << 11))); 1050 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
1064 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 1051 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
1065 enable_clocks(1); 1052 enable_clocks(1);
1066 dispc_write_reg(DISPC_SIZE_DIG, val); 1053 dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
1067 enable_clocks(0); 1054 enable_clocks(0);
1068} 1055}
1069 1056
1070static void dispc_read_plane_fifo_sizes(void) 1057static void dispc_read_plane_fifo_sizes(void)
1071{ 1058{
1072 const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
1073 DISPC_VID_FIFO_SIZE_STATUS(0),
1074 DISPC_VID_FIFO_SIZE_STATUS(1) };
1075 u32 size; 1059 u32 size;
1076 int plane; 1060 int plane;
1077 u8 start, end; 1061 u8 start, end;
@@ -1081,7 +1065,8 @@ static void dispc_read_plane_fifo_sizes(void)
1081 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); 1065 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
1082 1066
1083 for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) { 1067 for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
1084 size = FLD_GET(dispc_read_reg(fsz_reg[plane]), start, end); 1068 size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)),
1069 start, end);
1085 dispc.fifo_size[plane] = size; 1070 dispc.fifo_size[plane] = size;
1086 } 1071 }
1087 1072
@@ -1095,23 +1080,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane)
1095 1080
1096void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) 1081void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
1097{ 1082{
1098 const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
1099 DISPC_VID_FIFO_THRESHOLD(0),
1100 DISPC_VID_FIFO_THRESHOLD(1) };
1101 u8 hi_start, hi_end, lo_start, lo_end; 1083 u8 hi_start, hi_end, lo_start, lo_end;
1102 1084
1085 dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
1086 dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
1087
1103 enable_clocks(1); 1088 enable_clocks(1);
1104 1089
1105 DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n", 1090 DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
1106 plane, 1091 plane,
1107 REG_GET(ftrs_reg[plane], 11, 0), 1092 REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
1108 REG_GET(ftrs_reg[plane], 27, 16), 1093 lo_start, lo_end),
1094 REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
1095 hi_start, hi_end),
1109 low, high); 1096 low, high);
1110 1097
1111 dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); 1098 dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
1112 dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
1113
1114 dispc_write_reg(ftrs_reg[plane],
1115 FLD_VAL(high, hi_start, hi_end) | 1099 FLD_VAL(high, hi_start, hi_end) |
1116 FLD_VAL(low, lo_start, lo_end)); 1100 FLD_VAL(low, lo_start, lo_end));
1117 1101
@@ -1128,106 +1112,120 @@ void dispc_enable_fifomerge(bool enable)
1128 enable_clocks(0); 1112 enable_clocks(0);
1129} 1113}
1130 1114
1131static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc) 1115static void _dispc_set_fir(enum omap_plane plane,
1116 int hinc, int vinc,
1117 enum omap_color_component color_comp)
1132{ 1118{
1133 u32 val; 1119 u32 val;
1134 const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0),
1135 DISPC_VID_FIR(1) };
1136 u8 hinc_start, hinc_end, vinc_start, vinc_end;
1137
1138 BUG_ON(plane == OMAP_DSS_GFX);
1139 1120
1140 dss_feat_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end); 1121 if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
1141 dss_feat_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end); 1122 u8 hinc_start, hinc_end, vinc_start, vinc_end;
1142 1123
1143 val = FLD_VAL(vinc, vinc_start, vinc_end) | 1124 dss_feat_get_reg_field(FEAT_REG_FIRHINC,
1144 FLD_VAL(hinc, hinc_start, hinc_end); 1125 &hinc_start, &hinc_end);
1126 dss_feat_get_reg_field(FEAT_REG_FIRVINC,
1127 &vinc_start, &vinc_end);
1128 val = FLD_VAL(vinc, vinc_start, vinc_end) |
1129 FLD_VAL(hinc, hinc_start, hinc_end);
1145 1130
1146 dispc_write_reg(fir_reg[plane-1], val); 1131 dispc_write_reg(DISPC_OVL_FIR(plane), val);
1132 } else {
1133 val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
1134 dispc_write_reg(DISPC_OVL_FIR2(plane), val);
1135 }
1147} 1136}
1148 1137
1149static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu) 1138static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
1150{ 1139{
1151 u32 val; 1140 u32 val;
1152 const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
1153 DISPC_VID_ACCU0(1) };
1154 u8 hor_start, hor_end, vert_start, vert_end; 1141 u8 hor_start, hor_end, vert_start, vert_end;
1155 1142
1156 BUG_ON(plane == OMAP_DSS_GFX);
1157
1158 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end); 1143 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
1159 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end); 1144 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
1160 1145
1161 val = FLD_VAL(vaccu, vert_start, vert_end) | 1146 val = FLD_VAL(vaccu, vert_start, vert_end) |
1162 FLD_VAL(haccu, hor_start, hor_end); 1147 FLD_VAL(haccu, hor_start, hor_end);
1163 1148
1164 dispc_write_reg(ac0_reg[plane-1], val); 1149 dispc_write_reg(DISPC_OVL_ACCU0(plane), val);
1165} 1150}
1166 1151
1167static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu) 1152static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
1168{ 1153{
1169 u32 val; 1154 u32 val;
1170 const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
1171 DISPC_VID_ACCU1(1) };
1172 u8 hor_start, hor_end, vert_start, vert_end; 1155 u8 hor_start, hor_end, vert_start, vert_end;
1173 1156
1174 BUG_ON(plane == OMAP_DSS_GFX);
1175
1176 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end); 1157 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
1177 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end); 1158 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
1178 1159
1179 val = FLD_VAL(vaccu, vert_start, vert_end) | 1160 val = FLD_VAL(vaccu, vert_start, vert_end) |
1180 FLD_VAL(haccu, hor_start, hor_end); 1161 FLD_VAL(haccu, hor_start, hor_end);
1181 1162
1182 dispc_write_reg(ac1_reg[plane-1], val); 1163 dispc_write_reg(DISPC_OVL_ACCU1(plane), val);
1164}
1165
1166static void _dispc_set_vid_accu2_0(enum omap_plane plane, int haccu, int vaccu)
1167{
1168 u32 val;
1169
1170 val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
1171 dispc_write_reg(DISPC_OVL_ACCU2_0(plane), val);
1183} 1172}
1184 1173
1174static void _dispc_set_vid_accu2_1(enum omap_plane plane, int haccu, int vaccu)
1175{
1176 u32 val;
1185 1177
1186static void _dispc_set_scaling(enum omap_plane plane, 1178 val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
1179 dispc_write_reg(DISPC_OVL_ACCU2_1(plane), val);
1180}
1181
1182static void _dispc_set_scale_param(enum omap_plane plane,
1187 u16 orig_width, u16 orig_height, 1183 u16 orig_width, u16 orig_height,
1188 u16 out_width, u16 out_height, 1184 u16 out_width, u16 out_height,
1189 bool ilace, bool five_taps, 1185 bool five_taps, u8 rotation,
1190 bool fieldmode) 1186 enum omap_color_component color_comp)
1191{ 1187{
1192 int fir_hinc; 1188 int fir_hinc, fir_vinc;
1193 int fir_vinc;
1194 int hscaleup, vscaleup; 1189 int hscaleup, vscaleup;
1195 int accu0 = 0;
1196 int accu1 = 0;
1197 u32 l;
1198
1199 BUG_ON(plane == OMAP_DSS_GFX);
1200 1190
1201 hscaleup = orig_width <= out_width; 1191 hscaleup = orig_width <= out_width;
1202 vscaleup = orig_height <= out_height; 1192 vscaleup = orig_height <= out_height;
1203 1193
1204 _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps); 1194 _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps, color_comp);
1205 1195
1206 if (!orig_width || orig_width == out_width) 1196 fir_hinc = 1024 * orig_width / out_width;
1207 fir_hinc = 0; 1197 fir_vinc = 1024 * orig_height / out_height;
1208 else
1209 fir_hinc = 1024 * orig_width / out_width;
1210 1198
1211 if (!orig_height || orig_height == out_height) 1199 _dispc_set_fir(plane, fir_hinc, fir_vinc, color_comp);
1212 fir_vinc = 0; 1200}
1213 else
1214 fir_vinc = 1024 * orig_height / out_height;
1215 1201
1216 _dispc_set_fir(plane, fir_hinc, fir_vinc); 1202static void _dispc_set_scaling_common(enum omap_plane plane,
1203 u16 orig_width, u16 orig_height,
1204 u16 out_width, u16 out_height,
1205 bool ilace, bool five_taps,
1206 bool fieldmode, enum omap_color_mode color_mode,
1207 u8 rotation)
1208{
1209 int accu0 = 0;
1210 int accu1 = 0;
1211 u32 l;
1217 1212
1218 l = dispc_read_reg(dispc_reg_att[plane]); 1213 _dispc_set_scale_param(plane, orig_width, orig_height,
1214 out_width, out_height, five_taps,
1215 rotation, DISPC_COLOR_COMPONENT_RGB_Y);
1216 l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
1219 1217
1220 /* RESIZEENABLE and VERTICALTAPS */ 1218 /* RESIZEENABLE and VERTICALTAPS */
1221 l &= ~((0x3 << 5) | (0x1 << 21)); 1219 l &= ~((0x3 << 5) | (0x1 << 21));
1222 l |= fir_hinc ? (1 << 5) : 0; 1220 l |= (orig_width != out_width) ? (1 << 5) : 0;
1223 l |= fir_vinc ? (1 << 6) : 0; 1221 l |= (orig_height != out_height) ? (1 << 6) : 0;
1224 l |= five_taps ? (1 << 21) : 0; 1222 l |= five_taps ? (1 << 21) : 0;
1225 1223
1226 /* VRESIZECONF and HRESIZECONF */ 1224 /* VRESIZECONF and HRESIZECONF */
1227 if (dss_has_feature(FEAT_RESIZECONF)) { 1225 if (dss_has_feature(FEAT_RESIZECONF)) {
1228 l &= ~(0x3 << 7); 1226 l &= ~(0x3 << 7);
1229 l |= hscaleup ? 0 : (1 << 7); 1227 l |= (orig_width <= out_width) ? 0 : (1 << 7);
1230 l |= vscaleup ? 0 : (1 << 8); 1228 l |= (orig_height <= out_height) ? 0 : (1 << 8);
1231 } 1229 }
1232 1230
1233 /* LINEBUFFERSPLIT */ 1231 /* LINEBUFFERSPLIT */
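The FIR increments written by _dispc_set_scale_param() above are a 1024-based fixed-point ratio of source size to output size: 1024 means 1:1, larger values downscale, smaller values upscale. A self-contained illustration (the helper name is illustrative, not part of the driver):

#include <stdio.h>

/* 1024-based scaling ratio as used by _dispc_set_scale_param() */
static unsigned int fir_inc(unsigned int orig, unsigned int out)
{
	return 1024 * orig / out;
}

int main(void)
{
	printf("%u\n", fir_inc(1280, 640));	/* 2048: downscale by 2 */
	printf("%u\n", fir_inc(640, 1280));	/* 512:  upscale by 2   */
	return 0;
}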
@@ -1236,7 +1234,7 @@ static void _dispc_set_scaling(enum omap_plane plane,
1236 l |= five_taps ? (1 << 22) : 0; 1234 l |= five_taps ? (1 << 22) : 0;
1237 } 1235 }
1238 1236
1239 dispc_write_reg(dispc_reg_att[plane], l); 1237 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
1240 1238
1241 /* 1239 /*
1242 * field 0 = even field = bottom field 1240 * field 0 = even field = bottom field
@@ -1244,7 +1242,7 @@ static void _dispc_set_scaling(enum omap_plane plane,
1244 */ 1242 */
1245 if (ilace && !fieldmode) { 1243 if (ilace && !fieldmode) {
1246 accu1 = 0; 1244 accu1 = 0;
1247 accu0 = (fir_vinc / 2) & 0x3ff; 1245 accu0 = ((1024 * orig_height / out_height) / 2) & 0x3ff;
1248 if (accu0 >= 1024/2) { 1246 if (accu0 >= 1024/2) {
1249 accu1 = 1024/2; 1247 accu1 = 1024/2;
1250 accu0 -= accu1; 1248 accu0 -= accu1;
@@ -1255,6 +1253,93 @@ static void _dispc_set_scaling(enum omap_plane plane,
1255 _dispc_set_vid_accu1(plane, 0, accu1); 1253 _dispc_set_vid_accu1(plane, 0, accu1);
1256} 1254}
1257 1255
1256static void _dispc_set_scaling_uv(enum omap_plane plane,
1257 u16 orig_width, u16 orig_height,
1258 u16 out_width, u16 out_height,
1259 bool ilace, bool five_taps,
1260 bool fieldmode, enum omap_color_mode color_mode,
1261 u8 rotation)
1262{
1263 int scale_x = out_width != orig_width;
1264 int scale_y = out_height != orig_height;
1265
1266 if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
1267 return;
1268 if ((color_mode != OMAP_DSS_COLOR_YUV2 &&
1269 color_mode != OMAP_DSS_COLOR_UYVY &&
1270 color_mode != OMAP_DSS_COLOR_NV12)) {
1271 /* reset chroma resampling for RGB formats */
1272 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
1273 return;
1274 }
1275 switch (color_mode) {
1276 case OMAP_DSS_COLOR_NV12:
1277 /* UV is subsampled by 2 vertically*/
1278 orig_height >>= 1;
1279 /* UV is subsampled by 2 horz.*/
1280 orig_width >>= 1;
1281 break;
1282 case OMAP_DSS_COLOR_YUV2:
1283 case OMAP_DSS_COLOR_UYVY:
1284 /*For YUV422 with 90/270 rotation,
1285 *we don't upsample chroma
1286 */
1287 if (rotation == OMAP_DSS_ROT_0 ||
1288 rotation == OMAP_DSS_ROT_180)
1289 /* UV is subsampled by 2 hrz*/
1290 orig_width >>= 1;
1291 /* must use FIR for YUV422 if rotated */
1292 if (rotation != OMAP_DSS_ROT_0)
1293 scale_x = scale_y = true;
1294 break;
1295 default:
1296 BUG();
1297 }
1298
1299 if (out_width != orig_width)
1300 scale_x = true;
1301 if (out_height != orig_height)
1302 scale_y = true;
1303
1304 _dispc_set_scale_param(plane, orig_width, orig_height,
1305 out_width, out_height, five_taps,
1306 rotation, DISPC_COLOR_COMPONENT_UV);
1307
1308 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane),
1309 (scale_x || scale_y) ? 1 : 0, 8, 8);
1310 /* set H scaling */
1311 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
1312 /* set V scaling */
1313 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
1314
1315 _dispc_set_vid_accu2_0(plane, 0x80, 0);
1316 _dispc_set_vid_accu2_1(plane, 0x80, 0);
1317}
1318
1319static void _dispc_set_scaling(enum omap_plane plane,
1320 u16 orig_width, u16 orig_height,
1321 u16 out_width, u16 out_height,
1322 bool ilace, bool five_taps,
1323 bool fieldmode, enum omap_color_mode color_mode,
1324 u8 rotation)
1325{
1326 BUG_ON(plane == OMAP_DSS_GFX);
1327
1328 _dispc_set_scaling_common(plane,
1329 orig_width, orig_height,
1330 out_width, out_height,
1331 ilace, five_taps,
1332 fieldmode, color_mode,
1333 rotation);
1334
1335 _dispc_set_scaling_uv(plane,
1336 orig_width, orig_height,
1337 out_width, out_height,
1338 ilace, five_taps,
1339 fieldmode, color_mode,
1340 rotation);
1341}
1342
1258static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation, 1343static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
1259 bool mirroring, enum omap_color_mode color_mode) 1344 bool mirroring, enum omap_color_mode color_mode)
1260{ 1345{
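For NV12 the chroma plane is subsampled 2x2, which is why _dispc_set_scaling_uv() above halves both source dimensions before computing the FIR2 increments (YUV422 halves only the width, and only when not rotated 90/270). A rough worked example, with illustrative names:

#include <stdio.h>

/* UV-plane increments for NV12: chroma source dimensions are halved */
static void nv12_uv_fir(unsigned int ow, unsigned int oh,
			unsigned int dw, unsigned int dh)
{
	unsigned int uv_w = ow / 2, uv_h = oh / 2;

	printf("fir2_hinc=%u fir2_vinc=%u\n",
	       1024 * uv_w / dw, 1024 * uv_h / dh);
}

int main(void)
{
	nv12_uv_fir(1280, 720, 1920, 1080);	/* 341, 341: ~3x chroma upscale */
	return 0;
}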
@@ -1302,9 +1387,10 @@ static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
1302 row_repeat = false; 1387 row_repeat = false;
1303 } 1388 }
1304 1389
1305 REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12); 1390 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
1306 if (dss_has_feature(FEAT_ROWREPEATENABLE)) 1391 if (dss_has_feature(FEAT_ROWREPEATENABLE))
1307 REG_FLD_MOD(dispc_reg_att[plane], row_repeat ? 1 : 0, 18, 18); 1392 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
1393 row_repeat ? 1 : 0, 18, 18);
1308} 1394}
1309 1395
1310static int color_mode_to_bpp(enum omap_color_mode color_mode) 1396static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -1317,12 +1403,17 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
1317 case OMAP_DSS_COLOR_CLUT4: 1403 case OMAP_DSS_COLOR_CLUT4:
1318 return 4; 1404 return 4;
1319 case OMAP_DSS_COLOR_CLUT8: 1405 case OMAP_DSS_COLOR_CLUT8:
1406 case OMAP_DSS_COLOR_NV12:
1320 return 8; 1407 return 8;
1321 case OMAP_DSS_COLOR_RGB12U: 1408 case OMAP_DSS_COLOR_RGB12U:
1322 case OMAP_DSS_COLOR_RGB16: 1409 case OMAP_DSS_COLOR_RGB16:
1323 case OMAP_DSS_COLOR_ARGB16: 1410 case OMAP_DSS_COLOR_ARGB16:
1324 case OMAP_DSS_COLOR_YUV2: 1411 case OMAP_DSS_COLOR_YUV2:
1325 case OMAP_DSS_COLOR_UYVY: 1412 case OMAP_DSS_COLOR_UYVY:
1413 case OMAP_DSS_COLOR_RGBA16:
1414 case OMAP_DSS_COLOR_RGBX16:
1415 case OMAP_DSS_COLOR_ARGB16_1555:
1416 case OMAP_DSS_COLOR_XRGB16_1555:
1326 return 16; 1417 return 16;
1327 case OMAP_DSS_COLOR_RGB24P: 1418 case OMAP_DSS_COLOR_RGB24P:
1328 return 24; 1419 return 24;
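color_mode_to_bpp() reports 8 for NV12, which is consistent with the 8-bit Y plane fetched through BA0/BA1; the half-resolution CbCr plane goes through the new BA0_UV/BA1_UV addresses, so the format averages 12 bits per pixel overall. Arithmetic check (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int w = 1920, h = 1080;
	unsigned int y_bytes  = w * h;		/* 8 bpp luma              */
	unsigned int uv_bytes = w * h / 2;	/* 2 bytes per 2x2 block   */

	printf("total %u bytes = %.1f bpp\n", y_bytes + uv_bytes,
	       8.0 * (y_bytes + uv_bytes) / (w * h));	/* 12.0 bpp */
	return 0;
}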
@@ -1655,7 +1746,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
1655 enum omap_dss_rotation_type rotation_type, 1746 enum omap_dss_rotation_type rotation_type,
1656 u8 rotation, int mirror, 1747 u8 rotation, int mirror,
1657 u8 global_alpha, u8 pre_mult_alpha, 1748 u8 global_alpha, u8 pre_mult_alpha,
1658 enum omap_channel channel) 1749 enum omap_channel channel, u32 puv_addr)
1659{ 1750{
1660 const int maxdownscale = cpu_is_omap34xx() ? 4 : 2; 1751 const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
1661 bool five_taps = 0; 1752 bool five_taps = 0;
@@ -1704,7 +1795,8 @@ static int _dispc_setup_plane(enum omap_plane plane,
1704 return -EINVAL; 1795 return -EINVAL;
1705 1796
1706 if (color_mode == OMAP_DSS_COLOR_YUV2 || 1797 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1707 color_mode == OMAP_DSS_COLOR_UYVY) 1798 color_mode == OMAP_DSS_COLOR_UYVY ||
1799 color_mode == OMAP_DSS_COLOR_NV12)
1708 cconv = 1; 1800 cconv = 1;
1709 1801
1710 /* Must use 5-tap filter? */ 1802 /* Must use 5-tap filter? */
@@ -1778,6 +1870,12 @@ static int _dispc_setup_plane(enum omap_plane plane,
1778 _dispc_set_plane_ba0(plane, paddr + offset0); 1870 _dispc_set_plane_ba0(plane, paddr + offset0);
1779 _dispc_set_plane_ba1(plane, paddr + offset1); 1871 _dispc_set_plane_ba1(plane, paddr + offset1);
1780 1872
1873 if (OMAP_DSS_COLOR_NV12 == color_mode) {
1874 _dispc_set_plane_ba0_uv(plane, puv_addr + offset0);
1875 _dispc_set_plane_ba1_uv(plane, puv_addr + offset1);
1876 }
1877
1878
1781 _dispc_set_row_inc(plane, row_inc); 1879 _dispc_set_row_inc(plane, row_inc);
1782 _dispc_set_pix_inc(plane, pix_inc); 1880 _dispc_set_pix_inc(plane, pix_inc);
1783 1881
@@ -1791,7 +1889,8 @@ static int _dispc_setup_plane(enum omap_plane plane,
1791 if (plane != OMAP_DSS_GFX) { 1889 if (plane != OMAP_DSS_GFX) {
1792 _dispc_set_scaling(plane, width, height, 1890 _dispc_set_scaling(plane, width, height,
1793 out_width, out_height, 1891 out_width, out_height,
1794 ilace, five_taps, fieldmode); 1892 ilace, five_taps, fieldmode,
1893 color_mode, rotation);
1795 _dispc_set_vid_size(plane, out_width, out_height); 1894 _dispc_set_vid_size(plane, out_width, out_height);
1796 _dispc_set_vid_color_conv(plane, cconv); 1895 _dispc_set_vid_color_conv(plane, cconv);
1797 } 1896 }
@@ -1806,7 +1905,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
1806 1905
1807static void _dispc_enable_plane(enum omap_plane plane, bool enable) 1906static void _dispc_enable_plane(enum omap_plane plane, bool enable)
1808{ 1907{
1809 REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0); 1908 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
1810} 1909}
1811 1910
1812static void dispc_disable_isr(void *data, u32 mask) 1911static void dispc_disable_isr(void *data, u32 mask)
@@ -2353,14 +2452,20 @@ static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
2353 2452
2354unsigned long dispc_fclk_rate(void) 2453unsigned long dispc_fclk_rate(void)
2355{ 2454{
2455 struct platform_device *dsidev;
2356 unsigned long r = 0; 2456 unsigned long r = 0;
2357 2457
2358 switch (dss_get_dispc_clk_source()) { 2458 switch (dss_get_dispc_clk_source()) {
2359 case DSS_CLK_SRC_FCK: 2459 case OMAP_DSS_CLK_SRC_FCK:
2360 r = dss_clk_get_rate(DSS_CLK_FCK); 2460 r = dss_clk_get_rate(DSS_CLK_FCK);
2361 break; 2461 break;
2362 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 2462 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
2363 r = dsi_get_pll_hsdiv_dispc_rate(); 2463 dsidev = dsi_get_dsidev_from_id(0);
2464 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2465 break;
2466 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
2467 dsidev = dsi_get_dsidev_from_id(1);
2468 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2364 break; 2469 break;
2365 default: 2470 default:
2366 BUG(); 2471 BUG();
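Both rate functions now look up the DSI module that matches the selected clock source before querying its PLL HSDIV rate; dpi.c further down adds an equivalent helper (dpi_get_dsidev). A minimal sketch of the mapping, assuming the enum from <video/omapdss.h>:

static int dsi_module_from_clk_src(enum omap_dss_clk_source src)
{
	switch (src) {
	case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
		return 0;	/* dsi1 */
	case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
		return 1;	/* dsi2 */
	default:
		return -1;	/* clock not driven by a DSI PLL */
	}
}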
@@ -2371,6 +2476,7 @@ unsigned long dispc_fclk_rate(void)
2371 2476
2372unsigned long dispc_lclk_rate(enum omap_channel channel) 2477unsigned long dispc_lclk_rate(enum omap_channel channel)
2373{ 2478{
2479 struct platform_device *dsidev;
2374 int lcd; 2480 int lcd;
2375 unsigned long r; 2481 unsigned long r;
2376 u32 l; 2482 u32 l;
@@ -2380,11 +2486,16 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
2380 lcd = FLD_GET(l, 23, 16); 2486 lcd = FLD_GET(l, 23, 16);
2381 2487
2382 switch (dss_get_lcd_clk_source(channel)) { 2488 switch (dss_get_lcd_clk_source(channel)) {
2383 case DSS_CLK_SRC_FCK: 2489 case OMAP_DSS_CLK_SRC_FCK:
2384 r = dss_clk_get_rate(DSS_CLK_FCK); 2490 r = dss_clk_get_rate(DSS_CLK_FCK);
2385 break; 2491 break;
2386 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 2492 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
2387 r = dsi_get_pll_hsdiv_dispc_rate(); 2493 dsidev = dsi_get_dsidev_from_id(0);
2494 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2495 break;
2496 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
2497 dsidev = dsi_get_dsidev_from_id(1);
2498 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2388 break; 2499 break;
2389 default: 2500 default:
2390 BUG(); 2501 BUG();
@@ -2412,8 +2523,8 @@ void dispc_dump_clocks(struct seq_file *s)
2412{ 2523{
2413 int lcd, pcd; 2524 int lcd, pcd;
2414 u32 l; 2525 u32 l;
2415 enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); 2526 enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
2416 enum dss_clk_source lcd_clk_src; 2527 enum omap_dss_clk_source lcd_clk_src;
2417 2528
2418 enable_clocks(1); 2529 enable_clocks(1);
2419 2530
@@ -2516,7 +2627,7 @@ void dispc_dump_irqs(struct seq_file *s)
2516 2627
2517void dispc_dump_regs(struct seq_file *s) 2628void dispc_dump_regs(struct seq_file *s)
2518{ 2629{
2519#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r)) 2630#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
2520 2631
2521 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 2632 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
2522 2633
@@ -2528,152 +2639,227 @@ void dispc_dump_regs(struct seq_file *s)
2528 DUMPREG(DISPC_CONTROL); 2639 DUMPREG(DISPC_CONTROL);
2529 DUMPREG(DISPC_CONFIG); 2640 DUMPREG(DISPC_CONFIG);
2530 DUMPREG(DISPC_CAPABLE); 2641 DUMPREG(DISPC_CAPABLE);
2531 DUMPREG(DISPC_DEFAULT_COLOR(0)); 2642 DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
2532 DUMPREG(DISPC_DEFAULT_COLOR(1)); 2643 DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
2533 DUMPREG(DISPC_TRANS_COLOR(0)); 2644 DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
2534 DUMPREG(DISPC_TRANS_COLOR(1)); 2645 DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
2535 DUMPREG(DISPC_LINE_STATUS); 2646 DUMPREG(DISPC_LINE_STATUS);
2536 DUMPREG(DISPC_LINE_NUMBER); 2647 DUMPREG(DISPC_LINE_NUMBER);
2537 DUMPREG(DISPC_TIMING_H(0)); 2648 DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD));
2538 DUMPREG(DISPC_TIMING_V(0)); 2649 DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
2539 DUMPREG(DISPC_POL_FREQ(0)); 2650 DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
2540 DUMPREG(DISPC_DIVISORo(0)); 2651 DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
2541 DUMPREG(DISPC_GLOBAL_ALPHA); 2652 DUMPREG(DISPC_GLOBAL_ALPHA);
2542 DUMPREG(DISPC_SIZE_DIG); 2653 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
2543 DUMPREG(DISPC_SIZE_LCD(0)); 2654 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
2544 if (dss_has_feature(FEAT_MGR_LCD2)) { 2655 if (dss_has_feature(FEAT_MGR_LCD2)) {
2545 DUMPREG(DISPC_CONTROL2); 2656 DUMPREG(DISPC_CONTROL2);
2546 DUMPREG(DISPC_CONFIG2); 2657 DUMPREG(DISPC_CONFIG2);
2547 DUMPREG(DISPC_DEFAULT_COLOR(2)); 2658 DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
2548 DUMPREG(DISPC_TRANS_COLOR(2)); 2659 DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
2549 DUMPREG(DISPC_TIMING_H(2)); 2660 DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD2));
2550 DUMPREG(DISPC_TIMING_V(2)); 2661 DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD2));
2551 DUMPREG(DISPC_POL_FREQ(2)); 2662 DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
2552 DUMPREG(DISPC_DIVISORo(2)); 2663 DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD2));
2553 DUMPREG(DISPC_SIZE_LCD(2)); 2664 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
2554 } 2665 }
2555 2666
2556 DUMPREG(DISPC_GFX_BA0); 2667 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_GFX));
2557 DUMPREG(DISPC_GFX_BA1); 2668 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_GFX));
2558 DUMPREG(DISPC_GFX_POSITION); 2669 DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_GFX));
2559 DUMPREG(DISPC_GFX_SIZE); 2670 DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_GFX));
2560 DUMPREG(DISPC_GFX_ATTRIBUTES); 2671 DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_GFX));
2561 DUMPREG(DISPC_GFX_FIFO_THRESHOLD); 2672 DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
2562 DUMPREG(DISPC_GFX_FIFO_SIZE_STATUS); 2673 DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_GFX));
2563 DUMPREG(DISPC_GFX_ROW_INC); 2674 DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_GFX));
2564 DUMPREG(DISPC_GFX_PIXEL_INC); 2675 DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_GFX));
2565 DUMPREG(DISPC_GFX_WINDOW_SKIP); 2676 DUMPREG(DISPC_OVL_WINDOW_SKIP(OMAP_DSS_GFX));
2566 DUMPREG(DISPC_GFX_TABLE_BA); 2677 DUMPREG(DISPC_OVL_TABLE_BA(OMAP_DSS_GFX));
2567 2678
2568 DUMPREG(DISPC_DATA_CYCLE1(0)); 2679 DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
2569 DUMPREG(DISPC_DATA_CYCLE2(0)); 2680 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
2570 DUMPREG(DISPC_DATA_CYCLE3(0)); 2681 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
2571 2682
2572 DUMPREG(DISPC_CPR_COEF_R(0)); 2683 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
2573 DUMPREG(DISPC_CPR_COEF_G(0)); 2684 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
2574 DUMPREG(DISPC_CPR_COEF_B(0)); 2685 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
2575 if (dss_has_feature(FEAT_MGR_LCD2)) { 2686 if (dss_has_feature(FEAT_MGR_LCD2)) {
2576 DUMPREG(DISPC_DATA_CYCLE1(2)); 2687 DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
2577 DUMPREG(DISPC_DATA_CYCLE2(2)); 2688 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
2578 DUMPREG(DISPC_DATA_CYCLE3(2)); 2689 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
2579 2690
2580 DUMPREG(DISPC_CPR_COEF_R(2)); 2691 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
2581 DUMPREG(DISPC_CPR_COEF_G(2)); 2692 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
2582 DUMPREG(DISPC_CPR_COEF_B(2)); 2693 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
2583 } 2694 }
2584 2695
2585 DUMPREG(DISPC_GFX_PRELOAD); 2696 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
2586 2697
2587 DUMPREG(DISPC_VID_BA0(0)); 2698 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
2588 DUMPREG(DISPC_VID_BA1(0)); 2699 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
2589 DUMPREG(DISPC_VID_POSITION(0)); 2700 DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO1));
2590 DUMPREG(DISPC_VID_SIZE(0)); 2701 DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO1));
2591 DUMPREG(DISPC_VID_ATTRIBUTES(0)); 2702 DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
2592 DUMPREG(DISPC_VID_FIFO_THRESHOLD(0)); 2703 DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
2593 DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(0)); 2704 DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO1));
2594 DUMPREG(DISPC_VID_ROW_INC(0)); 2705 DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO1));
2595 DUMPREG(DISPC_VID_PIXEL_INC(0)); 2706 DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
2596 DUMPREG(DISPC_VID_FIR(0)); 2707 DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO1));
2597 DUMPREG(DISPC_VID_PICTURE_SIZE(0)); 2708 DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
2598 DUMPREG(DISPC_VID_ACCU0(0)); 2709 DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO1));
2599 DUMPREG(DISPC_VID_ACCU1(0)); 2710 DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO1));
2600 2711
2601 DUMPREG(DISPC_VID_BA0(1)); 2712 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO2));
2602 DUMPREG(DISPC_VID_BA1(1)); 2713 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO2));
2603 DUMPREG(DISPC_VID_POSITION(1)); 2714 DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO2));
2604 DUMPREG(DISPC_VID_SIZE(1)); 2715 DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO2));
2605 DUMPREG(DISPC_VID_ATTRIBUTES(1)); 2716 DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
2606 DUMPREG(DISPC_VID_FIFO_THRESHOLD(1)); 2717 DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
2607 DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(1)); 2718 DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO2));
2608 DUMPREG(DISPC_VID_ROW_INC(1)); 2719 DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO2));
2609 DUMPREG(DISPC_VID_PIXEL_INC(1)); 2720 DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
2610 DUMPREG(DISPC_VID_FIR(1)); 2721 DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO2));
2611 DUMPREG(DISPC_VID_PICTURE_SIZE(1)); 2722 DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
2612 DUMPREG(DISPC_VID_ACCU0(1)); 2723 DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO2));
2613 DUMPREG(DISPC_VID_ACCU1(1)); 2724 DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO2));
2614 2725
2615 DUMPREG(DISPC_VID_FIR_COEF_H(0, 0)); 2726 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 0));
2616 DUMPREG(DISPC_VID_FIR_COEF_H(0, 1)); 2727 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 1));
2617 DUMPREG(DISPC_VID_FIR_COEF_H(0, 2)); 2728 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 2));
2618 DUMPREG(DISPC_VID_FIR_COEF_H(0, 3)); 2729 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 3));
2619 DUMPREG(DISPC_VID_FIR_COEF_H(0, 4)); 2730 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 4));
2620 DUMPREG(DISPC_VID_FIR_COEF_H(0, 5)); 2731 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 5));
2621 DUMPREG(DISPC_VID_FIR_COEF_H(0, 6)); 2732 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 6));
2622 DUMPREG(DISPC_VID_FIR_COEF_H(0, 7)); 2733 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 7));
2623 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 0)); 2734 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 0));
2624 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 1)); 2735 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 1));
2625 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 2)); 2736 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 2));
2626 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 3)); 2737 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 3));
2627 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 4)); 2738 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 4));
2628 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 5)); 2739 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 5));
2629 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 6)); 2740 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 6));
2630 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 7)); 2741 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 7));
2631 DUMPREG(DISPC_VID_CONV_COEF(0, 0)); 2742 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0));
2632 DUMPREG(DISPC_VID_CONV_COEF(0, 1)); 2743 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1));
2633 DUMPREG(DISPC_VID_CONV_COEF(0, 2)); 2744 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
2634 DUMPREG(DISPC_VID_CONV_COEF(0, 3)); 2745 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
2635 DUMPREG(DISPC_VID_CONV_COEF(0, 4)); 2746 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
2636 DUMPREG(DISPC_VID_FIR_COEF_V(0, 0)); 2747 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
2637 DUMPREG(DISPC_VID_FIR_COEF_V(0, 1)); 2748 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
2638 DUMPREG(DISPC_VID_FIR_COEF_V(0, 2)); 2749 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
2639 DUMPREG(DISPC_VID_FIR_COEF_V(0, 3)); 2750 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
2640 DUMPREG(DISPC_VID_FIR_COEF_V(0, 4)); 2751 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
2641 DUMPREG(DISPC_VID_FIR_COEF_V(0, 5)); 2752 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
2642 DUMPREG(DISPC_VID_FIR_COEF_V(0, 6)); 2753 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
2643 DUMPREG(DISPC_VID_FIR_COEF_V(0, 7)); 2754 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
2644 2755
2645 DUMPREG(DISPC_VID_FIR_COEF_H(1, 0)); 2756 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
2646 DUMPREG(DISPC_VID_FIR_COEF_H(1, 1)); 2757 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
2647 DUMPREG(DISPC_VID_FIR_COEF_H(1, 2)); 2758 DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO1));
2648 DUMPREG(DISPC_VID_FIR_COEF_H(1, 3)); 2759 DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO1));
2649 DUMPREG(DISPC_VID_FIR_COEF_H(1, 4)); 2760 DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO1));
2650 DUMPREG(DISPC_VID_FIR_COEF_H(1, 5)); 2761 DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO1));
2651 DUMPREG(DISPC_VID_FIR_COEF_H(1, 6)); 2762
2652 DUMPREG(DISPC_VID_FIR_COEF_H(1, 7)); 2763 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 0));
2653 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 0)); 2764 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 1));
2654 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 1)); 2765 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 2));
2655 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 2)); 2766 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 3));
2656 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 3)); 2767 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 4));
2657 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 4)); 2768 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 5));
2658 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 5)); 2769 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 6));
2659 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 6)); 2770 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 7));
2660 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 7)); 2771
2661 DUMPREG(DISPC_VID_CONV_COEF(1, 0)); 2772 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 0));
2662 DUMPREG(DISPC_VID_CONV_COEF(1, 1)); 2773 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 1));
2663 DUMPREG(DISPC_VID_CONV_COEF(1, 2)); 2774 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 2));
2664 DUMPREG(DISPC_VID_CONV_COEF(1, 3)); 2775 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 3));
2665 DUMPREG(DISPC_VID_CONV_COEF(1, 4)); 2776 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 4));
2666 DUMPREG(DISPC_VID_FIR_COEF_V(1, 0)); 2777 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 5));
2667 DUMPREG(DISPC_VID_FIR_COEF_V(1, 1)); 2778 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 6));
2668 DUMPREG(DISPC_VID_FIR_COEF_V(1, 2)); 2779 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 7));
2669 DUMPREG(DISPC_VID_FIR_COEF_V(1, 3)); 2780
2670 DUMPREG(DISPC_VID_FIR_COEF_V(1, 4)); 2781 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 0));
2671 DUMPREG(DISPC_VID_FIR_COEF_V(1, 5)); 2782 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 1));
2672 DUMPREG(DISPC_VID_FIR_COEF_V(1, 6)); 2783 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 2));
2673 DUMPREG(DISPC_VID_FIR_COEF_V(1, 7)); 2784 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 3));
2674 2785 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 4));
2675 DUMPREG(DISPC_VID_PRELOAD(0)); 2786 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 5));
2676 DUMPREG(DISPC_VID_PRELOAD(1)); 2787 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 6));
2788 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 7));
2789 }
2790 if (dss_has_feature(FEAT_ATTR2))
2791 DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
2792
2793
2794 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 0));
2795 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 1));
2796 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 2));
2797 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 3));
2798 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 4));
2799 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 5));
2800 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 6));
2801 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 7));
2802 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 0));
2803 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 1));
2804 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 2));
2805 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 3));
2806 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 4));
2807 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 5));
2808 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 6));
2809 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 7));
2810 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0));
2811 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1));
2812 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
2813 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
2814 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
2815 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
2816 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
2817 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
2818 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
2819 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
2820 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
2821 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
2822 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
2823
2824 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
2825 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
2826 DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO2));
2827 DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO2));
2828 DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO2));
2829 DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO2));
2830
2831 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 0));
2832 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 1));
2833 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 2));
2834 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 3));
2835 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 4));
2836 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 5));
2837 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 6));
2838 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 7));
2839
2840 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 0));
2841 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 1));
2842 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 2));
2843 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 3));
2844 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 4));
2845 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 5));
2846 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 6));
2847 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 7));
2848
2849 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 0));
2850 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 1));
2851 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 2));
2852 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 3));
2853 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 4));
2854 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 5));
2855 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 6));
2856 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 7));
2857 }
2858 if (dss_has_feature(FEAT_ATTR2))
2859 DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
2860
2861 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
2862 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
2677 2863
2678 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 2864 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
2679#undef DUMPREG 2865#undef DUMPREG
@@ -3388,11 +3574,12 @@ int dispc_setup_plane(enum omap_plane plane,
3388 bool ilace, 3574 bool ilace,
3389 enum omap_dss_rotation_type rotation_type, 3575 enum omap_dss_rotation_type rotation_type,
3390 u8 rotation, bool mirror, u8 global_alpha, 3576 u8 rotation, bool mirror, u8 global_alpha,
3391 u8 pre_mult_alpha, enum omap_channel channel) 3577 u8 pre_mult_alpha, enum omap_channel channel,
3578 u32 puv_addr)
3392{ 3579{
3393 int r = 0; 3580 int r = 0;
3394 3581
3395 DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> " 3582 DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> "
3396 "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", 3583 "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
3397 plane, paddr, screen_width, pos_x, pos_y, 3584 plane, paddr, screen_width, pos_x, pos_y,
3398 width, height, 3585 width, height,
@@ -3411,7 +3598,8 @@ int dispc_setup_plane(enum omap_plane plane,
3411 rotation_type, 3598 rotation_type,
3412 rotation, mirror, 3599 rotation, mirror,
3413 global_alpha, 3600 global_alpha,
3414 pre_mult_alpha, channel); 3601 pre_mult_alpha,
3602 channel, puv_addr);
3415 3603
3416 enable_clocks(0); 3604 enable_clocks(0);
3417 3605
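The extra puv_addr parameter carries the physical address of the NV12 chroma plane. For a contiguous NV12 buffer the chroma plane typically sits right after the luma plane, though that layout is the caller's choice, not something this patch dictates; a minimal sketch:

#include <stdint.h>

/* Illustrative only: derive the UV address for a contiguous NV12 buffer */
static uint32_t nv12_uv_addr(uint32_t paddr, uint32_t stride, uint32_t height)
{
	return paddr + stride * height;	/* chroma starts after the Y plane */
}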
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
new file mode 100644
index 000000000000..6c9ee0a0efb3
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc.h
@@ -0,0 +1,691 @@
1/*
2 * linux/drivers/video/omap2/dss/dispc.h
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Archit Taneja <archit@ti.com>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __OMAP2_DISPC_REG_H
22#define __OMAP2_DISPC_REG_H
23
24/* DISPC common registers */
25#define DISPC_REVISION 0x0000
26#define DISPC_SYSCONFIG 0x0010
27#define DISPC_SYSSTATUS 0x0014
28#define DISPC_IRQSTATUS 0x0018
29#define DISPC_IRQENABLE 0x001C
30#define DISPC_CONTROL 0x0040
31#define DISPC_CONFIG 0x0044
32#define DISPC_CAPABLE 0x0048
33#define DISPC_LINE_STATUS 0x005C
34#define DISPC_LINE_NUMBER 0x0060
35#define DISPC_GLOBAL_ALPHA 0x0074
36#define DISPC_CONTROL2 0x0238
37#define DISPC_CONFIG2 0x0620
38#define DISPC_DIVISOR 0x0804
39
40/* DISPC overlay registers */
41#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
42 DISPC_BA0_OFFSET(n))
43#define DISPC_OVL_BA1(n) (DISPC_OVL_BASE(n) + \
44 DISPC_BA1_OFFSET(n))
45#define DISPC_OVL_BA0_UV(n) (DISPC_OVL_BASE(n) + \
46 DISPC_BA0_UV_OFFSET(n))
47#define DISPC_OVL_BA1_UV(n) (DISPC_OVL_BASE(n) + \
48 DISPC_BA1_UV_OFFSET(n))
49#define DISPC_OVL_POSITION(n) (DISPC_OVL_BASE(n) + \
50 DISPC_POS_OFFSET(n))
51#define DISPC_OVL_SIZE(n) (DISPC_OVL_BASE(n) + \
52 DISPC_SIZE_OFFSET(n))
53#define DISPC_OVL_ATTRIBUTES(n) (DISPC_OVL_BASE(n) + \
54 DISPC_ATTR_OFFSET(n))
55#define DISPC_OVL_ATTRIBUTES2(n) (DISPC_OVL_BASE(n) + \
56 DISPC_ATTR2_OFFSET(n))
57#define DISPC_OVL_FIFO_THRESHOLD(n) (DISPC_OVL_BASE(n) + \
58 DISPC_FIFO_THRESH_OFFSET(n))
59#define DISPC_OVL_FIFO_SIZE_STATUS(n) (DISPC_OVL_BASE(n) + \
60 DISPC_FIFO_SIZE_STATUS_OFFSET(n))
61#define DISPC_OVL_ROW_INC(n) (DISPC_OVL_BASE(n) + \
62 DISPC_ROW_INC_OFFSET(n))
63#define DISPC_OVL_PIXEL_INC(n) (DISPC_OVL_BASE(n) + \
64 DISPC_PIX_INC_OFFSET(n))
65#define DISPC_OVL_WINDOW_SKIP(n) (DISPC_OVL_BASE(n) + \
66 DISPC_WINDOW_SKIP_OFFSET(n))
67#define DISPC_OVL_TABLE_BA(n) (DISPC_OVL_BASE(n) + \
68 DISPC_TABLE_BA_OFFSET(n))
69#define DISPC_OVL_FIR(n) (DISPC_OVL_BASE(n) + \
70 DISPC_FIR_OFFSET(n))
71#define DISPC_OVL_FIR2(n) (DISPC_OVL_BASE(n) + \
72 DISPC_FIR2_OFFSET(n))
73#define DISPC_OVL_PICTURE_SIZE(n) (DISPC_OVL_BASE(n) + \
74 DISPC_PIC_SIZE_OFFSET(n))
75#define DISPC_OVL_ACCU0(n) (DISPC_OVL_BASE(n) + \
76 DISPC_ACCU0_OFFSET(n))
77#define DISPC_OVL_ACCU1(n) (DISPC_OVL_BASE(n) + \
78 DISPC_ACCU1_OFFSET(n))
79#define DISPC_OVL_ACCU2_0(n) (DISPC_OVL_BASE(n) + \
80 DISPC_ACCU2_0_OFFSET(n))
81#define DISPC_OVL_ACCU2_1(n) (DISPC_OVL_BASE(n) + \
82 DISPC_ACCU2_1_OFFSET(n))
83#define DISPC_OVL_FIR_COEF_H(n, i) (DISPC_OVL_BASE(n) + \
84 DISPC_FIR_COEF_H_OFFSET(n, i))
85#define DISPC_OVL_FIR_COEF_HV(n, i) (DISPC_OVL_BASE(n) + \
86 DISPC_FIR_COEF_HV_OFFSET(n, i))
87#define DISPC_OVL_FIR_COEF_H2(n, i) (DISPC_OVL_BASE(n) + \
88 DISPC_FIR_COEF_H2_OFFSET(n, i))
89#define DISPC_OVL_FIR_COEF_HV2(n, i) (DISPC_OVL_BASE(n) + \
90 DISPC_FIR_COEF_HV2_OFFSET(n, i))
91#define DISPC_OVL_CONV_COEF(n, i) (DISPC_OVL_BASE(n) + \
92 DISPC_CONV_COEF_OFFSET(n, i))
93#define DISPC_OVL_FIR_COEF_V(n, i) (DISPC_OVL_BASE(n) + \
94 DISPC_FIR_COEF_V_OFFSET(n, i))
95#define DISPC_OVL_FIR_COEF_V2(n, i) (DISPC_OVL_BASE(n) + \
96 DISPC_FIR_COEF_V2_OFFSET(n, i))
97#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
98 DISPC_PRELOAD_OFFSET(n))
99
100/* DISPC manager/channel specific registers */
101static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
102{
103 switch (channel) {
104 case OMAP_DSS_CHANNEL_LCD:
105 return 0x004C;
106 case OMAP_DSS_CHANNEL_DIGIT:
107 return 0x0050;
108 case OMAP_DSS_CHANNEL_LCD2:
109 return 0x03AC;
110 default:
111 BUG();
112 }
113}
114
115static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
116{
117 switch (channel) {
118 case OMAP_DSS_CHANNEL_LCD:
119 return 0x0054;
120 case OMAP_DSS_CHANNEL_DIGIT:
121 return 0x0058;
122 case OMAP_DSS_CHANNEL_LCD2:
123 return 0x03B0;
124 default:
125 BUG();
126 }
127}
128
129static inline u16 DISPC_TIMING_H(enum omap_channel channel)
130{
131 switch (channel) {
132 case OMAP_DSS_CHANNEL_LCD:
133 return 0x0064;
134 case OMAP_DSS_CHANNEL_DIGIT:
135 BUG();
136 case OMAP_DSS_CHANNEL_LCD2:
137 return 0x0400;
138 default:
139 BUG();
140 }
141}
142
143static inline u16 DISPC_TIMING_V(enum omap_channel channel)
144{
145 switch (channel) {
146 case OMAP_DSS_CHANNEL_LCD:
147 return 0x0068;
148 case OMAP_DSS_CHANNEL_DIGIT:
149 BUG();
150 case OMAP_DSS_CHANNEL_LCD2:
151 return 0x0404;
152 default:
153 BUG();
154 }
155}
156
157static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
158{
159 switch (channel) {
160 case OMAP_DSS_CHANNEL_LCD:
161 return 0x006C;
162 case OMAP_DSS_CHANNEL_DIGIT:
163 BUG();
164 case OMAP_DSS_CHANNEL_LCD2:
165 return 0x0408;
166 default:
167 BUG();
168 }
169}
170
171static inline u16 DISPC_DIVISORo(enum omap_channel channel)
172{
173 switch (channel) {
174 case OMAP_DSS_CHANNEL_LCD:
175 return 0x0070;
176 case OMAP_DSS_CHANNEL_DIGIT:
177 BUG();
178 case OMAP_DSS_CHANNEL_LCD2:
179 return 0x040C;
180 default:
181 BUG();
182 }
183}
184
185/* Named as DISPC_SIZE_LCD, DISPC_SIZE_DIGIT and DISPC_SIZE_LCD2 in TRM */
186static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
187{
188 switch (channel) {
189 case OMAP_DSS_CHANNEL_LCD:
190 return 0x007C;
191 case OMAP_DSS_CHANNEL_DIGIT:
192 return 0x0078;
193 case OMAP_DSS_CHANNEL_LCD2:
194 return 0x03CC;
195 default:
196 BUG();
197 }
198}
199
200static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
201{
202 switch (channel) {
203 case OMAP_DSS_CHANNEL_LCD:
204 return 0x01D4;
205 case OMAP_DSS_CHANNEL_DIGIT:
206 BUG();
207 case OMAP_DSS_CHANNEL_LCD2:
208 return 0x03C0;
209 default:
210 BUG();
211 }
212}
213
214static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
215{
216 switch (channel) {
217 case OMAP_DSS_CHANNEL_LCD:
218 return 0x01D8;
219 case OMAP_DSS_CHANNEL_DIGIT:
220 BUG();
221 case OMAP_DSS_CHANNEL_LCD2:
222 return 0x03C4;
223 default:
224 BUG();
225 }
226}
227
228static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
229{
230 switch (channel) {
231 case OMAP_DSS_CHANNEL_LCD:
232 return 0x01DC;
233 case OMAP_DSS_CHANNEL_DIGIT:
234 BUG();
235 case OMAP_DSS_CHANNEL_LCD2:
236 return 0x03C8;
237 default:
238 BUG();
239 }
240}
241
242static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
243{
244 switch (channel) {
245 case OMAP_DSS_CHANNEL_LCD:
246 return 0x0220;
247 case OMAP_DSS_CHANNEL_DIGIT:
248 BUG();
249 case OMAP_DSS_CHANNEL_LCD2:
250 return 0x03BC;
251 default:
252 BUG();
253 }
254}
255
256static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
257{
258 switch (channel) {
259 case OMAP_DSS_CHANNEL_LCD:
260 return 0x0224;
261 case OMAP_DSS_CHANNEL_DIGIT:
262 BUG();
263 case OMAP_DSS_CHANNEL_LCD2:
264 return 0x03B8;
265 default:
266 BUG();
267 }
268}
269
270static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
271{
272 switch (channel) {
273 case OMAP_DSS_CHANNEL_LCD:
274 return 0x0228;
275 case OMAP_DSS_CHANNEL_DIGIT:
276 BUG();
277 case OMAP_DSS_CHANNEL_LCD2:
278 return 0x03B4;
279 default:
280 BUG();
281 }
282}
283
284/* DISPC overlay register base addresses */
285static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
286{
287 switch (plane) {
288 case OMAP_DSS_GFX:
289 return 0x0080;
290 case OMAP_DSS_VIDEO1:
291 return 0x00BC;
292 case OMAP_DSS_VIDEO2:
293 return 0x014C;
294 default:
295 BUG();
296 }
297}
298
299/* DISPC overlay register offsets */
300static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
301{
302 switch (plane) {
303 case OMAP_DSS_GFX:
304 case OMAP_DSS_VIDEO1:
305 case OMAP_DSS_VIDEO2:
306 return 0x0000;
307 default:
308 BUG();
309 }
310}
311
312static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
313{
314 switch (plane) {
315 case OMAP_DSS_GFX:
316 case OMAP_DSS_VIDEO1:
317 case OMAP_DSS_VIDEO2:
318 return 0x0004;
319 default:
320 BUG();
321 }
322}
323
324static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
325{
326 switch (plane) {
327 case OMAP_DSS_GFX:
328 BUG();
329 case OMAP_DSS_VIDEO1:
330 return 0x0544;
331 case OMAP_DSS_VIDEO2:
332 return 0x04BC;
333 default:
334 BUG();
335 }
336}
337
338static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
339{
340 switch (plane) {
341 case OMAP_DSS_GFX:
342 BUG();
343 case OMAP_DSS_VIDEO1:
344 return 0x0548;
345 case OMAP_DSS_VIDEO2:
346 return 0x04C0;
347 default:
348 BUG();
349 }
350}
351
352static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
353{
354 switch (plane) {
355 case OMAP_DSS_GFX:
356 case OMAP_DSS_VIDEO1:
357 case OMAP_DSS_VIDEO2:
358 return 0x0008;
359 default:
360 BUG();
361 }
362}
363
364static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
365{
366 switch (plane) {
367 case OMAP_DSS_GFX:
368 case OMAP_DSS_VIDEO1:
369 case OMAP_DSS_VIDEO2:
370 return 0x000C;
371 default:
372 BUG();
373 }
374}
375
376static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
377{
378 switch (plane) {
379 case OMAP_DSS_GFX:
380 return 0x0020;
381 case OMAP_DSS_VIDEO1:
382 case OMAP_DSS_VIDEO2:
383 return 0x0010;
384 default:
385 BUG();
386 }
387}
388
389static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
390{
391 switch (plane) {
392 case OMAP_DSS_GFX:
393 BUG();
394 case OMAP_DSS_VIDEO1:
395 return 0x0568;
396 case OMAP_DSS_VIDEO2:
397 return 0x04DC;
398 default:
399 BUG();
400 }
401}
402
403static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
404{
405 switch (plane) {
406 case OMAP_DSS_GFX:
407 return 0x0024;
408 case OMAP_DSS_VIDEO1:
409 case OMAP_DSS_VIDEO2:
410 return 0x0014;
411 default:
412 BUG();
413 }
414}
415
416static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
417{
418 switch (plane) {
419 case OMAP_DSS_GFX:
420 return 0x0028;
421 case OMAP_DSS_VIDEO1:
422 case OMAP_DSS_VIDEO2:
423 return 0x0018;
424 default:
425 BUG();
426 }
427}
428
429static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
430{
431 switch (plane) {
432 case OMAP_DSS_GFX:
433 return 0x002C;
434 case OMAP_DSS_VIDEO1:
435 case OMAP_DSS_VIDEO2:
436 return 0x001C;
437 default:
438 BUG();
439 }
440}
441
442static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
443{
444 switch (plane) {
445 case OMAP_DSS_GFX:
446 return 0x0030;
447 case OMAP_DSS_VIDEO1:
448 case OMAP_DSS_VIDEO2:
449 return 0x0020;
450 default:
451 BUG();
452 }
453}
454
455static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
456{
457 switch (plane) {
458 case OMAP_DSS_GFX:
459 return 0x0034;
460 case OMAP_DSS_VIDEO1:
461 case OMAP_DSS_VIDEO2:
462 BUG();
463 default:
464 BUG();
465 }
466}
467
468static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
469{
470 switch (plane) {
471 case OMAP_DSS_GFX:
472 return 0x0038;
473 case OMAP_DSS_VIDEO1:
474 case OMAP_DSS_VIDEO2:
475 BUG();
476 default:
477 BUG();
478 }
479}
480
481static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
482{
483 switch (plane) {
484 case OMAP_DSS_GFX:
485 BUG();
486 case OMAP_DSS_VIDEO1:
487 case OMAP_DSS_VIDEO2:
488 return 0x0024;
489 default:
490 BUG();
491 }
492}
493
494static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
495{
496 switch (plane) {
497 case OMAP_DSS_GFX:
498 BUG();
499 case OMAP_DSS_VIDEO1:
500 return 0x0580;
501 case OMAP_DSS_VIDEO2:
502 return 0x055C;
503 default:
504 BUG();
505 }
506}
507
508static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
509{
510 switch (plane) {
511 case OMAP_DSS_GFX:
512 BUG();
513 case OMAP_DSS_VIDEO1:
514 case OMAP_DSS_VIDEO2:
515 return 0x0028;
516 default:
517 BUG();
518 }
519}
520
521
522static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
523{
524 switch (plane) {
525 case OMAP_DSS_GFX:
526 BUG();
527 case OMAP_DSS_VIDEO1:
528 case OMAP_DSS_VIDEO2:
529 return 0x002C;
530 default:
531 BUG();
532 }
533}
534
535static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
536{
537 switch (plane) {
538 case OMAP_DSS_GFX:
539 BUG();
540 case OMAP_DSS_VIDEO1:
541 return 0x0584;
542 case OMAP_DSS_VIDEO2:
543 return 0x0560;
544 default:
545 BUG();
546 }
547}
548
549static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
550{
551 switch (plane) {
552 case OMAP_DSS_GFX:
553 BUG();
554 case OMAP_DSS_VIDEO1:
555 case OMAP_DSS_VIDEO2:
556 return 0x0030;
557 default:
558 BUG();
559 }
560}
561
562static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
563{
564 switch (plane) {
565 case OMAP_DSS_GFX:
566 BUG();
567 case OMAP_DSS_VIDEO1:
568 return 0x0588;
569 case OMAP_DSS_VIDEO2:
570 return 0x0564;
571 default:
572 BUG();
573 }
574}
575
576/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
577static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
578{
579 switch (plane) {
580 case OMAP_DSS_GFX:
581 BUG();
582 case OMAP_DSS_VIDEO1:
583 case OMAP_DSS_VIDEO2:
584 return 0x0034 + i * 0x8;
585 default:
586 BUG();
587 }
588}
589
590/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
591static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
592{
593 switch (plane) {
594 case OMAP_DSS_GFX:
595 BUG();
596 case OMAP_DSS_VIDEO1:
597 return 0x058C + i * 0x8;
598 case OMAP_DSS_VIDEO2:
599 return 0x0568 + i * 0x8;
600 default:
601 BUG();
602 }
603}
604
605/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
606static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
607{
608 switch (plane) {
609 case OMAP_DSS_GFX:
610 BUG();
611 case OMAP_DSS_VIDEO1:
612 case OMAP_DSS_VIDEO2:
613 return 0x0038 + i * 0x8;
614 default:
615 BUG();
616 }
617}
618
619/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
620static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
621{
622 switch (plane) {
623 case OMAP_DSS_GFX:
624 BUG();
625 case OMAP_DSS_VIDEO1:
626 return 0x0590 + i * 8;
627 case OMAP_DSS_VIDEO2:
628 return 0x056C + i * 0x8;
629 default:
630 BUG();
631 }
632}
633
 634/* coef index i = {0, 1, 2, 3, 4} */
635static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
636{
637 switch (plane) {
638 case OMAP_DSS_GFX:
639 BUG();
640 case OMAP_DSS_VIDEO1:
641 case OMAP_DSS_VIDEO2:
642 return 0x0074 + i * 0x4;
643 default:
644 BUG();
645 }
646}
647
648/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
649static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
650{
651 switch (plane) {
652 case OMAP_DSS_GFX:
653 BUG();
654 case OMAP_DSS_VIDEO1:
655 return 0x0124 + i * 0x4;
656 case OMAP_DSS_VIDEO2:
657 return 0x00B4 + i * 0x4;
658 default:
659 BUG();
660 }
661}
662
663/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
664static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
665{
666 switch (plane) {
667 case OMAP_DSS_GFX:
668 BUG();
669 case OMAP_DSS_VIDEO1:
670 return 0x05CC + i * 0x4;
671 case OMAP_DSS_VIDEO2:
672 return 0x05A8 + i * 0x4;
673 default:
674 BUG();
675 }
676}
677
678static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
679{
680 switch (plane) {
681 case OMAP_DSS_GFX:
682 return 0x01AC;
683 case OMAP_DSS_VIDEO1:
684 return 0x0174;
685 case OMAP_DSS_VIDEO2:
686 return 0x00E8;
687 default:
688 BUG();
689 }
690}
691#endif
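The new header replaces the old flat register names with DISPC_OVL_BASE(plane) plus a per-plane offset. A quick check of two composed addresses, with the constants copied from the helpers above:

#include <assert.h>

int main(void)
{
	unsigned int video1_base = 0x00BC, video2_base = 0x014C;

	assert(video2_base + 0x0000 == 0x014C);	/* DISPC_OVL_BA0(VIDEO2), was DISPC_VID_BA0(1) */
	assert(video1_base + 0x0580 == 0x063C);	/* DISPC_OVL_FIR2(VIDEO1), new for NV12 chroma */
	return 0;
}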
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index a85a6f38b40c..c2dfc8c50057 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -27,7 +27,7 @@
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29 29
30#include <plat/display.h> 30#include <video/omapdss.h>
31#include "dss.h" 31#include "dss.h"
32 32
33static ssize_t display_enabled_show(struct device *dev, 33static ssize_t display_enabled_show(struct device *dev,
@@ -44,9 +44,13 @@ static ssize_t display_enabled_store(struct device *dev,
44 const char *buf, size_t size) 44 const char *buf, size_t size)
45{ 45{
46 struct omap_dss_device *dssdev = to_dss_device(dev); 46 struct omap_dss_device *dssdev = to_dss_device(dev);
47 bool enabled, r; 47 int r, enabled;
48 48
49 enabled = simple_strtoul(buf, NULL, 10); 49 r = kstrtoint(buf, 0, &enabled);
50 if (r)
51 return r;
52
53 enabled = !!enabled;
50 54
51 if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) { 55 if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
52 if (enabled) { 56 if (enabled) {
@@ -82,7 +86,9 @@ static ssize_t display_upd_mode_store(struct device *dev,
82 if (!dssdev->driver->set_update_mode) 86 if (!dssdev->driver->set_update_mode)
83 return -EINVAL; 87 return -EINVAL;
84 88
85 val = simple_strtoul(buf, NULL, 10); 89 r = kstrtoint(buf, 0, &val);
90 if (r)
91 return r;
86 92
87 switch (val) { 93 switch (val) {
88 case OMAP_DSS_UPDATE_DISABLED: 94 case OMAP_DSS_UPDATE_DISABLED:
@@ -114,13 +120,16 @@ static ssize_t display_tear_store(struct device *dev,
114 struct device_attribute *attr, const char *buf, size_t size) 120 struct device_attribute *attr, const char *buf, size_t size)
115{ 121{
116 struct omap_dss_device *dssdev = to_dss_device(dev); 122 struct omap_dss_device *dssdev = to_dss_device(dev);
117 unsigned long te; 123 int te, r;
118 int r;
119 124
120 if (!dssdev->driver->enable_te || !dssdev->driver->get_te) 125 if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
121 return -ENOENT; 126 return -ENOENT;
122 127
123 te = simple_strtoul(buf, NULL, 0); 128 r = kstrtoint(buf, 0, &te);
129 if (r)
130 return r;
131
132 te = !!te;
124 133
125 r = dssdev->driver->enable_te(dssdev, te); 134 r = dssdev->driver->enable_te(dssdev, te);
126 if (r) 135 if (r)
@@ -196,13 +205,14 @@ static ssize_t display_rotate_store(struct device *dev,
196 struct device_attribute *attr, const char *buf, size_t size) 205 struct device_attribute *attr, const char *buf, size_t size)
197{ 206{
198 struct omap_dss_device *dssdev = to_dss_device(dev); 207 struct omap_dss_device *dssdev = to_dss_device(dev);
199 unsigned long rot; 208 int rot, r;
200 int r;
201 209
202 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) 210 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
203 return -ENOENT; 211 return -ENOENT;
204 212
205 rot = simple_strtoul(buf, NULL, 0); 213 r = kstrtoint(buf, 0, &rot);
214 if (r)
215 return r;
206 216
207 r = dssdev->driver->set_rotate(dssdev, rot); 217 r = dssdev->driver->set_rotate(dssdev, rot);
208 if (r) 218 if (r)
@@ -226,13 +236,16 @@ static ssize_t display_mirror_store(struct device *dev,
226 struct device_attribute *attr, const char *buf, size_t size) 236 struct device_attribute *attr, const char *buf, size_t size)
227{ 237{
228 struct omap_dss_device *dssdev = to_dss_device(dev); 238 struct omap_dss_device *dssdev = to_dss_device(dev);
229 unsigned long mirror; 239 int mirror, r;
230 int r;
231 240
232 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror) 241 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
233 return -ENOENT; 242 return -ENOENT;
234 243
235 mirror = simple_strtoul(buf, NULL, 0); 244 r = kstrtoint(buf, 0, &mirror);
245 if (r)
246 return r;
247
248 mirror = !!mirror;
236 249
237 r = dssdev->driver->set_mirror(dssdev, mirror); 250 r = dssdev->driver->set_mirror(dssdev, mirror);
238 if (r) 251 if (r)
@@ -259,14 +272,15 @@ static ssize_t display_wss_store(struct device *dev,
259 struct device_attribute *attr, const char *buf, size_t size) 272 struct device_attribute *attr, const char *buf, size_t size)
260{ 273{
261 struct omap_dss_device *dssdev = to_dss_device(dev); 274 struct omap_dss_device *dssdev = to_dss_device(dev);
262 unsigned long wss; 275 u32 wss;
263 int r; 276 int r;
264 277
265 if (!dssdev->driver->get_wss || !dssdev->driver->set_wss) 278 if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
266 return -ENOENT; 279 return -ENOENT;
267 280
268 if (strict_strtoul(buf, 0, &wss)) 281 r = kstrtou32(buf, 0, &wss);
269 return -EINVAL; 282 if (r)
283 return r;
270 284
271 if (wss > 0xfffff) 285 if (wss > 0xfffff)
272 return -EINVAL; 286 return -EINVAL;
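All of the store handlers above follow the same conversion: kstrtoint()/kstrtou32() replace simple_strtoul()/strict_strtoul() so malformed input is rejected, and boolean attributes are normalised with !!. A minimal sketch of the pattern (hypothetical handler, not part of the patch):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_bool_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	int val, r;

	r = kstrtoint(buf, 0, &val);
	if (r)
		return r;		/* e.g. -EINVAL for "abc" */

	val = !!val;
	/* ... apply val to the device ... */
	return size;
}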
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 2d3ca4ca4a05..ff6bd30132df 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -30,16 +30,40 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/regulator/consumer.h> 31#include <linux/regulator/consumer.h>
32 32
33#include <plat/display.h> 33#include <video/omapdss.h>
34#include <plat/cpu.h> 34#include <plat/cpu.h>
35 35
36#include "dss.h" 36#include "dss.h"
37 37
38static struct { 38static struct {
39 struct regulator *vdds_dsi_reg; 39 struct regulator *vdds_dsi_reg;
40 struct platform_device *dsidev;
40} dpi; 41} dpi;
41 42
42#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 43static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk)
44{
45 int dsi_module;
46
47 dsi_module = clk == OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ? 0 : 1;
48
49 return dsi_get_dsidev_from_id(dsi_module);
50}
51
52static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev)
53{
54 if (dssdev->clocks.dispc.dispc_fclk_src ==
55 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
56 dssdev->clocks.dispc.dispc_fclk_src ==
57 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC ||
58 dssdev->clocks.dispc.channel.lcd_clk_src ==
59 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
60 dssdev->clocks.dispc.channel.lcd_clk_src ==
61 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC)
62 return true;
63 else
64 return false;
65}
66
43static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, 67static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
44 unsigned long pck_req, unsigned long *fck, int *lck_div, 68 unsigned long pck_req, unsigned long *fck, int *lck_div,
45 int *pck_div) 69 int *pck_div)
@@ -48,16 +72,16 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
48 struct dispc_clock_info dispc_cinfo; 72 struct dispc_clock_info dispc_cinfo;
49 int r; 73 int r;
50 74
51 r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo, 75 r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req,
52 &dispc_cinfo); 76 &dsi_cinfo, &dispc_cinfo);
53 if (r) 77 if (r)
54 return r; 78 return r;
55 79
56 r = dsi_pll_set_clock_div(&dsi_cinfo); 80 r = dsi_pll_set_clock_div(dpi.dsidev, &dsi_cinfo);
57 if (r) 81 if (r)
58 return r; 82 return r;
59 83
60 dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC); 84 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
61 85
62 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); 86 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
63 if (r) 87 if (r)
@@ -69,7 +93,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
69 93
70 return 0; 94 return 0;
71} 95}
72#else 96
73static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, 97static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
74 unsigned long pck_req, unsigned long *fck, int *lck_div, 98 unsigned long pck_req, unsigned long *fck, int *lck_div,
75 int *pck_div) 99 int *pck_div)
@@ -96,13 +120,12 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
96 120
97 return 0; 121 return 0;
98} 122}
99#endif
100 123
101static int dpi_set_mode(struct omap_dss_device *dssdev) 124static int dpi_set_mode(struct omap_dss_device *dssdev)
102{ 125{
103 struct omap_video_timings *t = &dssdev->panel.timings; 126 struct omap_video_timings *t = &dssdev->panel.timings;
104 int lck_div, pck_div; 127 int lck_div = 0, pck_div = 0;
105 unsigned long fck; 128 unsigned long fck = 0;
106 unsigned long pck; 129 unsigned long pck;
107 bool is_tft; 130 bool is_tft;
108 int r = 0; 131 int r = 0;
@@ -114,13 +137,12 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
114 137
115 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; 138 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
116 139
117#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 140 if (dpi_use_dsi_pll(dssdev))
118 r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, 141 r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000,
119 &lck_div, &pck_div); 142 &fck, &lck_div, &pck_div);
120#else 143 else
121 r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, 144 r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
122 &lck_div, &pck_div); 145 &fck, &lck_div, &pck_div);
123#endif
124 if (r) 146 if (r)
125 goto err0; 147 goto err0;
126 148
@@ -179,12 +201,13 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
179 if (r) 201 if (r)
180 goto err2; 202 goto err2;
181 203
182#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 204 if (dpi_use_dsi_pll(dssdev)) {
183 dss_clk_enable(DSS_CLK_SYSCK); 205 dss_clk_enable(DSS_CLK_SYSCK);
184 r = dsi_pll_init(dssdev, 0, 1); 206 r = dsi_pll_init(dpi.dsidev, 0, 1);
185 if (r) 207 if (r)
186 goto err3; 208 goto err3;
187#endif 209 }
210
188 r = dpi_set_mode(dssdev); 211 r = dpi_set_mode(dssdev);
189 if (r) 212 if (r)
190 goto err4; 213 goto err4;
@@ -196,11 +219,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
196 return 0; 219 return 0;
197 220
198err4: 221err4:
199#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 222 if (dpi_use_dsi_pll(dssdev))
200 dsi_pll_uninit(); 223 dsi_pll_uninit(dpi.dsidev, true);
201err3: 224err3:
202 dss_clk_disable(DSS_CLK_SYSCK); 225 if (dpi_use_dsi_pll(dssdev))
203#endif 226 dss_clk_disable(DSS_CLK_SYSCK);
204err2: 227err2:
205 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 228 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
206 if (cpu_is_omap34xx()) 229 if (cpu_is_omap34xx())
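
Because the PLL path is now chosen at run time, the error-unwind labels above must repeat the dpi_use_dsi_pll() test; otherwise a failure in dpi_set_mode() would tear down a PLL that was never initialised. The idiom, reduced to a self-contained skeleton with stand-in callbacks (none of these names are omapdss API):

	/* Skeleton of the conditional goto-unwind used in
	 * omapdss_dpi_display_enable(); the callbacks stand in for
	 * dss_clk_enable/dsi_pll_init/dpi_set_mode. */
	static int example_enable(bool uses_dsi_pll, void (*sysck)(bool on),
			int (*init_pll)(void), void (*uninit_pll)(void),
			int (*set_mode)(void))
	{
		int r;

		if (uses_dsi_pll) {
			sysck(true);
			r = init_pll();
			if (r)
				goto err_pll;	/* sys clock on, PLL not locked */
		}

		r = set_mode();
		if (r)
			goto err_mode;

		return 0;

	err_mode:
		if (uses_dsi_pll)
			uninit_pll();
	err_pll:
		if (uses_dsi_pll)
			sysck(false);
		return r;
	}
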
@@ -216,11 +239,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
216{ 239{
217 dssdev->manager->disable(dssdev->manager); 240 dssdev->manager->disable(dssdev->manager);
218 241
219#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 242 if (dpi_use_dsi_pll(dssdev)) {
220 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 243 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
221 dsi_pll_uninit(); 244 dsi_pll_uninit(dpi.dsidev, true);
222 dss_clk_disable(DSS_CLK_SYSCK); 245 dss_clk_disable(DSS_CLK_SYSCK);
223#endif 246 }
224 247
225 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 248 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
226 249
@@ -251,6 +274,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
251 int lck_div, pck_div; 274 int lck_div, pck_div;
252 unsigned long fck; 275 unsigned long fck;
253 unsigned long pck; 276 unsigned long pck;
277 struct dispc_clock_info dispc_cinfo;
254 278
255 if (!dispc_lcd_timings_ok(timings)) 279 if (!dispc_lcd_timings_ok(timings))
256 return -EINVAL; 280 return -EINVAL;
@@ -260,11 +284,9 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
260 284
261 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; 285 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
262 286
263#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 287 if (dpi_use_dsi_pll(dssdev)) {
264 {
265 struct dsi_clock_info dsi_cinfo; 288 struct dsi_clock_info dsi_cinfo;
266 struct dispc_clock_info dispc_cinfo; 289 r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft,
267 r = dsi_pll_calc_clock_div_pck(is_tft,
268 timings->pixel_clock * 1000, 290 timings->pixel_clock * 1000,
269 &dsi_cinfo, &dispc_cinfo); 291 &dsi_cinfo, &dispc_cinfo);
270 292
@@ -272,13 +294,8 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
272 return r; 294 return r;
273 295
274 fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; 296 fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
275 lck_div = dispc_cinfo.lck_div; 297 } else {
276 pck_div = dispc_cinfo.pck_div;
277 }
278#else
279 {
280 struct dss_clock_info dss_cinfo; 298 struct dss_clock_info dss_cinfo;
281 struct dispc_clock_info dispc_cinfo;
282 r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000, 299 r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000,
283 &dss_cinfo, &dispc_cinfo); 300 &dss_cinfo, &dispc_cinfo);
284 301
@@ -286,10 +303,10 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
286 return r; 303 return r;
287 304
288 fck = dss_cinfo.fck; 305 fck = dss_cinfo.fck;
289 lck_div = dispc_cinfo.lck_div;
290 pck_div = dispc_cinfo.pck_div;
291 } 306 }
292#endif 307
308 lck_div = dispc_cinfo.lck_div;
309 pck_div = dispc_cinfo.pck_div;
293 310
294 pck = fck / lck_div / pck_div / 1000; 311 pck = fck / lck_div / pck_div / 1000;
295 312
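
The hoisted dispc_cinfo lets both branches share the final divider read-out, and the last line converts units: fck and the dividers are in Hz, panel timings carry pixel_clock in kHz, hence the trailing /1000. A short illustration with made-up numbers:

	/* Illustrative numbers only; in the driver fck and the dividers come
	 * from dss_calc_clock_div() or dsi_pll_calc_clock_div_pck(). */
	unsigned long fck = 172800000;			/* 172.8 MHz functional clock */
	int lck_div = 1, pck_div = 4;
	unsigned long pck = fck / lck_div / pck_div / 1000;	/* 43200 kHz */

The remainder of dpi_check_timings(), outside this hunk, compares this achievable rate against the requested timings, so the panel driver ends up with a pixel clock the hardware can really generate.
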
@@ -316,6 +333,12 @@ int dpi_init_display(struct omap_dss_device *dssdev)
316 dpi.vdds_dsi_reg = vdds_dsi; 333 dpi.vdds_dsi_reg = vdds_dsi;
317 } 334 }
318 335
336 if (dpi_use_dsi_pll(dssdev)) {
337 enum omap_dss_clk_source dispc_fclk_src =
338 dssdev->clocks.dispc.dispc_fclk_src;
339 dpi.dsidev = dpi_get_dsidev(dispc_fclk_src);
340 }
341
319 return 0; 342 return 0;
320} 343}
321 344
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 0a7f1a47f8e3..345757cfcbee 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -33,8 +33,11 @@
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <linux/sched.h>
37#include <linux/slab.h>
38#include <linux/debugfs.h>
36 39
37#include <plat/display.h> 40#include <video/omapdss.h>
38#include <plat/clock.h> 41#include <plat/clock.h>
39 42
40#include "dss.h" 43#include "dss.h"
@@ -56,6 +59,7 @@ struct dsi_reg { u16 idx; };
56#define DSI_IRQSTATUS DSI_REG(0x0018) 59#define DSI_IRQSTATUS DSI_REG(0x0018)
57#define DSI_IRQENABLE DSI_REG(0x001C) 60#define DSI_IRQENABLE DSI_REG(0x001C)
58#define DSI_CTRL DSI_REG(0x0040) 61#define DSI_CTRL DSI_REG(0x0040)
62#define DSI_GNQ DSI_REG(0x0044)
59#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048) 63#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
60#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C) 64#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
61#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050) 65#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
@@ -90,6 +94,7 @@ struct dsi_reg { u16 idx; };
90#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004) 94#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
91#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008) 95#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
92#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014) 96#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
97#define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
93 98
94/* DSI_PLL_CTRL_SCP */ 99/* DSI_PLL_CTRL_SCP */
95 100
@@ -99,11 +104,11 @@ struct dsi_reg { u16 idx; };
99#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C) 104#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
100#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010) 105#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
101 106
102#define REG_GET(idx, start, end) \ 107#define REG_GET(dsidev, idx, start, end) \
103 FLD_GET(dsi_read_reg(idx), start, end) 108 FLD_GET(dsi_read_reg(dsidev, idx), start, end)
104 109
105#define REG_FLD_MOD(idx, val, start, end) \ 110#define REG_FLD_MOD(dsidev, idx, val, start, end) \
106 dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end)) 111 dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
107 112
108/* Global interrupts */ 113/* Global interrupts */
109#define DSI_IRQ_VC0 (1 << 0) 114#define DSI_IRQ_VC0 (1 << 0)
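
REG_GET() and REG_FLD_MOD() now carry the dsidev handle so the read-modify-write lands in the right instance's register space; the underlying FLD_GET/FLD_MOD helpers treat the bit range as start = most significant bit, end = least significant bit, as the call sites in this file show. A rough, hedged paraphrase of what they do (not the kernel's exact macro text, and only valid for the sub-word fields used here):

	/* Paraphrase of the bit-field helpers behind REG_GET/REG_FLD_MOD;
	 * start is the MSB and end the LSB of the field. Not the kernel's
	 * exact definitions; the mask math assumes fields narrower than
	 * 32 bits, which is all this file needs. */
	static inline u32 fld_mask(int start, int end)
	{
		return ((1u << (start - end + 1)) - 1) << end;
	}

	static inline u32 fld_get(u32 val, int start, int end)
	{
		return (val & fld_mask(start, end)) >> end;
	}

	static inline u32 fld_mod(u32 orig, u32 val, int start, int end)
	{
		return (orig & ~fld_mask(start, end)) |
		       ((val << end) & fld_mask(start, end));
	}

So REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0) reads DSI_CTRL through dsi_read_reg(dsidev, ...), sets bit 0, and writes the result back.
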
@@ -147,31 +152,50 @@ struct dsi_reg { u16 idx; };
147#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) 152#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
148#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) 153#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
149#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) 154#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
155#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
156#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
150#define DSI_CIO_IRQ_ERRESC1 (1 << 5) 157#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
151#define DSI_CIO_IRQ_ERRESC2 (1 << 6) 158#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
152#define DSI_CIO_IRQ_ERRESC3 (1 << 7) 159#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
160#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
161#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
153#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) 162#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
154#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) 163#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
155#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) 164#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
165#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
166#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
156#define DSI_CIO_IRQ_STATEULPS1 (1 << 15) 167#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
157#define DSI_CIO_IRQ_STATEULPS2 (1 << 16) 168#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
158#define DSI_CIO_IRQ_STATEULPS3 (1 << 17) 169#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
170#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
171#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
159#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) 172#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
160#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) 173#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
161#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) 174#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
162#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) 175#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
163#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) 176#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
164#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) 177#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
178#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
179#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
180#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
181#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
165#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) 182#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
166#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) 183#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
167#define DSI_CIO_IRQ_ERROR_MASK \ 184#define DSI_CIO_IRQ_ERROR_MASK \
168 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \ 185 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
169 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ 186 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
170 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRCONTROL1 | \ 187 DSI_CIO_IRQ_ERRSYNCESC5 | \
171 DSI_CIO_IRQ_ERRCONTROL2 | DSI_CIO_IRQ_ERRCONTROL3 | \ 188 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
189 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
190 DSI_CIO_IRQ_ERRESC5 | \
191 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
192 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
193 DSI_CIO_IRQ_ERRCONTROL5 | \
172 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \ 194 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
173 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \ 195 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
174 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3) 196 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
197 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
198 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
175 199
176#define DSI_DT_DCS_SHORT_WRITE_0 0x05 200#define DSI_DT_DCS_SHORT_WRITE_0 0x05
177#define DSI_DT_DCS_SHORT_WRITE_1 0x15 201#define DSI_DT_DCS_SHORT_WRITE_1 0x15
@@ -208,6 +232,19 @@ enum dsi_vc_mode {
208 DSI_VC_MODE_VP, 232 DSI_VC_MODE_VP,
209}; 233};
210 234
235enum dsi_lane {
236 DSI_CLK_P = 1 << 0,
237 DSI_CLK_N = 1 << 1,
238 DSI_DATA1_P = 1 << 2,
239 DSI_DATA1_N = 1 << 3,
240 DSI_DATA2_P = 1 << 4,
241 DSI_DATA2_N = 1 << 5,
242 DSI_DATA3_P = 1 << 6,
243 DSI_DATA3_N = 1 << 7,
244 DSI_DATA4_P = 1 << 8,
245 DSI_DATA4_N = 1 << 9,
246};
247
211struct dsi_update_region { 248struct dsi_update_region {
212 u16 x, y, w, h; 249 u16 x, y, w, h;
213 struct omap_dss_device *device; 250 struct omap_dss_device *device;
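
The dsi_lane enum gives every physical line its own bit, so a set of active lanes can travel through the code as a single mask, for example a clock lane plus two data lanes as sketched below; the code that consumes such masks (complex I/O setup, ULPS handling) is outside this hunk.

	/* Illustrative lane mask built from the enum above. */
	unsigned lane_mask = DSI_CLK_P | DSI_CLK_N |
			     DSI_DATA1_P | DSI_DATA1_N |
			     DSI_DATA2_P | DSI_DATA2_N;
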
@@ -227,14 +264,16 @@ struct dsi_isr_tables {
227 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; 264 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
228}; 265};
229 266
230static struct 267struct dsi_data {
231{
232 struct platform_device *pdev; 268 struct platform_device *pdev;
233 void __iomem *base; 269 void __iomem *base;
234 int irq; 270 int irq;
235 271
272 void (*dsi_mux_pads)(bool enable);
273
236 struct dsi_clock_info current_cinfo; 274 struct dsi_clock_info current_cinfo;
237 275
276 bool vdds_dsi_enabled;
238 struct regulator *vdds_dsi_reg; 277 struct regulator *vdds_dsi_reg;
239 278
240 struct { 279 struct {
@@ -258,8 +297,7 @@ static struct
258 struct dsi_update_region update_region; 297 struct dsi_update_region update_region;
259 298
260 bool te_enabled; 299 bool te_enabled;
261 300 bool ulps_enabled;
262 struct workqueue_struct *workqueue;
263 301
264 void (*framedone_callback)(int, void *); 302 void (*framedone_callback)(int, void *);
265 void *framedone_data; 303 void *framedone_data;
@@ -292,21 +330,63 @@ static struct
292 unsigned long regm_dispc_max, regm_dsi_max; 330 unsigned long regm_dispc_max, regm_dsi_max;
293 unsigned long fint_min, fint_max; 331 unsigned long fint_min, fint_max;
294 unsigned long lpdiv_max; 332 unsigned long lpdiv_max;
295} dsi; 333
334 int num_data_lanes;
335
336 unsigned scp_clk_refcount;
337};
338
339struct dsi_packet_sent_handler_data {
340 struct platform_device *dsidev;
341 struct completion *completion;
342};
343
344static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
296 345
297#ifdef DEBUG 346#ifdef DEBUG
298static unsigned int dsi_perf; 347static unsigned int dsi_perf;
299module_param_named(dsi_perf, dsi_perf, bool, 0644); 348module_param_named(dsi_perf, dsi_perf, bool, 0644);
300#endif 349#endif
301 350
302static inline void dsi_write_reg(const struct dsi_reg idx, u32 val) 351static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
303{ 352{
304 __raw_writel(val, dsi.base + idx.idx); 353 return dev_get_drvdata(&dsidev->dev);
305} 354}
306 355
307static inline u32 dsi_read_reg(const struct dsi_reg idx) 356static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
308{ 357{
309 return __raw_readl(dsi.base + idx.idx); 358 return dsi_pdev_map[dssdev->phy.dsi.module];
359}
360
361struct platform_device *dsi_get_dsidev_from_id(int module)
362{
363 return dsi_pdev_map[module];
364}
365
366static int dsi_get_dsidev_id(struct platform_device *dsidev)
367{
368 /* TEMP: Pass 0 as the dsi module index till the time the dsi platform
369 * device names aren't changed to the form "omapdss_dsi.0",
370 * "omapdss_dsi.1" and so on */
371 BUG_ON(dsidev->id != -1);
372
373 return 0;
374}
375
376static inline void dsi_write_reg(struct platform_device *dsidev,
377 const struct dsi_reg idx, u32 val)
378{
379 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
380
381 __raw_writel(val, dsi->base + idx.idx);
382}
383
384static inline u32 dsi_read_reg(struct platform_device *dsidev,
385 const struct dsi_reg idx)
386{
387 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
388
389 return __raw_readl(dsi->base + idx.idx);
310} 390}
311 391
312 392
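
This is the core of the conversion: the file-scope dsi singleton becomes a per-instance struct dsi_data reached through dev_get_drvdata(), and dsi_pdev_map[] lets code that only has a module index, or a dssdev, find the owning platform device again. The probe side that establishes both links is outside this hunk; a hedged sketch of what it has to do (the real DSI probe function may differ in detail):

	/* Hedged sketch of the probe-time pairing relied on by
	 * dsi_get_dsidrv_data() and dsi_get_dsidev_from_id(). */
	static int example_dsi_probe(struct platform_device *dsidev)
	{
		struct dsi_data *dsi;

		dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
		if (!dsi)
			return -ENOMEM;

		dsi->pdev = dsidev;
		dev_set_drvdata(&dsidev->dev, dsi);	/* what dsi_get_dsidrv_data() reads */

		/* dsi_get_dsidev_id() currently hard-codes module 0, as noted above */
		dsi_pdev_map[dsi_get_dsidev_id(dsidev)] = dsidev;

		return 0;
	}
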
@@ -318,21 +398,29 @@ void dsi_restore_context(void)
318{ 398{
319} 399}
320 400
321void dsi_bus_lock(void) 401void dsi_bus_lock(struct omap_dss_device *dssdev)
322{ 402{
323 down(&dsi.bus_lock); 403 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
404 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
405
406 down(&dsi->bus_lock);
324} 407}
325EXPORT_SYMBOL(dsi_bus_lock); 408EXPORT_SYMBOL(dsi_bus_lock);
326 409
327void dsi_bus_unlock(void) 410void dsi_bus_unlock(struct omap_dss_device *dssdev)
328{ 411{
329 up(&dsi.bus_lock); 412 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
413 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
414
415 up(&dsi->bus_lock);
330} 416}
331EXPORT_SYMBOL(dsi_bus_unlock); 417EXPORT_SYMBOL(dsi_bus_unlock);
332 418
333static bool dsi_bus_is_locked(void) 419static bool dsi_bus_is_locked(struct platform_device *dsidev)
334{ 420{
335 return dsi.bus_lock.count == 0; 421 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
422
423 return dsi->bus_lock.count == 0;
336} 424}
337 425
338static void dsi_completion_handler(void *data, u32 mask) 426static void dsi_completion_handler(void *data, u32 mask)
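
dsi_bus_lock()/dsi_bus_unlock() stay exported for panel drivers, but now take the dssdev so the semaphore of the correct DSI module is used. A hedged usage sketch from a panel driver's point of view; example_send_dcs() is a placeholder, not an omapdss function:

	static int example_send_dcs(struct omap_dss_device *dssdev);	/* placeholder */

	/* Serialize access to the DSI bus of this display's module. */
	static int example_panel_update(struct omap_dss_device *dssdev)
	{
		int r;

		dsi_bus_lock(dssdev);		/* takes the bus_lock of dssdev's module */
		r = example_send_dcs(dssdev);
		dsi_bus_unlock(dssdev);

		return r;
	}
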
@@ -340,12 +428,12 @@ static void dsi_completion_handler(void *data, u32 mask)
340 complete((struct completion *)data); 428 complete((struct completion *)data);
341} 429}
342 430
343static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum, 431static inline int wait_for_bit_change(struct platform_device *dsidev,
344 int value) 432 const struct dsi_reg idx, int bitnum, int value)
345{ 433{
346 int t = 100000; 434 int t = 100000;
347 435
348 while (REG_GET(idx, bitnum, bitnum) != value) { 436 while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
349 if (--t == 0) 437 if (--t == 0)
350 return !value; 438 return !value;
351 } 439 }
@@ -354,18 +442,21 @@ static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
354} 442}
355 443
356#ifdef DEBUG 444#ifdef DEBUG
357static void dsi_perf_mark_setup(void) 445static void dsi_perf_mark_setup(struct platform_device *dsidev)
358{ 446{
359 dsi.perf_setup_time = ktime_get(); 447 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
448 dsi->perf_setup_time = ktime_get();
360} 449}
361 450
362static void dsi_perf_mark_start(void) 451static void dsi_perf_mark_start(struct platform_device *dsidev)
363{ 452{
364 dsi.perf_start_time = ktime_get(); 453 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
454 dsi->perf_start_time = ktime_get();
365} 455}
366 456
367static void dsi_perf_show(const char *name) 457static void dsi_perf_show(struct platform_device *dsidev, const char *name)
368{ 458{
459 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
369 ktime_t t, setup_time, trans_time; 460 ktime_t t, setup_time, trans_time;
370 u32 total_bytes; 461 u32 total_bytes;
371 u32 setup_us, trans_us, total_us; 462 u32 setup_us, trans_us, total_us;
@@ -375,21 +466,21 @@ static void dsi_perf_show(const char *name)
375 466
376 t = ktime_get(); 467 t = ktime_get();
377 468
378 setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time); 469 setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
379 setup_us = (u32)ktime_to_us(setup_time); 470 setup_us = (u32)ktime_to_us(setup_time);
380 if (setup_us == 0) 471 if (setup_us == 0)
381 setup_us = 1; 472 setup_us = 1;
382 473
383 trans_time = ktime_sub(t, dsi.perf_start_time); 474 trans_time = ktime_sub(t, dsi->perf_start_time);
384 trans_us = (u32)ktime_to_us(trans_time); 475 trans_us = (u32)ktime_to_us(trans_time);
385 if (trans_us == 0) 476 if (trans_us == 0)
386 trans_us = 1; 477 trans_us = 1;
387 478
388 total_us = setup_us + trans_us; 479 total_us = setup_us + trans_us;
389 480
390 total_bytes = dsi.update_region.w * 481 total_bytes = dsi->update_region.w *
391 dsi.update_region.h * 482 dsi->update_region.h *
392 dsi.update_region.device->ctrl.pixel_size / 8; 483 dsi->update_region.device->ctrl.pixel_size / 8;
393 484
394 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), " 485 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
395 "%u bytes, %u kbytes/sec\n", 486 "%u bytes, %u kbytes/sec\n",
@@ -402,9 +493,9 @@ static void dsi_perf_show(const char *name)
402 total_bytes * 1000 / total_us); 493 total_bytes * 1000 / total_us);
403} 494}
404#else 495#else
405#define dsi_perf_mark_setup() 496#define dsi_perf_mark_setup(x)
406#define dsi_perf_mark_start() 497#define dsi_perf_mark_start(x)
407#define dsi_perf_show(x) 498#define dsi_perf_show(x, y)
408#endif 499#endif
409 500
410static void print_irq_status(u32 status) 501static void print_irq_status(u32 status)
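
Note that the non-DEBUG stubs had to grow parameters together with the real functions, otherwise builds without DEBUG would break at the new call sites. An alternative, shown only as a sketch and not what the driver does, is static inline no-ops, which keep the arguments type-checked in every configuration:

	/* Alternative sketch (not from the patch): typed no-op stubs for !DEBUG. */
	static inline void dsi_perf_mark_setup_stub(struct platform_device *dsidev) { }
	static inline void dsi_perf_mark_start_stub(struct platform_device *dsidev) { }
	static inline void dsi_perf_show_stub(struct platform_device *dsidev,
			const char *name) { }
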
@@ -510,38 +601,42 @@ static void print_irq_status_cio(u32 status)
510} 601}
511 602
512#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 603#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
513static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus) 604static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
605 u32 *vcstatus, u32 ciostatus)
514{ 606{
607 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
515 int i; 608 int i;
516 609
517 spin_lock(&dsi.irq_stats_lock); 610 spin_lock(&dsi->irq_stats_lock);
518 611
519 dsi.irq_stats.irq_count++; 612 dsi->irq_stats.irq_count++;
520 dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs); 613 dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
521 614
522 for (i = 0; i < 4; ++i) 615 for (i = 0; i < 4; ++i)
523 dss_collect_irq_stats(vcstatus[i], dsi.irq_stats.vc_irqs[i]); 616 dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
524 617
525 dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs); 618 dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
526 619
527 spin_unlock(&dsi.irq_stats_lock); 620 spin_unlock(&dsi->irq_stats_lock);
528} 621}
529#else 622#else
530#define dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus) 623#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
531#endif 624#endif
532 625
533static int debug_irq; 626static int debug_irq;
534 627
535static void dsi_handle_irq_errors(u32 irqstatus, u32 *vcstatus, u32 ciostatus) 628static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
629 u32 *vcstatus, u32 ciostatus)
536{ 630{
631 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
537 int i; 632 int i;
538 633
539 if (irqstatus & DSI_IRQ_ERROR_MASK) { 634 if (irqstatus & DSI_IRQ_ERROR_MASK) {
540 DSSERR("DSI error, irqstatus %x\n", irqstatus); 635 DSSERR("DSI error, irqstatus %x\n", irqstatus);
541 print_irq_status(irqstatus); 636 print_irq_status(irqstatus);
542 spin_lock(&dsi.errors_lock); 637 spin_lock(&dsi->errors_lock);
543 dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK; 638 dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
544 spin_unlock(&dsi.errors_lock); 639 spin_unlock(&dsi->errors_lock);
545 } else if (debug_irq) { 640 } else if (debug_irq) {
546 print_irq_status(irqstatus); 641 print_irq_status(irqstatus);
547 } 642 }
@@ -602,22 +697,27 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
602 697
603static irqreturn_t omap_dsi_irq_handler(int irq, void *arg) 698static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
604{ 699{
700 struct platform_device *dsidev;
701 struct dsi_data *dsi;
605 u32 irqstatus, vcstatus[4], ciostatus; 702 u32 irqstatus, vcstatus[4], ciostatus;
606 int i; 703 int i;
607 704
608 spin_lock(&dsi.irq_lock); 705 dsidev = (struct platform_device *) arg;
706 dsi = dsi_get_dsidrv_data(dsidev);
707
708 spin_lock(&dsi->irq_lock);
609 709
610 irqstatus = dsi_read_reg(DSI_IRQSTATUS); 710 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
611 711
612 /* IRQ is not for us */ 712 /* IRQ is not for us */
613 if (!irqstatus) { 713 if (!irqstatus) {
614 spin_unlock(&dsi.irq_lock); 714 spin_unlock(&dsi->irq_lock);
615 return IRQ_NONE; 715 return IRQ_NONE;
616 } 716 }
617 717
618 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); 718 dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
619 /* flush posted write */ 719 /* flush posted write */
620 dsi_read_reg(DSI_IRQSTATUS); 720 dsi_read_reg(dsidev, DSI_IRQSTATUS);
621 721
622 for (i = 0; i < 4; ++i) { 722 for (i = 0; i < 4; ++i) {
623 if ((irqstatus & (1 << i)) == 0) { 723 if ((irqstatus & (1 << i)) == 0) {
@@ -625,45 +725,47 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
625 continue; 725 continue;
626 } 726 }
627 727
628 vcstatus[i] = dsi_read_reg(DSI_VC_IRQSTATUS(i)); 728 vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
629 729
630 dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus[i]); 730 dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
631 /* flush posted write */ 731 /* flush posted write */
632 dsi_read_reg(DSI_VC_IRQSTATUS(i)); 732 dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
633 } 733 }
634 734
635 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { 735 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
636 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); 736 ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
637 737
638 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus); 738 dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
639 /* flush posted write */ 739 /* flush posted write */
640 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); 740 dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
641 } else { 741 } else {
642 ciostatus = 0; 742 ciostatus = 0;
643 } 743 }
644 744
645#ifdef DSI_CATCH_MISSING_TE 745#ifdef DSI_CATCH_MISSING_TE
646 if (irqstatus & DSI_IRQ_TE_TRIGGER) 746 if (irqstatus & DSI_IRQ_TE_TRIGGER)
647 del_timer(&dsi.te_timer); 747 del_timer(&dsi->te_timer);
648#endif 748#endif
649 749
650 /* make a copy and unlock, so that isrs can unregister 750 /* make a copy and unlock, so that isrs can unregister
651 * themselves */ 751 * themselves */
652 memcpy(&dsi.isr_tables_copy, &dsi.isr_tables, sizeof(dsi.isr_tables)); 752 memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
753 sizeof(dsi->isr_tables));
653 754
654 spin_unlock(&dsi.irq_lock); 755 spin_unlock(&dsi->irq_lock);
655 756
656 dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus); 757 dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
657 758
658 dsi_handle_irq_errors(irqstatus, vcstatus, ciostatus); 759 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
659 760
660 dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus); 761 dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
661 762
662 return IRQ_HANDLED; 763 return IRQ_HANDLED;
663} 764}
664 765
665/* dsi.irq_lock has to be locked by the caller */ 766/* dsi->irq_lock has to be locked by the caller */
666static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array, 767static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
768 struct dsi_isr_data *isr_array,
667 unsigned isr_array_size, u32 default_mask, 769 unsigned isr_array_size, u32 default_mask,
668 const struct dsi_reg enable_reg, 770 const struct dsi_reg enable_reg,
669 const struct dsi_reg status_reg) 771 const struct dsi_reg status_reg)
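
The handler now recovers the platform device from the void *arg cookie, which only works if the registration passes dsidev as the dev_id. That registration lives outside this hunk; a hedged sketch of what it looks like (flags and naming are assumptions, not taken from the patch):

	/* Hedged sketch of the IRQ registration matching the cast in the handler. */
	static int example_request_dsi_irq(struct platform_device *dsidev)
	{
		struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

		/* dev_id is dsidev, so the handler's (struct platform_device *)arg
		 * cast is valid; IRQF_SHARED pairs with the IRQ_NONE early return. */
		return request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
				dev_name(&dsidev->dev), dsidev);
	}
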
@@ -684,61 +786,67 @@ static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array,
684 mask |= isr_data->mask; 786 mask |= isr_data->mask;
685 } 787 }
686 788
687 old_mask = dsi_read_reg(enable_reg); 789 old_mask = dsi_read_reg(dsidev, enable_reg);
688 /* clear the irqstatus for newly enabled irqs */ 790 /* clear the irqstatus for newly enabled irqs */
689 dsi_write_reg(status_reg, (mask ^ old_mask) & mask); 791 dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
690 dsi_write_reg(enable_reg, mask); 792 dsi_write_reg(dsidev, enable_reg, mask);
691 793
692 /* flush posted writes */ 794 /* flush posted writes */
693 dsi_read_reg(enable_reg); 795 dsi_read_reg(dsidev, enable_reg);
694 dsi_read_reg(status_reg); 796 dsi_read_reg(dsidev, status_reg);
695} 797}
696 798
697/* dsi.irq_lock has to be locked by the caller */ 799/* dsi->irq_lock has to be locked by the caller */
698static void _omap_dsi_set_irqs(void) 800static void _omap_dsi_set_irqs(struct platform_device *dsidev)
699{ 801{
802 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
700 u32 mask = DSI_IRQ_ERROR_MASK; 803 u32 mask = DSI_IRQ_ERROR_MASK;
701#ifdef DSI_CATCH_MISSING_TE 804#ifdef DSI_CATCH_MISSING_TE
702 mask |= DSI_IRQ_TE_TRIGGER; 805 mask |= DSI_IRQ_TE_TRIGGER;
703#endif 806#endif
704 _omap_dsi_configure_irqs(dsi.isr_tables.isr_table, 807 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
705 ARRAY_SIZE(dsi.isr_tables.isr_table), mask, 808 ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
706 DSI_IRQENABLE, DSI_IRQSTATUS); 809 DSI_IRQENABLE, DSI_IRQSTATUS);
707} 810}
708 811
709/* dsi.irq_lock has to be locked by the caller */ 812/* dsi->irq_lock has to be locked by the caller */
710static void _omap_dsi_set_irqs_vc(int vc) 813static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
711{ 814{
712 _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_vc[vc], 815 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
713 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]), 816
817 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
818 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
714 DSI_VC_IRQ_ERROR_MASK, 819 DSI_VC_IRQ_ERROR_MASK,
715 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); 820 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
716} 821}
717 822
718/* dsi.irq_lock has to be locked by the caller */ 823/* dsi->irq_lock has to be locked by the caller */
719static void _omap_dsi_set_irqs_cio(void) 824static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
720{ 825{
721 _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_cio, 826 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
722 ARRAY_SIZE(dsi.isr_tables.isr_table_cio), 827
828 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
829 ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
723 DSI_CIO_IRQ_ERROR_MASK, 830 DSI_CIO_IRQ_ERROR_MASK,
724 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); 831 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
725} 832}
726 833
727static void _dsi_initialize_irq(void) 834static void _dsi_initialize_irq(struct platform_device *dsidev)
728{ 835{
836 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
729 unsigned long flags; 837 unsigned long flags;
730 int vc; 838 int vc;
731 839
732 spin_lock_irqsave(&dsi.irq_lock, flags); 840 spin_lock_irqsave(&dsi->irq_lock, flags);
733 841
734 memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables)); 842 memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
735 843
736 _omap_dsi_set_irqs(); 844 _omap_dsi_set_irqs(dsidev);
737 for (vc = 0; vc < 4; ++vc) 845 for (vc = 0; vc < 4; ++vc)
738 _omap_dsi_set_irqs_vc(vc); 846 _omap_dsi_set_irqs_vc(dsidev, vc);
739 _omap_dsi_set_irqs_cio(); 847 _omap_dsi_set_irqs_cio(dsidev);
740 848
741 spin_unlock_irqrestore(&dsi.irq_lock, flags); 849 spin_unlock_irqrestore(&dsi->irq_lock, flags);
742} 850}
743 851
744static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask, 852static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
@@ -797,126 +905,137 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
797 return -EINVAL; 905 return -EINVAL;
798} 906}
799 907
800static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask) 908static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
909 void *arg, u32 mask)
801{ 910{
911 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
802 unsigned long flags; 912 unsigned long flags;
803 int r; 913 int r;
804 914
805 spin_lock_irqsave(&dsi.irq_lock, flags); 915 spin_lock_irqsave(&dsi->irq_lock, flags);
806 916
807 r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table, 917 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
808 ARRAY_SIZE(dsi.isr_tables.isr_table)); 918 ARRAY_SIZE(dsi->isr_tables.isr_table));
809 919
810 if (r == 0) 920 if (r == 0)
811 _omap_dsi_set_irqs(); 921 _omap_dsi_set_irqs(dsidev);
812 922
813 spin_unlock_irqrestore(&dsi.irq_lock, flags); 923 spin_unlock_irqrestore(&dsi->irq_lock, flags);
814 924
815 return r; 925 return r;
816} 926}
817 927
818static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask) 928static int dsi_unregister_isr(struct platform_device *dsidev,
929 omap_dsi_isr_t isr, void *arg, u32 mask)
819{ 930{
931 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
820 unsigned long flags; 932 unsigned long flags;
821 int r; 933 int r;
822 934
823 spin_lock_irqsave(&dsi.irq_lock, flags); 935 spin_lock_irqsave(&dsi->irq_lock, flags);
824 936
825 r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table, 937 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
826 ARRAY_SIZE(dsi.isr_tables.isr_table)); 938 ARRAY_SIZE(dsi->isr_tables.isr_table));
827 939
828 if (r == 0) 940 if (r == 0)
829 _omap_dsi_set_irqs(); 941 _omap_dsi_set_irqs(dsidev);
830 942
831 spin_unlock_irqrestore(&dsi.irq_lock, flags); 943 spin_unlock_irqrestore(&dsi->irq_lock, flags);
832 944
833 return r; 945 return r;
834} 946}
835 947
836static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, 948static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
837 u32 mask) 949 omap_dsi_isr_t isr, void *arg, u32 mask)
838{ 950{
951 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
839 unsigned long flags; 952 unsigned long flags;
840 int r; 953 int r;
841 954
842 spin_lock_irqsave(&dsi.irq_lock, flags); 955 spin_lock_irqsave(&dsi->irq_lock, flags);
843 956
844 r = _dsi_register_isr(isr, arg, mask, 957 r = _dsi_register_isr(isr, arg, mask,
845 dsi.isr_tables.isr_table_vc[channel], 958 dsi->isr_tables.isr_table_vc[channel],
846 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); 959 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
847 960
848 if (r == 0) 961 if (r == 0)
849 _omap_dsi_set_irqs_vc(channel); 962 _omap_dsi_set_irqs_vc(dsidev, channel);
850 963
851 spin_unlock_irqrestore(&dsi.irq_lock, flags); 964 spin_unlock_irqrestore(&dsi->irq_lock, flags);
852 965
853 return r; 966 return r;
854} 967}
855 968
856static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, 969static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
857 u32 mask) 970 omap_dsi_isr_t isr, void *arg, u32 mask)
858{ 971{
972 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
859 unsigned long flags; 973 unsigned long flags;
860 int r; 974 int r;
861 975
862 spin_lock_irqsave(&dsi.irq_lock, flags); 976 spin_lock_irqsave(&dsi->irq_lock, flags);
863 977
864 r = _dsi_unregister_isr(isr, arg, mask, 978 r = _dsi_unregister_isr(isr, arg, mask,
865 dsi.isr_tables.isr_table_vc[channel], 979 dsi->isr_tables.isr_table_vc[channel],
866 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); 980 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
867 981
868 if (r == 0) 982 if (r == 0)
869 _omap_dsi_set_irqs_vc(channel); 983 _omap_dsi_set_irqs_vc(dsidev, channel);
870 984
871 spin_unlock_irqrestore(&dsi.irq_lock, flags); 985 spin_unlock_irqrestore(&dsi->irq_lock, flags);
872 986
873 return r; 987 return r;
874} 988}
875 989
876static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) 990static int dsi_register_isr_cio(struct platform_device *dsidev,
991 omap_dsi_isr_t isr, void *arg, u32 mask)
877{ 992{
993 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
878 unsigned long flags; 994 unsigned long flags;
879 int r; 995 int r;
880 996
881 spin_lock_irqsave(&dsi.irq_lock, flags); 997 spin_lock_irqsave(&dsi->irq_lock, flags);
882 998
883 r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, 999 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
884 ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); 1000 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
885 1001
886 if (r == 0) 1002 if (r == 0)
887 _omap_dsi_set_irqs_cio(); 1003 _omap_dsi_set_irqs_cio(dsidev);
888 1004
889 spin_unlock_irqrestore(&dsi.irq_lock, flags); 1005 spin_unlock_irqrestore(&dsi->irq_lock, flags);
890 1006
891 return r; 1007 return r;
892} 1008}
893 1009
894static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) 1010static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1011 omap_dsi_isr_t isr, void *arg, u32 mask)
895{ 1012{
1013 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
896 unsigned long flags; 1014 unsigned long flags;
897 int r; 1015 int r;
898 1016
899 spin_lock_irqsave(&dsi.irq_lock, flags); 1017 spin_lock_irqsave(&dsi->irq_lock, flags);
900 1018
901 r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, 1019 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
902 ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); 1020 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
903 1021
904 if (r == 0) 1022 if (r == 0)
905 _omap_dsi_set_irqs_cio(); 1023 _omap_dsi_set_irqs_cio(dsidev);
906 1024
907 spin_unlock_irqrestore(&dsi.irq_lock, flags); 1025 spin_unlock_irqrestore(&dsi->irq_lock, flags);
908 1026
909 return r; 1027 return r;
910} 1028}
911 1029
912static u32 dsi_get_errors(void) 1030static u32 dsi_get_errors(struct platform_device *dsidev)
913{ 1031{
1032 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
914 unsigned long flags; 1033 unsigned long flags;
915 u32 e; 1034 u32 e;
916 spin_lock_irqsave(&dsi.errors_lock, flags); 1035 spin_lock_irqsave(&dsi->errors_lock, flags);
917 e = dsi.errors; 1036 e = dsi->errors;
918 dsi.errors = 0; 1037 dsi->errors = 0;
919 spin_unlock_irqrestore(&dsi.errors_lock, flags); 1038 spin_unlock_irqrestore(&dsi->errors_lock, flags);
920 return e; 1039 return e;
921} 1040}
922 1041
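
Together with dsi_completion_handler() and the new dsi_packet_sent_handler_data, these register/unregister pairs support a common wait pattern: hook a completion to one interrupt bit, kick off the operation, wait with a timeout, then unhook. A hedged sketch; the DSI_VC_IRQ_PACKET_SENT mask name is assumed from context and does not appear in this hunk:

	/* Hedged sketch of the completion-based wait these helpers enable. */
	static int example_wait_packet_sent(struct platform_device *dsidev, int channel)
	{
		DECLARE_COMPLETION_ONSTACK(completion);
		int r;

		r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
				&completion, DSI_VC_IRQ_PACKET_SENT);
		if (r)
			return r;

		/* ... start the transfer on this virtual channel here ... */

		if (!wait_for_completion_timeout(&completion, msecs_to_jiffies(10)))
			r = -ETIMEDOUT;

		dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
				&completion, DSI_VC_IRQ_PACKET_SENT);

		return r;
	}
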
@@ -930,23 +1049,27 @@ static inline void enable_clocks(bool enable)
930} 1049}
931 1050
932/* source clock for DSI PLL. this could also be PCLKFREE */ 1051/* source clock for DSI PLL. this could also be PCLKFREE */
933static inline void dsi_enable_pll_clock(bool enable) 1052static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1053 bool enable)
934{ 1054{
1055 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1056
935 if (enable) 1057 if (enable)
936 dss_clk_enable(DSS_CLK_SYSCK); 1058 dss_clk_enable(DSS_CLK_SYSCK);
937 else 1059 else
938 dss_clk_disable(DSS_CLK_SYSCK); 1060 dss_clk_disable(DSS_CLK_SYSCK);
939 1061
940 if (enable && dsi.pll_locked) { 1062 if (enable && dsi->pll_locked) {
941 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) 1063 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
942 DSSERR("cannot lock PLL when enabling clocks\n"); 1064 DSSERR("cannot lock PLL when enabling clocks\n");
943 } 1065 }
944} 1066}
945 1067
946#ifdef DEBUG 1068#ifdef DEBUG
947static void _dsi_print_reset_status(void) 1069static void _dsi_print_reset_status(struct platform_device *dsidev)
948{ 1070{
949 u32 l; 1071 u32 l;
1072 int b0, b1, b2;
950 1073
951 if (!dss_debug) 1074 if (!dss_debug)
952 return; 1075 return;
@@ -954,35 +1077,47 @@ static void _dsi_print_reset_status(void)
954 /* A dummy read using the SCP interface to any DSIPHY register is 1077 /* A dummy read using the SCP interface to any DSIPHY register is
955 * required after DSIPHY reset to complete the reset of the DSI complex 1078 * required after DSIPHY reset to complete the reset of the DSI complex
956 * I/O. */ 1079 * I/O. */
957 l = dsi_read_reg(DSI_DSIPHY_CFG5); 1080 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
958 1081
959 printk(KERN_DEBUG "DSI resets: "); 1082 printk(KERN_DEBUG "DSI resets: ");
960 1083
961 l = dsi_read_reg(DSI_PLL_STATUS); 1084 l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
962 printk("PLL (%d) ", FLD_GET(l, 0, 0)); 1085 printk("PLL (%d) ", FLD_GET(l, 0, 0));
963 1086
964 l = dsi_read_reg(DSI_COMPLEXIO_CFG1); 1087 l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
965 printk("CIO (%d) ", FLD_GET(l, 29, 29)); 1088 printk("CIO (%d) ", FLD_GET(l, 29, 29));
966 1089
967 l = dsi_read_reg(DSI_DSIPHY_CFG5); 1090 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
968 printk("PHY (%x, %d, %d, %d)\n", 1091 b0 = 28;
969 FLD_GET(l, 28, 26), 1092 b1 = 27;
1093 b2 = 26;
1094 } else {
1095 b0 = 24;
1096 b1 = 25;
1097 b2 = 26;
1098 }
1099
1100 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1101 printk("PHY (%x%x%x, %d, %d, %d)\n",
1102 FLD_GET(l, b0, b0),
1103 FLD_GET(l, b1, b1),
1104 FLD_GET(l, b2, b2),
970 FLD_GET(l, 29, 29), 1105 FLD_GET(l, 29, 29),
971 FLD_GET(l, 30, 30), 1106 FLD_GET(l, 30, 30),
972 FLD_GET(l, 31, 31)); 1107 FLD_GET(l, 31, 31));
973} 1108}
974#else 1109#else
975#define _dsi_print_reset_status() 1110#define _dsi_print_reset_status(x)
976#endif 1111#endif
977 1112
978static inline int dsi_if_enable(bool enable) 1113static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
979{ 1114{
980 DSSDBG("dsi_if_enable(%d)\n", enable); 1115 DSSDBG("dsi_if_enable(%d)\n", enable);
981 1116
982 enable = enable ? 1 : 0; 1117 enable = enable ? 1 : 0;
983 REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */ 1118 REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
984 1119
985 if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) { 1120 if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
986 DSSERR("Failed to set dsi_if_enable to %d\n", enable); 1121 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
987 return -EIO; 1122 return -EIO;
988 } 1123 }
@@ -990,31 +1125,38 @@ static inline int dsi_if_enable(bool enable)
990 return 0; 1125 return 0;
991} 1126}
992 1127
993unsigned long dsi_get_pll_hsdiv_dispc_rate(void) 1128unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
994{ 1129{
995 return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk; 1130 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1131
1132 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
996} 1133}
997 1134
998static unsigned long dsi_get_pll_hsdiv_dsi_rate(void) 1135static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
999{ 1136{
1000 return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk; 1137 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1138
1139 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1001} 1140}
1002 1141
1003static unsigned long dsi_get_txbyteclkhs(void) 1142static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1004{ 1143{
1005 return dsi.current_cinfo.clkin4ddr / 16; 1144 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1145
1146 return dsi->current_cinfo.clkin4ddr / 16;
1006} 1147}
1007 1148
1008static unsigned long dsi_fclk_rate(void) 1149static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1009{ 1150{
1010 unsigned long r; 1151 unsigned long r;
1152 int dsi_module = dsi_get_dsidev_id(dsidev);
1011 1153
1012 if (dss_get_dsi_clk_source() == DSS_CLK_SRC_FCK) { 1154 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1013 /* DSI FCLK source is DSS_CLK_FCK */ 1155 /* DSI FCLK source is DSS_CLK_FCK */
1014 r = dss_clk_get_rate(DSS_CLK_FCK); 1156 r = dss_clk_get_rate(DSS_CLK_FCK);
1015 } else { 1157 } else {
1016 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ 1158 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1017 r = dsi_get_pll_hsdiv_dsi_rate(); 1159 r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1018 } 1160 }
1019 1161
1020 return r; 1162 return r;
@@ -1022,31 +1164,50 @@ static unsigned long dsi_fclk_rate(void)
1022 1164
1023static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev) 1165static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1024{ 1166{
1167 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1168 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1025 unsigned long dsi_fclk; 1169 unsigned long dsi_fclk;
1026 unsigned lp_clk_div; 1170 unsigned lp_clk_div;
1027 unsigned long lp_clk; 1171 unsigned long lp_clk;
1028 1172
1029 lp_clk_div = dssdev->phy.dsi.div.lp_clk_div; 1173 lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
1030 1174
1031 if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max) 1175 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1032 return -EINVAL; 1176 return -EINVAL;
1033 1177
1034 dsi_fclk = dsi_fclk_rate(); 1178 dsi_fclk = dsi_fclk_rate(dsidev);
1035 1179
1036 lp_clk = dsi_fclk / 2 / lp_clk_div; 1180 lp_clk = dsi_fclk / 2 / lp_clk_div;
1037 1181
1038 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk); 1182 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1039 dsi.current_cinfo.lp_clk = lp_clk; 1183 dsi->current_cinfo.lp_clk = lp_clk;
1040 dsi.current_cinfo.lp_clk_div = lp_clk_div; 1184 dsi->current_cinfo.lp_clk_div = lp_clk_div;
1041 1185
1042 REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */ 1186 /* LP_CLK_DIVISOR */
1187 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1043 1188
1044 REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 1189 /* LP_RX_SYNCHRO_ENABLE */
1045 21, 21); /* LP_RX_SYNCHRO_ENABLE */ 1190 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
1046 1191
1047 return 0; 1192 return 0;
1048} 1193}
1049 1194
1195static void dsi_enable_scp_clk(struct platform_device *dsidev)
1196{
1197 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1198
1199 if (dsi->scp_clk_refcount++ == 0)
1200 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1201}
1202
1203static void dsi_disable_scp_clk(struct platform_device *dsidev)
1204{
1205 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1206
1207 WARN_ON(dsi->scp_clk_refcount == 0);
1208 if (--dsi->scp_clk_refcount == 0)
1209 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1210}
1050 1211
1051enum dsi_pll_power_state { 1212enum dsi_pll_power_state {
1052 DSI_PLL_POWER_OFF = 0x0, 1213 DSI_PLL_POWER_OFF = 0x0,
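
The SCP clock gate (CIO_CLK_ICG in DSI_CLK_CTRL) is shared by more than one user, which is why a refcount is introduced above: the register is only touched on the first enable and the last disable, and the WARN_ON flags unbalanced calls. Balanced use, as a small sketch; the real callers are the PLL and complex-I/O paths elsewhere in this file:

	/* Sketch of balanced SCP clock usage. */
	static void example_scp_clk_balance(struct platform_device *dsidev)
	{
		dsi_enable_scp_clk(dsidev);	/* refcount 0 -> 1: gate opened */
		dsi_enable_scp_clk(dsidev);	/* refcount 1 -> 2: no register write */

		dsi_disable_scp_clk(dsidev);	/* refcount 2 -> 1: gate stays open */
		dsi_disable_scp_clk(dsidev);	/* refcount 1 -> 0: gate closed */
	}
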
@@ -1055,14 +1216,21 @@ enum dsi_pll_power_state {
1055 DSI_PLL_POWER_ON_DIV = 0x3, 1216 DSI_PLL_POWER_ON_DIV = 0x3,
1056}; 1217};
1057 1218
1058static int dsi_pll_power(enum dsi_pll_power_state state) 1219static int dsi_pll_power(struct platform_device *dsidev,
1220 enum dsi_pll_power_state state)
1059{ 1221{
1060 int t = 0; 1222 int t = 0;
1061 1223
1062 REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */ 1224 /* DSI-PLL power command 0x3 is not working */
1225 if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1226 state == DSI_PLL_POWER_ON_DIV)
1227 state = DSI_PLL_POWER_ON_ALL;
1228
1229 /* PLL_PWR_CMD */
1230 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1063 1231
1064 /* PLL_PWR_STATUS */ 1232 /* PLL_PWR_STATUS */
1065 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { 1233 while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1066 if (++t > 1000) { 1234 if (++t > 1000) {
1067 DSSERR("Failed to set DSI PLL power mode to %d\n", 1235 DSSERR("Failed to set DSI PLL power mode to %d\n",
1068 state); 1236 state);
@@ -1078,16 +1246,19 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
1078static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, 1246static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1079 struct dsi_clock_info *cinfo) 1247 struct dsi_clock_info *cinfo)
1080{ 1248{
1081 if (cinfo->regn == 0 || cinfo->regn > dsi.regn_max) 1249 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1250 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1251
1252 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1082 return -EINVAL; 1253 return -EINVAL;
1083 1254
1084 if (cinfo->regm == 0 || cinfo->regm > dsi.regm_max) 1255 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1085 return -EINVAL; 1256 return -EINVAL;
1086 1257
1087 if (cinfo->regm_dispc > dsi.regm_dispc_max) 1258 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1088 return -EINVAL; 1259 return -EINVAL;
1089 1260
1090 if (cinfo->regm_dsi > dsi.regm_dsi_max) 1261 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1091 return -EINVAL; 1262 return -EINVAL;
1092 1263
1093 if (cinfo->use_sys_clk) { 1264 if (cinfo->use_sys_clk) {
@@ -1106,7 +1277,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1106 1277
1107 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1)); 1278 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1108 1279
1109 if (cinfo->fint > dsi.fint_max || cinfo->fint < dsi.fint_min) 1280 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1110 return -EINVAL; 1281 return -EINVAL;
1111 1282
1112 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint; 1283 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
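
dsi_calc_clock_rates() now checks the requested dividers against the per-SoC limits stored in dsi_data. The arithmetic it validates is: fint = clkin / (regn * (highfreq ? 2 : 1)), clkin4ddr = 2 * regm * fint, and the HSDIV outputs divide clkin4ddr by regm_dispc and regm_dsi. A worked example with illustrative numbers, not taken from any particular board:

	/* Illustrative numbers only: 19.2 MHz system clock, highfreq disabled. */
	unsigned long clkin = 19200000;
	unsigned regn = 10, regm = 230, regm_dispc = 6;

	unsigned long fint = clkin / regn;		/* 1920000 Hz, inside 0.75-2.1 MHz */
	unsigned long clkin4ddr = 2 * regm * fint;	/* 883200000 Hz from the DSI PLL */
	unsigned long hsdiv_dispc = clkin4ddr / regm_dispc;	/* 147200000 Hz to DISPC */
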
@@ -1129,10 +1300,11 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1129 return 0; 1300 return 0;
1130} 1301}
1131 1302
1132int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, 1303int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1133 struct dsi_clock_info *dsi_cinfo, 1304 unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1134 struct dispc_clock_info *dispc_cinfo) 1305 struct dispc_clock_info *dispc_cinfo)
1135{ 1306{
1307 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1136 struct dsi_clock_info cur, best; 1308 struct dsi_clock_info cur, best;
1137 struct dispc_clock_info best_dispc; 1309 struct dispc_clock_info best_dispc;
1138 int min_fck_per_pck; 1310 int min_fck_per_pck;
@@ -1143,10 +1315,10 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
1143 1315
1144 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); 1316 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1145 1317
1146 if (req_pck == dsi.cache_req_pck && 1318 if (req_pck == dsi->cache_req_pck &&
1147 dsi.cache_cinfo.clkin == dss_sys_clk) { 1319 dsi->cache_cinfo.clkin == dss_sys_clk) {
1148 DSSDBG("DSI clock info found from cache\n"); 1320 DSSDBG("DSI clock info found from cache\n");
1149 *dsi_cinfo = dsi.cache_cinfo; 1321 *dsi_cinfo = dsi->cache_cinfo;
1150 dispc_find_clk_divs(is_tft, req_pck, 1322 dispc_find_clk_divs(is_tft, req_pck,
1151 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo); 1323 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
1152 return 0; 1324 return 0;
@@ -1176,17 +1348,17 @@ retry:
1176 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ 1348 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
1177 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */ 1349 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1178 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ 1350 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1179 for (cur.regn = 1; cur.regn < dsi.regn_max; ++cur.regn) { 1351 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1180 if (cur.highfreq == 0) 1352 if (cur.highfreq == 0)
1181 cur.fint = cur.clkin / cur.regn; 1353 cur.fint = cur.clkin / cur.regn;
1182 else 1354 else
1183 cur.fint = cur.clkin / (2 * cur.regn); 1355 cur.fint = cur.clkin / (2 * cur.regn);
1184 1356
1185 if (cur.fint > dsi.fint_max || cur.fint < dsi.fint_min) 1357 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1186 continue; 1358 continue;
1187 1359
1188 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ 1360 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
1189 for (cur.regm = 1; cur.regm < dsi.regm_max; ++cur.regm) { 1361 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1190 unsigned long a, b; 1362 unsigned long a, b;
1191 1363
1192 a = 2 * cur.regm * (cur.clkin/1000); 1364 a = 2 * cur.regm * (cur.clkin/1000);
@@ -1198,8 +1370,8 @@ retry:
1198 1370
1199 /* dsi_pll_hsdiv_dispc_clk(MHz) = 1371 /* dsi_pll_hsdiv_dispc_clk(MHz) =
1200 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */ 1372 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */
1201 for (cur.regm_dispc = 1; cur.regm_dispc < dsi.regm_dispc_max; 1373 for (cur.regm_dispc = 1; cur.regm_dispc <
1202 ++cur.regm_dispc) { 1374 dsi->regm_dispc_max; ++cur.regm_dispc) {
1203 struct dispc_clock_info cur_dispc; 1375 struct dispc_clock_info cur_dispc;
1204 cur.dsi_pll_hsdiv_dispc_clk = 1376 cur.dsi_pll_hsdiv_dispc_clk =
1205 cur.clkin4ddr / cur.regm_dispc; 1377 cur.clkin4ddr / cur.regm_dispc;
@@ -1259,34 +1431,39 @@ found:
1259 if (dispc_cinfo) 1431 if (dispc_cinfo)
1260 *dispc_cinfo = best_dispc; 1432 *dispc_cinfo = best_dispc;
1261 1433
1262 dsi.cache_req_pck = req_pck; 1434 dsi->cache_req_pck = req_pck;
1263 dsi.cache_clk_freq = 0; 1435 dsi->cache_clk_freq = 0;
1264 dsi.cache_cinfo = best; 1436 dsi->cache_cinfo = best;
1265 1437
1266 return 0; 1438 return 0;
1267} 1439}
1268 1440
1269int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) 1441int dsi_pll_set_clock_div(struct platform_device *dsidev,
1442 struct dsi_clock_info *cinfo)
1270{ 1443{
1444 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1271 int r = 0; 1445 int r = 0;
1272 u32 l; 1446 u32 l;
1273 int f; 1447 int f = 0;
1274 u8 regn_start, regn_end, regm_start, regm_end; 1448 u8 regn_start, regn_end, regm_start, regm_end;
1275 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end; 1449 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1276 1450
1277 DSSDBGF(); 1451 DSSDBGF();
1278 1452
1279 dsi.current_cinfo.fint = cinfo->fint; 1453 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
1280 dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr; 1454 dsi->current_cinfo.highfreq = cinfo->highfreq;
1281 dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk = 1455
1456 dsi->current_cinfo.fint = cinfo->fint;
1457 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1458 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1282 cinfo->dsi_pll_hsdiv_dispc_clk; 1459 cinfo->dsi_pll_hsdiv_dispc_clk;
1283 dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk = 1460 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1284 cinfo->dsi_pll_hsdiv_dsi_clk; 1461 cinfo->dsi_pll_hsdiv_dsi_clk;
1285 1462
1286 dsi.current_cinfo.regn = cinfo->regn; 1463 dsi->current_cinfo.regn = cinfo->regn;
1287 dsi.current_cinfo.regm = cinfo->regm; 1464 dsi->current_cinfo.regm = cinfo->regm;
1288 dsi.current_cinfo.regm_dispc = cinfo->regm_dispc; 1465 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1289 dsi.current_cinfo.regm_dsi = cinfo->regm_dsi; 1466 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1290 1467
1291 DSSDBG("DSI Fint %ld\n", cinfo->fint); 1468 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1292 1469
@@ -1309,12 +1486,12 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1309 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4); 1486 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1310 1487
1311 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc, 1488 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1312 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), 1489 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1313 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), 1490 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1314 cinfo->dsi_pll_hsdiv_dispc_clk); 1491 cinfo->dsi_pll_hsdiv_dispc_clk);
1315 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi, 1492 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1316 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), 1493 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1317 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), 1494 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1318 cinfo->dsi_pll_hsdiv_dsi_clk); 1495 cinfo->dsi_pll_hsdiv_dsi_clk);
1319 1496
1320 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end); 1497 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
@@ -1324,9 +1501,10 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1324 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start, 1501 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
1325 &regm_dsi_end); 1502 &regm_dsi_end);
1326 1503
1327 REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */ 1504 /* DSI_PLL_AUTOMODE = manual */
1505 REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1328 1506
1329 l = dsi_read_reg(DSI_PLL_CONFIGURATION1); 1507 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1330 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */ 1508 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1331 /* DSI_PLL_REGN */ 1509 /* DSI_PLL_REGN */
1332 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end); 1510 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
@@ -1338,22 +1516,22 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1338 /* DSIPROTO_CLOCK_DIV */ 1516 /* DSIPROTO_CLOCK_DIV */
1339 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0, 1517 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1340 regm_dsi_start, regm_dsi_end); 1518 regm_dsi_start, regm_dsi_end);
1341 dsi_write_reg(DSI_PLL_CONFIGURATION1, l); 1519 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1342 1520
1343 BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max); 1521 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1344 if (cinfo->fint < 1000000) 1522
1345 f = 0x3; 1523 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1346 else if (cinfo->fint < 1250000) 1524 f = cinfo->fint < 1000000 ? 0x3 :
1347 f = 0x4; 1525 cinfo->fint < 1250000 ? 0x4 :
1348 else if (cinfo->fint < 1500000) 1526 cinfo->fint < 1500000 ? 0x5 :
1349 f = 0x5; 1527 cinfo->fint < 1750000 ? 0x6 :
1350 else if (cinfo->fint < 1750000) 1528 0x7;
1351 f = 0x6; 1529 }
1352 else 1530
1353 f = 0x7; 1531 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1354 1532
1355 l = dsi_read_reg(DSI_PLL_CONFIGURATION2); 1533 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1356 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ 1534 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1357 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1, 1535 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
1358 11, 11); /* DSI_PLL_CLKSEL */ 1536 11, 11); /* DSI_PLL_CLKSEL */
1359 l = FLD_MOD(l, cinfo->highfreq, 1537 l = FLD_MOD(l, cinfo->highfreq,
@@ -1361,25 +1539,25 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1361 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ 1539 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1362 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ 1540 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1363 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ 1541 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1364 dsi_write_reg(DSI_PLL_CONFIGURATION2, l); 1542 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1365 1543
1366 REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ 1544 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1367 1545
1368 if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) { 1546 if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1369 DSSERR("dsi pll go bit not going down.\n"); 1547 DSSERR("dsi pll go bit not going down.\n");
1370 r = -EIO; 1548 r = -EIO;
1371 goto err; 1549 goto err;
1372 } 1550 }
1373 1551
1374 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) { 1552 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1375 DSSERR("cannot lock PLL\n"); 1553 DSSERR("cannot lock PLL\n");
1376 r = -EIO; 1554 r = -EIO;
1377 goto err; 1555 goto err;
1378 } 1556 }
1379 1557
1380 dsi.pll_locked = 1; 1558 dsi->pll_locked = 1;
1381 1559
1382 l = dsi_read_reg(DSI_PLL_CONFIGURATION2); 1560 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1383 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */ 1561 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1384 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */ 1562 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1385 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */ 1563 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
@@ -1394,52 +1572,53 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1394 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */ 1572 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1395 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */ 1573 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1396 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */ 1574 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1397 dsi_write_reg(DSI_PLL_CONFIGURATION2, l); 1575 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1398 1576
1399 DSSDBG("PLL config done\n"); 1577 DSSDBG("PLL config done\n");
1400err: 1578err:
1401 return r; 1579 return r;
1402} 1580}
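The FLD_MOD()/REG_FLD_MOD() calls in this hunk are plain read-modify-write updates of named bit fields, and the FREQSEL code is a banded lookup on fint; both are easy to check in isolation. A standalone sketch (the field position 4..1 and the band edges are taken from the hunk above; the helper names are illustrative, not the driver's):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative equivalents of the driver's FLD_MOD() idiom: replace
	 * bits [start:end] of 'val' with 'field', leaving the rest intact. */
	static uint32_t fld_mask(int start, int end)
	{
		return ((1u << (start - end + 1)) - 1) << end;
	}

	static uint32_t fld_mod(uint32_t val, uint32_t field, int start, int end)
	{
		return (val & ~fld_mask(start, end)) |
		       ((field << end) & fld_mask(start, end));
	}

	/* FREQSEL band selection as in dsi_pll_set_clock_div(): pick a code
	 * from the internal PLL reference frequency fint (in Hz). */
	static uint32_t dsi_freqsel(unsigned long fint)
	{
		return fint < 1000000 ? 0x3 :
		       fint < 1250000 ? 0x4 :
		       fint < 1500000 ? 0x5 :
		       fint < 1750000 ? 0x6 : 0x7;
	}

	int main(void)
	{
		uint32_t conf2 = 0;

		/* DSI_PLL_FREQSEL lives in bits 4..1 of DSI_PLL_CONFIGURATION2 */
		conf2 = fld_mod(conf2, dsi_freqsel(1300000), 4, 1);
		printf("CONFIGURATION2 = %#x\n", conf2);	/* 0xa: code 0x5 at bit 1 */
		return 0;
	}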
1403 1581
1404int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, 1582int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1405 bool enable_hsdiv) 1583 bool enable_hsdiv)
1406{ 1584{
1585 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1407 int r = 0; 1586 int r = 0;
1408 enum dsi_pll_power_state pwstate; 1587 enum dsi_pll_power_state pwstate;
1409 1588
1410 DSSDBG("PLL init\n"); 1589 DSSDBG("PLL init\n");
1411 1590
1412#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 1591 if (dsi->vdds_dsi_reg == NULL) {
1413 /*
1414 * HACK: this is just a quick hack to get the USE_DSI_PLL
1415 * option working. USE_DSI_PLL is itself a big hack, and
1416 * should be removed.
1417 */
1418 if (dsi.vdds_dsi_reg == NULL) {
1419 struct regulator *vdds_dsi; 1592 struct regulator *vdds_dsi;
1420 1593
1421 vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); 1594 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
1422 1595
1423 if (IS_ERR(vdds_dsi)) { 1596 if (IS_ERR(vdds_dsi)) {
1424 DSSERR("can't get VDDS_DSI regulator\n"); 1597 DSSERR("can't get VDDS_DSI regulator\n");
1425 return PTR_ERR(vdds_dsi); 1598 return PTR_ERR(vdds_dsi);
1426 } 1599 }
1427 1600
1428 dsi.vdds_dsi_reg = vdds_dsi; 1601 dsi->vdds_dsi_reg = vdds_dsi;
1429 } 1602 }
1430#endif
1431 1603
1432 enable_clocks(1); 1604 enable_clocks(1);
1433 dsi_enable_pll_clock(1); 1605 dsi_enable_pll_clock(dsidev, 1);
1606 /*
1607 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1608 */
1609 dsi_enable_scp_clk(dsidev);
1434 1610
1435 r = regulator_enable(dsi.vdds_dsi_reg); 1611 if (!dsi->vdds_dsi_enabled) {
1436 if (r) 1612 r = regulator_enable(dsi->vdds_dsi_reg);
1437 goto err0; 1613 if (r)
1614 goto err0;
1615 dsi->vdds_dsi_enabled = true;
1616 }
1438 1617
1439 /* XXX PLL does not come out of reset without this... */ 1618 /* XXX PLL does not come out of reset without this... */
1440 dispc_pck_free_enable(1); 1619 dispc_pck_free_enable(1);
1441 1620
1442 if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) { 1621 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1443 DSSERR("PLL not coming out of reset.\n"); 1622 DSSERR("PLL not coming out of reset.\n");
1444 r = -ENODEV; 1623 r = -ENODEV;
1445 dispc_pck_free_enable(0); 1624 dispc_pck_free_enable(0);
@@ -1459,7 +1638,7 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
1459 else 1638 else
1460 pwstate = DSI_PLL_POWER_OFF; 1639 pwstate = DSI_PLL_POWER_OFF;
1461 1640
1462 r = dsi_pll_power(pwstate); 1641 r = dsi_pll_power(dsidev, pwstate);
1463 1642
1464 if (r) 1643 if (r)
1465 goto err1; 1644 goto err1;
@@ -1468,42 +1647,53 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
1468 1647
1469 return 0; 1648 return 0;
1470err1: 1649err1:
1471 regulator_disable(dsi.vdds_dsi_reg); 1650 if (dsi->vdds_dsi_enabled) {
1651 regulator_disable(dsi->vdds_dsi_reg);
1652 dsi->vdds_dsi_enabled = false;
1653 }
1472err0: 1654err0:
1655 dsi_disable_scp_clk(dsidev);
1473 enable_clocks(0); 1656 enable_clocks(0);
1474 dsi_enable_pll_clock(0); 1657 dsi_enable_pll_clock(dsidev, 0);
1475 return r; 1658 return r;
1476} 1659}
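dsi_pll_init() now enables the VDDS_DSI regulator only when it is not already on and unwinds in reverse order through the err1/err0 labels. A minimal standalone sketch of that acquire-then-unwind shape, with stubbed resources standing in for the clocks, SCP clock, regulator and PLL power call (all names here are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	static void enable_clocks(bool on)  { printf("clocks %s\n", on ? "on" : "off"); }
	static void enable_scp_clk(bool on) { printf("scp clk %s\n", on ? "on" : "off"); }
	static int  regulator_on(void)      { printf("regulator on\n"); return 0; }
	static void regulator_off(void)     { printf("regulator off\n"); }
	static int  pll_power_on(void)      { return -5; /* simulate a failure */ }

	static bool vdds_enabled;

	static int pll_init_like(void)
	{
		int r = 0;

		enable_clocks(true);
		enable_scp_clk(true);

		if (!vdds_enabled) {
			r = regulator_on();
			if (r)
				goto err0;
			vdds_enabled = true;
		}

		r = pll_power_on();
		if (r)
			goto err1;

		return 0;

	err1:
		/* undo in reverse order of acquisition */
		if (vdds_enabled) {
			regulator_off();
			vdds_enabled = false;
		}
	err0:
		enable_scp_clk(false);
		enable_clocks(false);
		return r;
	}

	int main(void)
	{
		return pll_init_like() ? 1 : 0;
	}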
1477 1660
1478void dsi_pll_uninit(void) 1661void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1479{ 1662{
1663 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1664
1665 dsi->pll_locked = 0;
1666 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1667 if (disconnect_lanes) {
1668 WARN_ON(!dsi->vdds_dsi_enabled);
1669 regulator_disable(dsi->vdds_dsi_reg);
1670 dsi->vdds_dsi_enabled = false;
1671 }
1672
1673 dsi_disable_scp_clk(dsidev);
1480 enable_clocks(0); 1674 enable_clocks(0);
1481 dsi_enable_pll_clock(0); 1675 dsi_enable_pll_clock(dsidev, 0);
1482 1676
1483 dsi.pll_locked = 0;
1484 dsi_pll_power(DSI_PLL_POWER_OFF);
1485 regulator_disable(dsi.vdds_dsi_reg);
1486 DSSDBG("PLL uninit done\n"); 1677 DSSDBG("PLL uninit done\n");
1487} 1678}
1488 1679
1489void dsi_dump_clocks(struct seq_file *s) 1680static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1681 struct seq_file *s)
1490{ 1682{
1491 int clksel; 1683 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1492 struct dsi_clock_info *cinfo = &dsi.current_cinfo; 1684 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1493 enum dss_clk_source dispc_clk_src, dsi_clk_src; 1685 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1686 int dsi_module = dsi_get_dsidev_id(dsidev);
1494 1687
1495 dispc_clk_src = dss_get_dispc_clk_source(); 1688 dispc_clk_src = dss_get_dispc_clk_source();
1496 dsi_clk_src = dss_get_dsi_clk_source(); 1689 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1497 1690
1498 enable_clocks(1); 1691 enable_clocks(1);
1499 1692
1500 clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11); 1693 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1501
1502 seq_printf(s, "- DSI PLL -\n");
1503 1694
1504 seq_printf(s, "dsi pll source = %s\n", 1695 seq_printf(s, "dsi pll source = %s\n",
1505 clksel == 0 ? 1696 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
1506 "dss_sys_clk" : "pclkfree");
1507 1697
1508 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn); 1698 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1509 1699
@@ -1515,7 +1705,7 @@ void dsi_dump_clocks(struct seq_file *s)
1515 dss_feat_get_clk_source_name(dispc_clk_src), 1705 dss_feat_get_clk_source_name(dispc_clk_src),
1516 cinfo->dsi_pll_hsdiv_dispc_clk, 1706 cinfo->dsi_pll_hsdiv_dispc_clk,
1517 cinfo->regm_dispc, 1707 cinfo->regm_dispc,
1518 dispc_clk_src == DSS_CLK_SRC_FCK ? 1708 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1519 "off" : "on"); 1709 "off" : "on");
1520 1710
1521 seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n", 1711 seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
@@ -1523,45 +1713,55 @@ void dsi_dump_clocks(struct seq_file *s)
1523 dss_feat_get_clk_source_name(dsi_clk_src), 1713 dss_feat_get_clk_source_name(dsi_clk_src),
1524 cinfo->dsi_pll_hsdiv_dsi_clk, 1714 cinfo->dsi_pll_hsdiv_dsi_clk,
1525 cinfo->regm_dsi, 1715 cinfo->regm_dsi,
1526 dsi_clk_src == DSS_CLK_SRC_FCK ? 1716 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1527 "off" : "on"); 1717 "off" : "on");
1528 1718
1529 seq_printf(s, "- DSI -\n"); 1719 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1530 1720
1531 seq_printf(s, "dsi fclk source = %s (%s)\n", 1721 seq_printf(s, "dsi fclk source = %s (%s)\n",
1532 dss_get_generic_clk_source_name(dsi_clk_src), 1722 dss_get_generic_clk_source_name(dsi_clk_src),
1533 dss_feat_get_clk_source_name(dsi_clk_src)); 1723 dss_feat_get_clk_source_name(dsi_clk_src));
1534 1724
1535 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate()); 1725 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1536 1726
1537 seq_printf(s, "DDR_CLK\t\t%lu\n", 1727 seq_printf(s, "DDR_CLK\t\t%lu\n",
1538 cinfo->clkin4ddr / 4); 1728 cinfo->clkin4ddr / 4);
1539 1729
1540 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs()); 1730 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1541 1731
1542 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); 1732 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1543 1733
1544 seq_printf(s, "VP_CLK\t\t%lu\n"
1545 "VP_PCLK\t\t%lu\n",
1546 dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD),
1547 dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD));
1548
1549 enable_clocks(0); 1734 enable_clocks(0);
1550} 1735}
1551 1736
1737void dsi_dump_clocks(struct seq_file *s)
1738{
1739 struct platform_device *dsidev;
1740 int i;
1741
1742 for (i = 0; i < MAX_NUM_DSI; i++) {
1743 dsidev = dsi_get_dsidev_from_id(i);
1744 if (dsidev)
1745 dsi_dump_dsidev_clocks(dsidev, s);
1746 }
1747}
1748
1552#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 1749#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1553void dsi_dump_irqs(struct seq_file *s) 1750static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1751 struct seq_file *s)
1554{ 1752{
1753 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1555 unsigned long flags; 1754 unsigned long flags;
1556 struct dsi_irq_stats stats; 1755 struct dsi_irq_stats stats;
1756 int dsi_module = dsi_get_dsidev_id(dsidev);
1557 1757
1558 spin_lock_irqsave(&dsi.irq_stats_lock, flags); 1758 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1559 1759
1560 stats = dsi.irq_stats; 1760 stats = dsi->irq_stats;
1561 memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats)); 1761 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1562 dsi.irq_stats.last_reset = jiffies; 1762 dsi->irq_stats.last_reset = jiffies;
1563 1763
1564 spin_unlock_irqrestore(&dsi.irq_stats_lock, flags); 1764 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1565 1765
1566 seq_printf(s, "period %u ms\n", 1766 seq_printf(s, "period %u ms\n",
1567 jiffies_to_msecs(jiffies - stats.last_reset)); 1767 jiffies_to_msecs(jiffies - stats.last_reset));
@@ -1570,7 +1770,7 @@ void dsi_dump_irqs(struct seq_file *s)
1570#define PIS(x) \ 1770#define PIS(x) \
1571 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); 1771 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1572 1772
1573 seq_printf(s, "-- DSI interrupts --\n"); 1773 seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
1574 PIS(VC0); 1774 PIS(VC0);
1575 PIS(VC1); 1775 PIS(VC1);
1576 PIS(VC2); 1776 PIS(VC2);
@@ -1636,13 +1836,45 @@ void dsi_dump_irqs(struct seq_file *s)
1636 PIS(ULPSACTIVENOT_ALL1); 1836 PIS(ULPSACTIVENOT_ALL1);
1637#undef PIS 1837#undef PIS
1638} 1838}
1839
1840static void dsi1_dump_irqs(struct seq_file *s)
1841{
1842 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1843
1844 dsi_dump_dsidev_irqs(dsidev, s);
1845}
1846
1847static void dsi2_dump_irqs(struct seq_file *s)
1848{
1849 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1850
1851 dsi_dump_dsidev_irqs(dsidev, s);
1852}
1853
1854void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1855 const struct file_operations *debug_fops)
1856{
1857 struct platform_device *dsidev;
1858
1859 dsidev = dsi_get_dsidev_from_id(0);
1860 if (dsidev)
1861 debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1862 &dsi1_dump_irqs, debug_fops);
1863
1864 dsidev = dsi_get_dsidev_from_id(1);
1865 if (dsidev)
1866 debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1867 &dsi2_dump_irqs, debug_fops);
1868}
1639#endif 1869#endif
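dsi_dump_dsidev_irqs() copies the counters and zeroes them under the stats spinlock, then formats the snapshot without holding the lock. A userspace analogue of that snapshot-and-reset idiom, using a pthread mutex and time() in place of the driver's spinlock and jiffies (struct and names are stand-ins, not the driver's):

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	struct demo_irq_stats {
		time_t last_reset;
		unsigned irq_count[32];
	};

	static pthread_mutex_t demo_stats_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct demo_irq_stats demo_stats;

	/* Copy the live counters to a local snapshot and restart the accounting
	 * period under the lock, so the (slow) formatting can run without it --
	 * the same shape as the spin_lock_irqsave() section in the hunk. */
	static void demo_snapshot_stats(struct demo_irq_stats *snap)
	{
		pthread_mutex_lock(&demo_stats_lock);
		*snap = demo_stats;
		memset(&demo_stats, 0, sizeof(demo_stats));
		demo_stats.last_reset = time(NULL);
		pthread_mutex_unlock(&demo_stats_lock);
	}

	int main(void)
	{
		struct demo_irq_stats snap;

		demo_stats.irq_count[0] = 42;	/* pretend some interrupts happened */
		demo_snapshot_stats(&snap);
		printf("irq0 fired %u times since last dump\n", snap.irq_count[0]);
		return 0;
	}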
1640 1870
1641void dsi_dump_regs(struct seq_file *s) 1871static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1872 struct seq_file *s)
1642{ 1873{
1643#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) 1874#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1644 1875
1645 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 1876 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1877 dsi_enable_scp_clk(dsidev);
1646 1878
1647 DUMPREG(DSI_REVISION); 1879 DUMPREG(DSI_REVISION);
1648 DUMPREG(DSI_SYSCONFIG); 1880 DUMPREG(DSI_SYSCONFIG);
@@ -1714,25 +1946,57 @@ void dsi_dump_regs(struct seq_file *s)
1714 DUMPREG(DSI_PLL_CONFIGURATION1); 1946 DUMPREG(DSI_PLL_CONFIGURATION1);
1715 DUMPREG(DSI_PLL_CONFIGURATION2); 1947 DUMPREG(DSI_PLL_CONFIGURATION2);
1716 1948
1949 dsi_disable_scp_clk(dsidev);
1717 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 1950 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1718#undef DUMPREG 1951#undef DUMPREG
1719} 1952}
1720 1953
1721enum dsi_complexio_power_state { 1954static void dsi1_dump_regs(struct seq_file *s)
1955{
1956 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1957
1958 dsi_dump_dsidev_regs(dsidev, s);
1959}
1960
1961static void dsi2_dump_regs(struct seq_file *s)
1962{
1963 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1964
1965 dsi_dump_dsidev_regs(dsidev, s);
1966}
1967
1968void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
1969 const struct file_operations *debug_fops)
1970{
1971 struct platform_device *dsidev;
1972
1973 dsidev = dsi_get_dsidev_from_id(0);
1974 if (dsidev)
1975 debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
1976 &dsi1_dump_regs, debug_fops);
1977
1978 dsidev = dsi_get_dsidev_from_id(1);
1979 if (dsidev)
1980 debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
1981 &dsi2_dump_regs, debug_fops);
1982}
1983enum dsi_cio_power_state {
1722 DSI_COMPLEXIO_POWER_OFF = 0x0, 1984 DSI_COMPLEXIO_POWER_OFF = 0x0,
1723 DSI_COMPLEXIO_POWER_ON = 0x1, 1985 DSI_COMPLEXIO_POWER_ON = 0x1,
1724 DSI_COMPLEXIO_POWER_ULPS = 0x2, 1986 DSI_COMPLEXIO_POWER_ULPS = 0x2,
1725}; 1987};
1726 1988
1727static int dsi_complexio_power(enum dsi_complexio_power_state state) 1989static int dsi_cio_power(struct platform_device *dsidev,
1990 enum dsi_cio_power_state state)
1728{ 1991{
1729 int t = 0; 1992 int t = 0;
1730 1993
1731 /* PWR_CMD */ 1994 /* PWR_CMD */
1732 REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27); 1995 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
1733 1996
1734 /* PWR_STATUS */ 1997 /* PWR_STATUS */
1735 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { 1998 while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
1999 26, 25) != state) {
1736 if (++t > 1000) { 2000 if (++t > 1000) {
1737 DSSERR("failed to set complexio power state to " 2001 DSSERR("failed to set complexio power state to "
1738 "%d\n", state); 2002 "%d\n", state);
@@ -1744,9 +2008,70 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state)
1744 return 0; 2008 return 0;
1745} 2009}
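dsi_cio_power() writes the requested state into PWR_CMD and then polls PWR_STATUS with a bounded retry count instead of waiting forever. A self-contained sketch of that request-and-poll pattern against a fake register (bit positions follow DSI_COMPLEXIO_CFG1 as used above; the register model is invented for the example):

	#include <stdint.h>
	#include <stdio.h>

	/* Fake register: bits 28:27 = requested state (PWR_CMD),
	 * bits 26:25 = current state (PWR_STATUS). */
	static uint32_t complexio_cfg1;

	static uint32_t read_pwr_status(void)
	{
		/* pretend the hardware follows the request immediately */
		complexio_cfg1 = (complexio_cfg1 & ~(3u << 25)) |
				 (((complexio_cfg1 >> 27) & 3u) << 25);
		return (complexio_cfg1 >> 25) & 3u;
	}

	/* Request a power state and poll the status field a bounded number of
	 * times, as dsi_cio_power() does (it gives up after 1000 reads). */
	static int set_cio_power(uint32_t state)
	{
		int t = 0;

		complexio_cfg1 = (complexio_cfg1 & ~(3u << 27)) | (state << 27);

		while (read_pwr_status() != state) {
			if (++t > 1000) {
				fprintf(stderr, "power state %u not reached\n", state);
				return -1;
			}
		}
		return 0;
	}

	int main(void)
	{
		return set_cio_power(1 /* ON */) ? 1 : 0;
	}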
1746 2010
1747static void dsi_complexio_config(struct omap_dss_device *dssdev) 2011/* Number of data lanes present on DSI interface */
2012static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
1748{ 2013{
2014 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
2015 * of data lanes as 2 by default */
2016 if (dss_has_feature(FEAT_DSI_GNQ))
2017 return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */
2018 else
2019 return 2;
2020}
2021
2022/* Number of data lanes used by the dss device */
2023static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
2024{
2025 int num_data_lanes = 0;
2026
2027 if (dssdev->phy.dsi.data1_lane != 0)
2028 num_data_lanes++;
2029 if (dssdev->phy.dsi.data2_lane != 0)
2030 num_data_lanes++;
2031 if (dssdev->phy.dsi.data3_lane != 0)
2032 num_data_lanes++;
2033 if (dssdev->phy.dsi.data4_lane != 0)
2034 num_data_lanes++;
2035
2036 return num_data_lanes;
2037}
2038
2039static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2040{
2041 int val;
2042
2043 /* line buffer on OMAP3 is 1024 x 24bits */
2044 /* XXX: for some reason using full buffer size causes
2045 * considerable TX slowdown with update sizes that fill the
2046 * whole buffer */
2047 if (!dss_has_feature(FEAT_DSI_GNQ))
2048 return 1023 * 3;
2049
2050 val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
2051
2052 switch (val) {
2053 case 1:
2054 return 512 * 3; /* 512x24 bits */
2055 case 2:
2056 return 682 * 3; /* 682x24 bits */
2057 case 3:
2058 return 853 * 3; /* 853x24 bits */
2059 case 4:
2060 return 1024 * 3; /* 1024x24 bits */
2061 case 5:
2062 return 1194 * 3; /* 1194x24 bits */
2063 case 6:
2064 return 1365 * 3; /* 1365x24 bits */
2065 default:
2066 BUG();
2067 }
2068}
2069
2070static void dsi_set_lane_config(struct omap_dss_device *dssdev)
2071{
2072 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1749 u32 r; 2073 u32 r;
2074 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
1750 2075
1751 int clk_lane = dssdev->phy.dsi.clk_lane; 2076 int clk_lane = dssdev->phy.dsi.clk_lane;
1752 int data1_lane = dssdev->phy.dsi.data1_lane; 2077 int data1_lane = dssdev->phy.dsi.data1_lane;
@@ -1755,14 +2080,28 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev)
1755 int data1_pol = dssdev->phy.dsi.data1_pol; 2080 int data1_pol = dssdev->phy.dsi.data1_pol;
1756 int data2_pol = dssdev->phy.dsi.data2_pol; 2081 int data2_pol = dssdev->phy.dsi.data2_pol;
1757 2082
1758 r = dsi_read_reg(DSI_COMPLEXIO_CFG1); 2083 r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1759 r = FLD_MOD(r, clk_lane, 2, 0); 2084 r = FLD_MOD(r, clk_lane, 2, 0);
1760 r = FLD_MOD(r, clk_pol, 3, 3); 2085 r = FLD_MOD(r, clk_pol, 3, 3);
1761 r = FLD_MOD(r, data1_lane, 6, 4); 2086 r = FLD_MOD(r, data1_lane, 6, 4);
1762 r = FLD_MOD(r, data1_pol, 7, 7); 2087 r = FLD_MOD(r, data1_pol, 7, 7);
1763 r = FLD_MOD(r, data2_lane, 10, 8); 2088 r = FLD_MOD(r, data2_lane, 10, 8);
1764 r = FLD_MOD(r, data2_pol, 11, 11); 2089 r = FLD_MOD(r, data2_pol, 11, 11);
1765 dsi_write_reg(DSI_COMPLEXIO_CFG1, r); 2090 if (num_data_lanes_dssdev > 2) {
2091 int data3_lane = dssdev->phy.dsi.data3_lane;
2092 int data3_pol = dssdev->phy.dsi.data3_pol;
2093
2094 r = FLD_MOD(r, data3_lane, 14, 12);
2095 r = FLD_MOD(r, data3_pol, 15, 15);
2096 }
2097 if (num_data_lanes_dssdev > 3) {
2098 int data4_lane = dssdev->phy.dsi.data4_lane;
2099 int data4_pol = dssdev->phy.dsi.data4_pol;
2100
2101 r = FLD_MOD(r, data4_lane, 18, 16);
2102 r = FLD_MOD(r, data4_pol, 19, 19);
2103 }
2104 dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
1766 2105
1767 /* The configuration of the DSI complex I/O (number of data lanes, 2106 /* The configuration of the DSI complex I/O (number of data lanes,
1768 position, differential order) should not be changed while 2107 position, differential order) should not be changed while
@@ -1776,27 +2115,31 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev)
1776 DSI complex I/O configuration is unknown. */ 2115 DSI complex I/O configuration is unknown. */
1777 2116
1778 /* 2117 /*
1779 REG_FLD_MOD(DSI_CTRL, 1, 0, 0); 2118 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
1780 REG_FLD_MOD(DSI_CTRL, 0, 0, 0); 2119 REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
1781 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); 2120 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
1782 REG_FLD_MOD(DSI_CTRL, 1, 0, 0); 2121 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
1783 */ 2122 */
1784} 2123}
1785 2124
1786static inline unsigned ns2ddr(unsigned ns) 2125static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
1787{ 2126{
2127 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2128
1788 /* convert time in ns to ddr ticks, rounding up */ 2129 /* convert time in ns to ddr ticks, rounding up */
1789 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; 2130 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
1790 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; 2131 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
1791} 2132}
1792 2133
1793static inline unsigned ddr2ns(unsigned ddr) 2134static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
1794{ 2135{
1795 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; 2136 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2137
2138 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
1796 return ddr * 1000 * 1000 / (ddr_clk / 1000); 2139 return ddr * 1000 * 1000 / (ddr_clk / 1000);
1797} 2140}
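ns2ddr()/ddr2ns() convert between nanoseconds and DDR clock ticks, rounding the ns-to-ticks direction up so a minimum timing is never undershot. A standalone copy with a worked example (the 450 MHz clkin4ddr figure is only an illustration, not a value from this patch):

	#include <stdio.h>

	/* DDR clock is clkin4ddr / 4; convert ns to DDR ticks rounding up,
	 * and ticks back to ns, exactly as ns2ddr()/ddr2ns() do. */
	static unsigned ns2ddr(unsigned long ddr_clk_hz, unsigned ns)
	{
		return (ns * (ddr_clk_hz / 1000 / 1000) + 999) / 1000;
	}

	static unsigned ddr2ns(unsigned long ddr_clk_hz, unsigned ddr)
	{
		return ddr * 1000 * 1000 / (ddr_clk_hz / 1000);
	}

	int main(void)
	{
		unsigned long clkin4ddr = 450000000;	/* illustrative PLL output */
		unsigned long ddr_clk = clkin4ddr / 4;	/* 112.5 MHz */

		/* A 70 ns budget rounds up to 8 ticks, which is ~71 ns. */
		unsigned ticks = ns2ddr(ddr_clk, 70);
		printf("70 ns -> %u ticks -> %u ns\n", ticks, ddr2ns(ddr_clk, ticks));
		return 0;
	}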
1798 2141
1799static void dsi_complexio_timings(void) 2142static void dsi_cio_timings(struct platform_device *dsidev)
1800{ 2143{
1801 u32 r; 2144 u32 r;
1802 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit; 2145 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
@@ -1808,139 +2151,323 @@ static void dsi_complexio_timings(void)
1808 /* 1 * DDR_CLK = 2 * UI */ 2151 /* 1 * DDR_CLK = 2 * UI */
1809 2152
1810 /* min 40ns + 4*UI max 85ns + 6*UI */ 2153 /* min 40ns + 4*UI max 85ns + 6*UI */
1811 ths_prepare = ns2ddr(70) + 2; 2154 ths_prepare = ns2ddr(dsidev, 70) + 2;
1812 2155
1813 /* min 145ns + 10*UI */ 2156 /* min 145ns + 10*UI */
1814 ths_prepare_ths_zero = ns2ddr(175) + 2; 2157 ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
1815 2158
1816 /* min max(8*UI, 60ns+4*UI) */ 2159 /* min max(8*UI, 60ns+4*UI) */
1817 ths_trail = ns2ddr(60) + 5; 2160 ths_trail = ns2ddr(dsidev, 60) + 5;
1818 2161
1819 /* min 100ns */ 2162 /* min 100ns */
1820 ths_exit = ns2ddr(145); 2163 ths_exit = ns2ddr(dsidev, 145);
1821 2164
1822 /* tlpx min 50n */ 2165 /* tlpx min 50n */
1822 /* tlpx min 50ns */ 2165 /* tlpx min 50ns */
1823 tlpx_half = ns2ddr(25); 2166 tlpx_half = ns2ddr(dsidev, 25);
1824 2167
1825 /* min 60ns */ 2168 /* min 60ns */
1826 tclk_trail = ns2ddr(60) + 2; 2169 tclk_trail = ns2ddr(dsidev, 60) + 2;
1827 2170
1828 /* min 38ns, max 95ns */ 2171 /* min 38ns, max 95ns */
1829 tclk_prepare = ns2ddr(65); 2172 tclk_prepare = ns2ddr(dsidev, 65);
1830 2173
1831 /* min tclk-prepare + tclk-zero = 300ns */ 2174 /* min tclk-prepare + tclk-zero = 300ns */
1832 tclk_zero = ns2ddr(260); 2175 tclk_zero = ns2ddr(dsidev, 260);
1833 2176
1834 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n", 2177 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1835 ths_prepare, ddr2ns(ths_prepare), 2178 ths_prepare, ddr2ns(dsidev, ths_prepare),
1836 ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero)); 2179 ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
1837 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n", 2180 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1838 ths_trail, ddr2ns(ths_trail), 2181 ths_trail, ddr2ns(dsidev, ths_trail),
1839 ths_exit, ddr2ns(ths_exit)); 2182 ths_exit, ddr2ns(dsidev, ths_exit));
1840 2183
1841 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), " 2184 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
1842 "tclk_zero %u (%uns)\n", 2185 "tclk_zero %u (%uns)\n",
1843 tlpx_half, ddr2ns(tlpx_half), 2186 tlpx_half, ddr2ns(dsidev, tlpx_half),
1844 tclk_trail, ddr2ns(tclk_trail), 2187 tclk_trail, ddr2ns(dsidev, tclk_trail),
1845 tclk_zero, ddr2ns(tclk_zero)); 2188 tclk_zero, ddr2ns(dsidev, tclk_zero));
1846 DSSDBG("tclk_prepare %u (%uns)\n", 2189 DSSDBG("tclk_prepare %u (%uns)\n",
1847 tclk_prepare, ddr2ns(tclk_prepare)); 2190 tclk_prepare, ddr2ns(dsidev, tclk_prepare));
1848 2191
1849 /* program timings */ 2192 /* program timings */
1850 2193
1851 r = dsi_read_reg(DSI_DSIPHY_CFG0); 2194 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
1852 r = FLD_MOD(r, ths_prepare, 31, 24); 2195 r = FLD_MOD(r, ths_prepare, 31, 24);
1853 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16); 2196 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
1854 r = FLD_MOD(r, ths_trail, 15, 8); 2197 r = FLD_MOD(r, ths_trail, 15, 8);
1855 r = FLD_MOD(r, ths_exit, 7, 0); 2198 r = FLD_MOD(r, ths_exit, 7, 0);
1856 dsi_write_reg(DSI_DSIPHY_CFG0, r); 2199 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
1857 2200
1858 r = dsi_read_reg(DSI_DSIPHY_CFG1); 2201 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
1859 r = FLD_MOD(r, tlpx_half, 22, 16); 2202 r = FLD_MOD(r, tlpx_half, 22, 16);
1860 r = FLD_MOD(r, tclk_trail, 15, 8); 2203 r = FLD_MOD(r, tclk_trail, 15, 8);
1861 r = FLD_MOD(r, tclk_zero, 7, 0); 2204 r = FLD_MOD(r, tclk_zero, 7, 0);
1862 dsi_write_reg(DSI_DSIPHY_CFG1, r); 2205 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
1863 2206
1864 r = dsi_read_reg(DSI_DSIPHY_CFG2); 2207 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
1865 r = FLD_MOD(r, tclk_prepare, 7, 0); 2208 r = FLD_MOD(r, tclk_prepare, 7, 0);
1866 dsi_write_reg(DSI_DSIPHY_CFG2, r); 2209 dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
1867} 2210}
1868 2211
2212static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
2213 enum dsi_lane lanes)
2214{
2215 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2216 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2217 int clk_lane = dssdev->phy.dsi.clk_lane;
2218 int data1_lane = dssdev->phy.dsi.data1_lane;
2219 int data2_lane = dssdev->phy.dsi.data2_lane;
2220 int data3_lane = dssdev->phy.dsi.data3_lane;
2221 int data4_lane = dssdev->phy.dsi.data4_lane;
2222 int clk_pol = dssdev->phy.dsi.clk_pol;
2223 int data1_pol = dssdev->phy.dsi.data1_pol;
2224 int data2_pol = dssdev->phy.dsi.data2_pol;
2225 int data3_pol = dssdev->phy.dsi.data3_pol;
2226 int data4_pol = dssdev->phy.dsi.data4_pol;
2227
2228 u32 l = 0;
2229 u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;
2230
2231 if (lanes & DSI_CLK_P)
2232 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
2233 if (lanes & DSI_CLK_N)
2234 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));
2235
2236 if (lanes & DSI_DATA1_P)
2237 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
2238 if (lanes & DSI_DATA1_N)
2239 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));
2240
2241 if (lanes & DSI_DATA2_P)
2242 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
2243 if (lanes & DSI_DATA2_N)
2244 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));
2245
2246 if (lanes & DSI_DATA3_P)
2247 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
2248 if (lanes & DSI_DATA3_N)
2249 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));
2250
2251 if (lanes & DSI_DATA4_P)
2252 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
2253 if (lanes & DSI_DATA4_N)
2254 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));
2255 /*
2256 * Bits in REGLPTXSCPDAT4TO0DXDY:
2257 * 17: DY0 18: DX0
2258 * 19: DY1 20: DX1
2259 * 21: DY2 22: DX2
2260 * 23: DY3 24: DX3
2261 * 25: DY4 26: DX4
2262 */
2263
2264 /* Set the lane override configuration */
2265
2266 /* REGLPTXSCPDAT4TO0DXDY */
2267 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
1869 2268
1870static int dsi_complexio_init(struct omap_dss_device *dssdev) 2269 /* Enable lane override */
2270
2271 /* ENLPTXSCPDAT */
2272 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
2273}
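dsi_cio_enable_lane_override() builds one override word in which each lane position owns a DY/DX bit pair, and the configured polarity decides which bit of the pair a P or N request lands in; the word is then written into the DSI_DSIPHY_CFG10 field that starts at bit 17. A standalone sketch of just the bit packing (flag names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Which wires of a lane to force, mirroring DSI_CLK_P/N, DSI_DATAx_P/N. */
	#define LANE_P	0x1
	#define LANE_N	0x2

	/* Each physical lane position gets a two-bit pair in the override word;
	 * the polarity decides whether "P" maps to the low or the high bit of
	 * that pair, as in dsi_cio_enable_lane_override(). */
	static uint32_t lane_override_bits(int lane_pos, int pol, unsigned which)
	{
		uint32_t l = 0;

		if (which & LANE_P)
			l |= 1u << ((lane_pos - 1) * 2 + (pol ? 0 : 1));
		if (which & LANE_N)
			l |= 1u << ((lane_pos - 1) * 2 + (pol ? 1 : 0));
		return l;
	}

	int main(void)
	{
		/* clock on position 1, data1 on position 2, normal polarity (0):
		 * forcing the P wires sets bits 1 and 3 of the override field,
		 * which the driver then places at bit 17 upwards of CFG10. */
		uint32_t l = lane_override_bits(1, 0, LANE_P) |
			     lane_override_bits(2, 0, LANE_P);
		printf("override field = %#x\n", l);	/* 0xa */
		return 0;
	}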
2274
2275static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
1871{ 2276{
1872 int r = 0; 2277 /* Disable lane override */
2278 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2279 /* Reset the lane override configuration */
2280 /* REGLPTXSCPDAT4TO0DXDY */
2281 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2282}
2283
2284static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2285{
2286 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2287 int t;
2288 int bits[3];
2289 bool in_use[3];
2290
2291 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
2292 bits[0] = 28;
2293 bits[1] = 27;
2294 bits[2] = 26;
2295 } else {
2296 bits[0] = 24;
2297 bits[1] = 25;
2298 bits[2] = 26;
2299 }
2300
2301 in_use[0] = false;
2302 in_use[1] = false;
2303 in_use[2] = false;
2304
2305 if (dssdev->phy.dsi.clk_lane != 0)
2306 in_use[dssdev->phy.dsi.clk_lane - 1] = true;
2307 if (dssdev->phy.dsi.data1_lane != 0)
2308 in_use[dssdev->phy.dsi.data1_lane - 1] = true;
2309 if (dssdev->phy.dsi.data2_lane != 0)
2310 in_use[dssdev->phy.dsi.data2_lane - 1] = true;
2311
2312 t = 100000;
2313 while (true) {
2314 u32 l;
2315 int i;
2316 int ok;
2317
2318 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2319
2320 ok = 0;
2321 for (i = 0; i < 3; ++i) {
2322 if (!in_use[i] || (l & (1 << bits[i])))
2323 ok++;
2324 }
2325
2326 if (ok == 3)
2327 break;
2328
2329 if (--t == 0) {
2330 for (i = 0; i < 3; ++i) {
2331 if (!in_use[i] || (l & (1 << bits[i])))
2332 continue;
2333
2334 DSSERR("CIO TXCLKESC%d domain not coming " \
2335 "out of reset\n", i);
2336 }
2337 return -EIO;
2338 }
2339 }
2340
2341 return 0;
2342}
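dsi_cio_wait_tx_clk_esc_reset() polls DSI_DSIPHY_CFG5 until every lane position that is actually wired up reports its TXCLKESC domain out of reset, ignoring unused positions. The per-iteration test can be seen as a mask comparison, sketched standalone below (bit numbers follow the non-reversed layout in the hunk):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Non-reversed TXCLKESC reset bits for lane positions 1..3; the hunk
	 * uses 28/27/26 instead when FEAT_DSI_REVERSE_TXCLKESC is set. */
	static const int txclkesc_bit[3] = { 24, 25, 26 };

	/* Build the mask of bits that must be set for the lane positions in
	 * use, then test a register value against it. */
	static bool txclkesc_domains_ready(uint32_t cfg5, const bool in_use[3])
	{
		uint32_t mask = 0;
		int i;

		for (i = 0; i < 3; i++)
			if (in_use[i])
				mask |= 1u << txclkesc_bit[i];

		return (cfg5 & mask) == mask;
	}

	int main(void)
	{
		bool in_use[3] = { true, true, false };	/* clk + data1 only */
		uint32_t cfg5 = (1u << 24) | (1u << 25);

		printf("ready: %d\n", txclkesc_domains_ready(cfg5, in_use));
		return 0;
	}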
2343
2344static int dsi_cio_init(struct omap_dss_device *dssdev)
2345{
2346 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2347 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2348 int r;
2349 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
2350 u32 l;
1873 2351
1874 DSSDBG("dsi_complexio_init\n"); 2352 DSSDBGF();
1875 2353
1876 /* CIO_CLK_ICG, enable L3 clk to CIO */ 2354 if (dsi->dsi_mux_pads)
1877 REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14); 2355 dsi->dsi_mux_pads(true);
2356
2357 dsi_enable_scp_clk(dsidev);
1878 2358
1879 /* A dummy read using the SCP interface to any DSIPHY register is 2359 /* A dummy read using the SCP interface to any DSIPHY register is
1880 * required after DSIPHY reset to complete the reset of the DSI complex 2360 * required after DSIPHY reset to complete the reset of the DSI complex
1881 * I/O. */ 2361 * I/O. */
1882 dsi_read_reg(DSI_DSIPHY_CFG5); 2362 dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1883 2363
1884 if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) { 2364 if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
1885 DSSERR("ComplexIO PHY not coming out of reset.\n"); 2365 DSSERR("CIO SCP Clock domain not coming out of reset.\n");
1886 r = -ENODEV; 2366 r = -EIO;
1887 goto err; 2367 goto err_scp_clk_dom;
1888 } 2368 }
1889 2369
1890 dsi_complexio_config(dssdev); 2370 dsi_set_lane_config(dssdev);
2371
2372 /* set TX STOP MODE timer to maximum for this operation */
2373 l = dsi_read_reg(dsidev, DSI_TIMING1);
2374 l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2375 l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
2376 l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
2377 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2378 dsi_write_reg(dsidev, DSI_TIMING1, l);
1891 2379
1892 r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON); 2380 if (dsi->ulps_enabled) {
2381 u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;
1893 2382
2383 DSSDBG("manual ulps exit\n");
2384
2385 /* ULPS is exited by Mark-1 state for 1ms, followed by
2386 * stop state. DSS HW cannot do this via the normal
2387 * ULPS exit sequence, as after reset the DSS HW thinks
2388 * that we are not in ULPS mode, and refuses to send the
2389 * sequence. So we need to send the ULPS exit sequence
2390 * manually.
2391 */
2392
2393 if (num_data_lanes_dssdev > 2)
2394 lane_mask |= DSI_DATA3_P;
2395
2396 if (num_data_lanes_dssdev > 3)
2397 lane_mask |= DSI_DATA4_P;
2398
2399 dsi_cio_enable_lane_override(dssdev, lane_mask);
2400 }
2401
2402 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
1894 if (r) 2403 if (r)
1895 goto err; 2404 goto err_cio_pwr;
1896 2405
1897 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) { 2406 if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
1898 DSSERR("ComplexIO not coming out of reset.\n"); 2407 DSSERR("CIO PWR clock domain not coming out of reset.\n");
1899 r = -ENODEV; 2408 r = -ENODEV;
1900 goto err; 2409 goto err_cio_pwr_dom;
1901 } 2410 }
1902 2411
1903 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) { 2412 dsi_if_enable(dsidev, true);
1904 DSSERR("ComplexIO LDO power down.\n"); 2413 dsi_if_enable(dsidev, false);
1905 r = -ENODEV; 2414 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1906 goto err; 2415
2416 r = dsi_cio_wait_tx_clk_esc_reset(dssdev);
2417 if (r)
2418 goto err_tx_clk_esc_rst;
2419
2420 if (dsi->ulps_enabled) {
2421 /* Keep Mark-1 state for 1ms (as per DSI spec) */
2422 ktime_t wait = ns_to_ktime(1000 * 1000);
2423 set_current_state(TASK_UNINTERRUPTIBLE);
2424 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
2425
2426 /* Disable the override. The lanes should be set to Mark-11
2427 * state by the HW */
2428 dsi_cio_disable_lane_override(dsidev);
1907 } 2429 }
1908 2430
1909 dsi_complexio_timings(); 2431 /* FORCE_TX_STOP_MODE_IO */
2432 REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
1910 2433
1911 /* 2434 dsi_cio_timings(dsidev);
1912 The configuration of the DSI complex I/O (number of data lanes, 2435
1913 position, differential order) should not be changed while 2436 dsi->ulps_enabled = false;
1914 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. For the
1915 hardware to recognize a new configuration of the complex I/O (done
1916 in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
1917 this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
1918 reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
1919 LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
1920 bit to 1. If the sequence is not followed, the DSi complex I/O
1921 configuration is undetermined.
1922 */
1923 dsi_if_enable(1);
1924 dsi_if_enable(0);
1925 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1926 dsi_if_enable(1);
1927 dsi_if_enable(0);
1928 2437
1929 DSSDBG("CIO init done\n"); 2438 DSSDBG("CIO init done\n");
1930err: 2439
2440 return 0;
2441
2442err_tx_clk_esc_rst:
2443 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2444err_cio_pwr_dom:
2445 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2446err_cio_pwr:
2447 if (dsi->ulps_enabled)
2448 dsi_cio_disable_lane_override(dsidev);
2449err_scp_clk_dom:
2450 dsi_disable_scp_clk(dsidev);
2451 if (dsi->dsi_mux_pads)
2452 dsi->dsi_mux_pads(false);
1931 return r; 2453 return r;
1932} 2454}
1933 2455
1934static void dsi_complexio_uninit(void) 2456static void dsi_cio_uninit(struct platform_device *dsidev)
1935{ 2457{
1936 dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF); 2458 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2459
2460 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2461 dsi_disable_scp_clk(dsidev);
2462 if (dsi->dsi_mux_pads)
2463 dsi->dsi_mux_pads(false);
1937} 2464}
1938 2465
1939static int _dsi_wait_reset(void) 2466static int _dsi_wait_reset(struct platform_device *dsidev)
1940{ 2467{
1941 int t = 0; 2468 int t = 0;
1942 2469
1943 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { 2470 while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
1944 if (++t > 5) { 2471 if (++t > 5) {
1945 DSSERR("soft reset failed\n"); 2472 DSSERR("soft reset failed\n");
1946 return -ENODEV; 2473 return -ENODEV;
@@ -1951,28 +2478,30 @@ static int _dsi_wait_reset(void)
1951 return 0; 2478 return 0;
1952} 2479}
1953 2480
1954static int _dsi_reset(void) 2481static int _dsi_reset(struct platform_device *dsidev)
1955{ 2482{
1956 /* Soft reset */ 2483 /* Soft reset */
1957 REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1); 2484 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
1958 return _dsi_wait_reset(); 2485 return _dsi_wait_reset(dsidev);
1959} 2486}
1960 2487
1961static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2, 2488static void dsi_config_tx_fifo(struct platform_device *dsidev,
2489 enum fifo_size size1, enum fifo_size size2,
1962 enum fifo_size size3, enum fifo_size size4) 2490 enum fifo_size size3, enum fifo_size size4)
1963{ 2491{
2492 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1964 u32 r = 0; 2493 u32 r = 0;
1965 int add = 0; 2494 int add = 0;
1966 int i; 2495 int i;
1967 2496
1968 dsi.vc[0].fifo_size = size1; 2497 dsi->vc[0].fifo_size = size1;
1969 dsi.vc[1].fifo_size = size2; 2498 dsi->vc[1].fifo_size = size2;
1970 dsi.vc[2].fifo_size = size3; 2499 dsi->vc[2].fifo_size = size3;
1971 dsi.vc[3].fifo_size = size4; 2500 dsi->vc[3].fifo_size = size4;
1972 2501
1973 for (i = 0; i < 4; i++) { 2502 for (i = 0; i < 4; i++) {
1974 u8 v; 2503 u8 v;
1975 int size = dsi.vc[i].fifo_size; 2504 int size = dsi->vc[i].fifo_size;
1976 2505
1977 if (add + size > 4) { 2506 if (add + size > 4) {
1978 DSSERR("Illegal FIFO configuration\n"); 2507 DSSERR("Illegal FIFO configuration\n");
@@ -1985,24 +2514,26 @@ static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
1985 add += size; 2514 add += size;
1986 } 2515 }
1987 2516
1988 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r); 2517 dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
1989} 2518}
1990 2519
1991static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2, 2520static void dsi_config_rx_fifo(struct platform_device *dsidev,
2521 enum fifo_size size1, enum fifo_size size2,
1992 enum fifo_size size3, enum fifo_size size4) 2522 enum fifo_size size3, enum fifo_size size4)
1993{ 2523{
2524 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1994 u32 r = 0; 2525 u32 r = 0;
1995 int add = 0; 2526 int add = 0;
1996 int i; 2527 int i;
1997 2528
1998 dsi.vc[0].fifo_size = size1; 2529 dsi->vc[0].fifo_size = size1;
1999 dsi.vc[1].fifo_size = size2; 2530 dsi->vc[1].fifo_size = size2;
2000 dsi.vc[2].fifo_size = size3; 2531 dsi->vc[2].fifo_size = size3;
2001 dsi.vc[3].fifo_size = size4; 2532 dsi->vc[3].fifo_size = size4;
2002 2533
2003 for (i = 0; i < 4; i++) { 2534 for (i = 0; i < 4; i++) {
2004 u8 v; 2535 u8 v;
2005 int size = dsi.vc[i].fifo_size; 2536 int size = dsi->vc[i].fifo_size;
2006 2537
2007 if (add + size > 4) { 2538 if (add + size > 4) {
2008 DSSERR("Illegal FIFO configuration\n"); 2539 DSSERR("Illegal FIFO configuration\n");
@@ -2015,18 +2546,18 @@ static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
2015 add += size; 2546 add += size;
2016 } 2547 }
2017 2548
2018 dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r); 2549 dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2019} 2550}
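dsi_config_tx_fifo() and dsi_config_rx_fifo() each split a FIFO among the four virtual channels and reject any split whose running total exceeds 4 units (the add + size > 4 check). A standalone sketch of just that bookkeeping, leaving out the register encoding:

	#include <stdio.h>

	/* Validate a 4-way split: each VC gets 'size' units starting at the
	 * running offset 'add'; the total may not exceed the 4 units implied
	 * by the add + size > 4 check in the hunk. */
	static int check_fifo_split(const int size[4])
	{
		int add = 0;
		int i;

		for (i = 0; i < 4; i++) {
			if (add + size[i] > 4) {
				fprintf(stderr, "illegal FIFO configuration\n");
				return -1;
			}
			/* VC i would be programmed with start = add, length = size[i] */
			add += size[i];
		}
		return 0;
	}

	int main(void)
	{
		int ok[4]  = { 1, 1, 1, 1 };
		int bad[4] = { 2, 2, 1, 0 };

		printf("ok split:  %d\n", check_fifo_split(ok));
		printf("bad split: %d\n", check_fifo_split(bad));
		return 0;
	}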
2020 2551
2021static int dsi_force_tx_stop_mode_io(void) 2552static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2022{ 2553{
2023 u32 r; 2554 u32 r;
2024 2555
2025 r = dsi_read_reg(DSI_TIMING1); 2556 r = dsi_read_reg(dsidev, DSI_TIMING1);
2026 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ 2557 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2027 dsi_write_reg(DSI_TIMING1, r); 2558 dsi_write_reg(dsidev, DSI_TIMING1, r);
2028 2559
2029 if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) { 2560 if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2030 DSSERR("TX_STOP bit not going down\n"); 2561 DSSERR("TX_STOP bit not going down\n");
2031 return -EIO; 2562 return -EIO;
2032 } 2563 }
@@ -2034,16 +2565,135 @@ static int dsi_force_tx_stop_mode_io(void)
2034 return 0; 2565 return 0;
2035} 2566}
2036 2567
2037static int dsi_vc_enable(int channel, bool enable) 2568static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2569{
2570 return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2571}
2572
2573static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2574{
2575 struct dsi_packet_sent_handler_data *vp_data =
2576 (struct dsi_packet_sent_handler_data *) data;
2577 struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2578 const int channel = dsi->update_channel;
2579 u8 bit = dsi->te_enabled ? 30 : 31;
2580
2581 if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2582 complete(vp_data->completion);
2583}
2584
2585static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2586{
2587 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2588 DECLARE_COMPLETION_ONSTACK(completion);
2589 struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2590 int r = 0;
2591 u8 bit;
2592
2593 bit = dsi->te_enabled ? 30 : 31;
2594
2595 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2596 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2597 if (r)
2598 goto err0;
2599
2600 /* Wait for completion only if TE_EN/TE_START is still set */
2601 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2602 if (wait_for_completion_timeout(&completion,
2603 msecs_to_jiffies(10)) == 0) {
2604 DSSERR("Failed to complete previous frame transfer\n");
2605 r = -EIO;
2606 goto err1;
2607 }
2608 }
2609
2610 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2611 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2612
2613 return 0;
2614err1:
2615 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2616 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2617err0:
2618 return r;
2619}
2620
2621static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2622{
2623 struct dsi_packet_sent_handler_data *l4_data =
2624 (struct dsi_packet_sent_handler_data *) data;
2625 struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2626 const int channel = dsi->update_channel;
2627
2628 if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2629 complete(l4_data->completion);
2630}
2631
2632static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2633{
2634 DECLARE_COMPLETION_ONSTACK(completion);
2635 struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2636 int r = 0;
2637
2638 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2639 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2640 if (r)
2641 goto err0;
2642
2643 /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2644 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2645 if (wait_for_completion_timeout(&completion,
2646 msecs_to_jiffies(10)) == 0) {
2647 DSSERR("Failed to complete previous l4 transfer\n");
2648 r = -EIO;
2649 goto err1;
2650 }
2651 }
2652
2653 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2654 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2655
2656 return 0;
2657err1:
2658 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2659 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2660err0:
2661 return r;
2662}
2663
2664static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2665{
2666 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2667
2668 WARN_ON(!dsi_bus_is_locked(dsidev));
2669
2670 WARN_ON(in_interrupt());
2671
2672 if (!dsi_vc_is_enabled(dsidev, channel))
2673 return 0;
2674
2675 switch (dsi->vc[channel].mode) {
2676 case DSI_VC_MODE_VP:
2677 return dsi_sync_vc_vp(dsidev, channel);
2678 case DSI_VC_MODE_L4:
2679 return dsi_sync_vc_l4(dsidev, channel);
2680 default:
2681 BUG();
2682 }
2683}
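dsi_sync_vc_vp() and dsi_sync_vc_l4() share one shape: register a packet-sent ISR that calls complete(), re-check the busy bit so an already-finished transfer is not waited on, wait with a 10 ms timeout, and unregister on every exit path. A condensed sketch of that shape, assuming a kernel build environment; the register/unregister/busy helpers are hypothetical stand-ins for dsi_register_isr_vc() and the REG_GET() tests, not real driver API:

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Hypothetical hooks standing in for the driver's VC ISR registration
	 * and for the "transfer still in flight" register test. */
	int  demo_register_sent_isr(void (*fn)(void *), void *arg);
	void demo_unregister_sent_isr(void (*fn)(void *), void *arg);
	bool demo_transfer_in_flight(void);

	static void demo_sent_isr(void *arg)
	{
		complete(arg);		/* wakes the waiter below */
	}

	static int demo_sync_transfer(void)
	{
		DECLARE_COMPLETION_ONSTACK(completion);
		int r;

		r = demo_register_sent_isr(demo_sent_isr, &completion);
		if (r)
			return r;

		/* Only wait if a transfer is still pending; it may already have
		 * finished before the ISR was registered. */
		if (demo_transfer_in_flight() &&
		    wait_for_completion_timeout(&completion,
						msecs_to_jiffies(10)) == 0)
			r = -EIO;

		demo_unregister_sent_isr(demo_sent_isr, &completion);
		return r;
	}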
2684
2685static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2686 bool enable)
2038{ 2687{
2039 DSSDBG("dsi_vc_enable channel %d, enable %d\n", 2688 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
2040 channel, enable); 2689 channel, enable);
2041 2690
2042 enable = enable ? 1 : 0; 2691 enable = enable ? 1 : 0;
2043 2692
2044 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0); 2693 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2045 2694
2046 if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) { 2695 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2696 0, enable) != enable) {
2047 DSSERR("Failed to set dsi_vc_enable to %d\n", enable); 2697 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2048 return -EIO; 2698 return -EIO;
2049 } 2699 }
@@ -2051,13 +2701,13 @@ static int dsi_vc_enable(int channel, bool enable)
2051 return 0; 2701 return 0;
2052} 2702}
2053 2703
2054static void dsi_vc_initial_config(int channel) 2704static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2055{ 2705{
2056 u32 r; 2706 u32 r;
2057 2707
2058 DSSDBGF("%d", channel); 2708 DSSDBGF("%d", channel);
2059 2709
2060 r = dsi_read_reg(DSI_VC_CTRL(channel)); 2710 r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2061 2711
2062 if (FLD_GET(r, 15, 15)) /* VC_BUSY */ 2712 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2063 DSSERR("VC(%d) busy when trying to configure it!\n", 2713 DSSERR("VC(%d) busy when trying to configure it!\n",
@@ -2070,85 +2720,107 @@ static void dsi_vc_initial_config(int channel)
2070 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */ 2720 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2071 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */ 2721 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2072 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */ 2722 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2723 if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
2724 r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
2073 2725
2074 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ 2726 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2075 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ 2727 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2076 2728
2077 dsi_write_reg(DSI_VC_CTRL(channel), r); 2729 dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2078} 2730}
2079 2731
2080static int dsi_vc_config_l4(int channel) 2732static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
2081{ 2733{
2082 if (dsi.vc[channel].mode == DSI_VC_MODE_L4) 2734 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2735
2736 if (dsi->vc[channel].mode == DSI_VC_MODE_L4)
2083 return 0; 2737 return 0;
2084 2738
2085 DSSDBGF("%d", channel); 2739 DSSDBGF("%d", channel);
2086 2740
2087 dsi_vc_enable(channel, 0); 2741 dsi_sync_vc(dsidev, channel);
2742
2743 dsi_vc_enable(dsidev, channel, 0);
2088 2744
2089 /* VC_BUSY */ 2745 /* VC_BUSY */
2090 if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { 2746 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2091 DSSERR("vc(%d) busy when trying to config for L4\n", channel); 2747 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
2092 return -EIO; 2748 return -EIO;
2093 } 2749 }
2094 2750
2095 REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */ 2751 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
2752
2753 /* DCS_CMD_ENABLE */
2754 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2755 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30);
2096 2756
2097 dsi_vc_enable(channel, 1); 2757 dsi_vc_enable(dsidev, channel, 1);
2098 2758
2099 dsi.vc[channel].mode = DSI_VC_MODE_L4; 2759 dsi->vc[channel].mode = DSI_VC_MODE_L4;
2100 2760
2101 return 0; 2761 return 0;
2102} 2762}
2103 2763
2104static int dsi_vc_config_vp(int channel) 2764static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
2105{ 2765{
2106 if (dsi.vc[channel].mode == DSI_VC_MODE_VP) 2766 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2767
2768 if (dsi->vc[channel].mode == DSI_VC_MODE_VP)
2107 return 0; 2769 return 0;
2108 2770
2109 DSSDBGF("%d", channel); 2771 DSSDBGF("%d", channel);
2110 2772
2111 dsi_vc_enable(channel, 0); 2773 dsi_sync_vc(dsidev, channel);
2774
2775 dsi_vc_enable(dsidev, channel, 0);
2112 2776
2113 /* VC_BUSY */ 2777 /* VC_BUSY */
2114 if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { 2778 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2115 DSSERR("vc(%d) busy when trying to config for VP\n", channel); 2779 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2116 return -EIO; 2780 return -EIO;
2117 } 2781 }
2118 2782
2119 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */ 2783 /* SOURCE, 1 = video port */
2784 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1);
2785
2786 /* DCS_CMD_ENABLE */
2787 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2788 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30);
2120 2789
2121 dsi_vc_enable(channel, 1); 2790 dsi_vc_enable(dsidev, channel, 1);
2122 2791
2123 dsi.vc[channel].mode = DSI_VC_MODE_VP; 2792 dsi->vc[channel].mode = DSI_VC_MODE_VP;
2124 2793
2125 return 0; 2794 return 0;
2126} 2795}
2127 2796
2128 2797
2129void omapdss_dsi_vc_enable_hs(int channel, bool enable) 2798void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2799 bool enable)
2130{ 2800{
2801 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2802
2131 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); 2803 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2132 2804
2133 WARN_ON(!dsi_bus_is_locked()); 2805 WARN_ON(!dsi_bus_is_locked(dsidev));
2134 2806
2135 dsi_vc_enable(channel, 0); 2807 dsi_vc_enable(dsidev, channel, 0);
2136 dsi_if_enable(0); 2808 dsi_if_enable(dsidev, 0);
2137 2809
2138 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9); 2810 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2139 2811
2140 dsi_vc_enable(channel, 1); 2812 dsi_vc_enable(dsidev, channel, 1);
2141 dsi_if_enable(1); 2813 dsi_if_enable(dsidev, 1);
2142 2814
2143 dsi_force_tx_stop_mode_io(); 2815 dsi_force_tx_stop_mode_io(dsidev);
2144} 2816}
2145EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs); 2817EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
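With the dssdev argument the HS/LP switch is now selected per DSI instance. A hypothetical panel-driver fragment showing the call (headers and the surrounding driver are omitted; the hunk's dsi_bus_is_locked() check implies the caller already holds the DSI bus lock):

	/* Hypothetical fragment: put virtual channel 0 of this display into
	 * high-speed mode before pushing pixel data. */
	static void demo_panel_enter_hs(struct omap_dss_device *dssdev)
	{
		omapdss_dsi_vc_enable_hs(dssdev, 0, true);
	}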
2146 2818
2147static void dsi_vc_flush_long_data(int channel) 2819static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2148{ 2820{
2149 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { 2821 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2150 u32 val; 2822 u32 val;
2151 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 2823 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2152 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", 2824 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
2153 (val >> 0) & 0xff, 2825 (val >> 0) & 0xff,
2154 (val >> 8) & 0xff, 2826 (val >> 8) & 0xff,
@@ -2194,13 +2866,14 @@ static void dsi_show_rx_ack_with_err(u16 err)
2194 DSSERR("\t\tDSI Protocol Violation\n"); 2866 DSSERR("\t\tDSI Protocol Violation\n");
2195} 2867}
2196 2868
2197static u16 dsi_vc_flush_receive_data(int channel) 2869static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2870 int channel)
2198{ 2871{
2199 /* RX_FIFO_NOT_EMPTY */ 2872 /* RX_FIFO_NOT_EMPTY */
2200 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { 2873 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2201 u32 val; 2874 u32 val;
2202 u8 dt; 2875 u8 dt;
2203 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 2876 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2204 DSSERR("\trawval %#08x\n", val); 2877 DSSERR("\trawval %#08x\n", val);
2205 dt = FLD_GET(val, 5, 0); 2878 dt = FLD_GET(val, 5, 0);
2206 if (dt == DSI_DT_RX_ACK_WITH_ERR) { 2879 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
@@ -2215,7 +2888,7 @@ static u16 dsi_vc_flush_receive_data(int channel)
2215 } else if (dt == DSI_DT_RX_DCS_LONG_READ) { 2888 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2216 DSSERR("\tDCS long response, len %d\n", 2889 DSSERR("\tDCS long response, len %d\n",
2217 FLD_GET(val, 23, 8)); 2890 FLD_GET(val, 23, 8));
2218 dsi_vc_flush_long_data(channel); 2891 dsi_vc_flush_long_data(dsidev, channel);
2219 } else { 2892 } else {
2220 DSSERR("\tunknown datatype 0x%02x\n", dt); 2893 DSSERR("\tunknown datatype 0x%02x\n", dt);
2221 } 2894 }
@@ -2223,40 +2896,44 @@ static u16 dsi_vc_flush_receive_data(int channel)
2223 return 0; 2896 return 0;
2224} 2897}
2225 2898
2226static int dsi_vc_send_bta(int channel) 2899static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2227{ 2900{
2228 if (dsi.debug_write || dsi.debug_read) 2901 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2902
2903 if (dsi->debug_write || dsi->debug_read)
2229 DSSDBG("dsi_vc_send_bta %d\n", channel); 2904 DSSDBG("dsi_vc_send_bta %d\n", channel);
2230 2905
2231 WARN_ON(!dsi_bus_is_locked()); 2906 WARN_ON(!dsi_bus_is_locked(dsidev));
2232 2907
2233 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ 2908 /* RX_FIFO_NOT_EMPTY */
2909 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2234 DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); 2910 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2235 dsi_vc_flush_receive_data(channel); 2911 dsi_vc_flush_receive_data(dsidev, channel);
2236 } 2912 }
2237 2913
2238 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ 2914 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2239 2915
2240 return 0; 2916 return 0;
2241} 2917}
2242 2918
2243int dsi_vc_send_bta_sync(int channel) 2919int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2244{ 2920{
2921 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2245 DECLARE_COMPLETION_ONSTACK(completion); 2922 DECLARE_COMPLETION_ONSTACK(completion);
2246 int r = 0; 2923 int r = 0;
2247 u32 err; 2924 u32 err;
2248 2925
2249 r = dsi_register_isr_vc(channel, dsi_completion_handler, 2926 r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2250 &completion, DSI_VC_IRQ_BTA); 2927 &completion, DSI_VC_IRQ_BTA);
2251 if (r) 2928 if (r)
2252 goto err0; 2929 goto err0;
2253 2930
2254 r = dsi_register_isr(dsi_completion_handler, &completion, 2931 r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2255 DSI_IRQ_ERROR_MASK); 2932 DSI_IRQ_ERROR_MASK);
2256 if (r) 2933 if (r)
2257 goto err1; 2934 goto err1;
2258 2935
2259 r = dsi_vc_send_bta(channel); 2936 r = dsi_vc_send_bta(dsidev, channel);
2260 if (r) 2937 if (r)
2261 goto err2; 2938 goto err2;
2262 2939
@@ -2267,41 +2944,42 @@ int dsi_vc_send_bta_sync(int channel)
2267 goto err2; 2944 goto err2;
2268 } 2945 }
2269 2946
2270 err = dsi_get_errors(); 2947 err = dsi_get_errors(dsidev);
2271 if (err) { 2948 if (err) {
2272 DSSERR("Error while sending BTA: %x\n", err); 2949 DSSERR("Error while sending BTA: %x\n", err);
2273 r = -EIO; 2950 r = -EIO;
2274 goto err2; 2951 goto err2;
2275 } 2952 }
2276err2: 2953err2:
2277 dsi_unregister_isr(dsi_completion_handler, &completion, 2954 dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2278 DSI_IRQ_ERROR_MASK); 2955 DSI_IRQ_ERROR_MASK);
2279err1: 2956err1:
2280 dsi_unregister_isr_vc(channel, dsi_completion_handler, 2957 dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2281 &completion, DSI_VC_IRQ_BTA); 2958 &completion, DSI_VC_IRQ_BTA);
2282err0: 2959err0:
2283 return r; 2960 return r;
2284} 2961}
2285EXPORT_SYMBOL(dsi_vc_send_bta_sync); 2962EXPORT_SYMBOL(dsi_vc_send_bta_sync);
2286 2963
2287static inline void dsi_vc_write_long_header(int channel, u8 data_type, 2964static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2288 u16 len, u8 ecc) 2965 int channel, u8 data_type, u16 len, u8 ecc)
2289{ 2966{
2967 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2290 u32 val; 2968 u32 val;
2291 u8 data_id; 2969 u8 data_id;
2292 2970
2293 WARN_ON(!dsi_bus_is_locked()); 2971 WARN_ON(!dsi_bus_is_locked(dsidev));
2294 2972
2295 data_id = data_type | dsi.vc[channel].vc_id << 6; 2973 data_id = data_type | dsi->vc[channel].vc_id << 6;
2296 2974
2297 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | 2975 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2298 FLD_VAL(ecc, 31, 24); 2976 FLD_VAL(ecc, 31, 24);
2299 2977
2300 dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val); 2978 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
2301} 2979}
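dsi_vc_write_long_header() packs the long-packet header as a data ID (data type plus the VC id in bits 7:6), a 16-bit byte count and an ECC byte. A standalone restatement of that packing, mirroring the FLD_VAL() calls above (the DCS long-write type 0x39 in the example is standard DSI, not taken from this hunk):

	#include <stdint.h>
	#include <stdio.h>

	/* Long packet header layout used by dsi_vc_write_long_header():
	 * bits 7:0   data id (data type | vc_id << 6)
	 * bits 23:8  payload length in bytes
	 * bits 31:24 ECC byte (the driver passes 0 and enables ECC_TX_EN) */
	static uint32_t long_packet_header(uint8_t data_type, int vc_id,
					   uint16_t len, uint8_t ecc)
	{
		uint8_t data_id = data_type | vc_id << 6;

		return (uint32_t)data_id | (uint32_t)len << 8 | (uint32_t)ecc << 24;
	}

	int main(void)
	{
		/* e.g. a 4-byte DCS long write (type 0x39) on virtual channel 0 */
		printf("header = %#010x\n", long_packet_header(0x39, 0, 4, 0));
		return 0;
	}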
2302 2980
2303static inline void dsi_vc_write_long_payload(int channel, 2981static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
2304 u8 b1, u8 b2, u8 b3, u8 b4) 2982 int channel, u8 b1, u8 b2, u8 b3, u8 b4)
2305{ 2983{
2306 u32 val; 2984 u32 val;
2307 2985
@@ -2310,34 +2988,35 @@ static inline void dsi_vc_write_long_payload(int channel,
2310/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", 2988/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2311 b1, b2, b3, b4, val); */ 2989 b1, b2, b3, b4, val); */
2312 2990
2313 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val); 2991 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
2314} 2992}
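dsi_vc_write_long_payload() above packs four payload bytes into one 32-bit word for the TX FIFO, with the first byte in the least-significant position, matching the DSI_PUSH() macro later in this patch. A standalone sketch of that packing (the helper name is illustrative, not from the driver):

	/* Illustrative only: shows the byte order written to
	 * DSI_VC_LONG_PACKET_PAYLOAD, first payload byte in bits 7:0. */
	static inline u32 example_pack_payload(u8 b1, u8 b2, u8 b3, u8 b4)
	{
		return b1 | (b2 << 8) | (b3 << 16) | (b4 << 24);
	}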
2315 2993
2316static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, 2994static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2317 u8 ecc) 2995 u8 data_type, u8 *data, u16 len, u8 ecc)
2318{ 2996{
2319 /*u32 val; */ 2997 /*u32 val; */
2998 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2320 int i; 2999 int i;
2321 u8 *p; 3000 u8 *p;
2322 int r = 0; 3001 int r = 0;
2323 u8 b1, b2, b3, b4; 3002 u8 b1, b2, b3, b4;
2324 3003
2325 if (dsi.debug_write) 3004 if (dsi->debug_write)
2326 DSSDBG("dsi_vc_send_long, %d bytes\n", len); 3005 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2327 3006
2328 /* len + header */ 3007 /* len + header */
2329 if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) { 3008 if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
2330 DSSERR("unable to send long packet: packet too long.\n"); 3009 DSSERR("unable to send long packet: packet too long.\n");
2331 return -EINVAL; 3010 return -EINVAL;
2332 } 3011 }
2333 3012
2334 dsi_vc_config_l4(channel); 3013 dsi_vc_config_l4(dsidev, channel);
2335 3014
2336 dsi_vc_write_long_header(channel, data_type, len, ecc); 3015 dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
2337 3016
2338 p = data; 3017 p = data;
2339 for (i = 0; i < len >> 2; i++) { 3018 for (i = 0; i < len >> 2; i++) {
2340 if (dsi.debug_write) 3019 if (dsi->debug_write)
2341 DSSDBG("\tsending full packet %d\n", i); 3020 DSSDBG("\tsending full packet %d\n", i);
2342 3021
2343 b1 = *p++; 3022 b1 = *p++;
@@ -2345,14 +3024,14 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
2345 b3 = *p++; 3024 b3 = *p++;
2346 b4 = *p++; 3025 b4 = *p++;
2347 3026
2348 dsi_vc_write_long_payload(channel, b1, b2, b3, b4); 3027 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
2349 } 3028 }
2350 3029
2351 i = len % 4; 3030 i = len % 4;
2352 if (i) { 3031 if (i) {
2353 b1 = 0; b2 = 0; b3 = 0; 3032 b1 = 0; b2 = 0; b3 = 0;
2354 3033
2355 if (dsi.debug_write) 3034 if (dsi->debug_write)
2356 DSSDBG("\tsending remainder bytes %d\n", i); 3035 DSSDBG("\tsending remainder bytes %d\n", i);
2357 3036
2358 switch (i) { 3037 switch (i) {
@@ -2370,62 +3049,69 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
2370 break; 3049 break;
2371 } 3050 }
2372 3051
2373 dsi_vc_write_long_payload(channel, b1, b2, b3, 0); 3052 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
2374 } 3053 }
2375 3054
2376 return r; 3055 return r;
2377} 3056}
2378 3057
2379static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) 3058static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
3059 u8 data_type, u16 data, u8 ecc)
2380{ 3060{
3061 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2381 u32 r; 3062 u32 r;
2382 u8 data_id; 3063 u8 data_id;
2383 3064
2384 WARN_ON(!dsi_bus_is_locked()); 3065 WARN_ON(!dsi_bus_is_locked(dsidev));
2385 3066
2386 if (dsi.debug_write) 3067 if (dsi->debug_write)
2387 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", 3068 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
2388 channel, 3069 channel,
2389 data_type, data & 0xff, (data >> 8) & 0xff); 3070 data_type, data & 0xff, (data >> 8) & 0xff);
2390 3071
2391 dsi_vc_config_l4(channel); 3072 dsi_vc_config_l4(dsidev, channel);
2392 3073
2393 if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) { 3074 if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
2394 DSSERR("ERROR FIFO FULL, aborting transfer\n"); 3075 DSSERR("ERROR FIFO FULL, aborting transfer\n");
2395 return -EINVAL; 3076 return -EINVAL;
2396 } 3077 }
2397 3078
2398 data_id = data_type | dsi.vc[channel].vc_id << 6; 3079 data_id = data_type | dsi->vc[channel].vc_id << 6;
2399 3080
2400 r = (data_id << 0) | (data << 8) | (ecc << 24); 3081 r = (data_id << 0) | (data << 8) | (ecc << 24);
2401 3082
2402 dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r); 3083 dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
2403 3084
2404 return 0; 3085 return 0;
2405} 3086}
2406 3087
2407int dsi_vc_send_null(int channel) 3088int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
2408{ 3089{
3090 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2409 u8 nullpkg[] = {0, 0, 0, 0}; 3091 u8 nullpkg[] = {0, 0, 0, 0};
2410 return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0); 3092
3093 return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg,
3094 4, 0);
2411} 3095}
2412EXPORT_SYMBOL(dsi_vc_send_null); 3096EXPORT_SYMBOL(dsi_vc_send_null);
2413 3097
2414int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len) 3098int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3099 u8 *data, int len)
2415{ 3100{
3101 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2416 int r; 3102 int r;
2417 3103
2418 BUG_ON(len == 0); 3104 BUG_ON(len == 0);
2419 3105
2420 if (len == 1) { 3106 if (len == 1) {
2421 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0, 3107 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0,
2422 data[0], 0); 3108 data[0], 0);
2423 } else if (len == 2) { 3109 } else if (len == 2) {
2424 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1, 3110 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1,
2425 data[0] | (data[1] << 8), 0); 3111 data[0] | (data[1] << 8), 0);
2426 } else { 3112 } else {
2427 /* 0x39 = DCS Long Write */ 3113 /* 0x39 = DCS Long Write */
2428 r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE, 3114 r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
2429 data, len, 0); 3115 data, len, 0);
2430 } 3116 }
2431 3117
@@ -2433,21 +3119,24 @@ int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
2433} 3119}
2434EXPORT_SYMBOL(dsi_vc_dcs_write_nosync); 3120EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
2435 3121
2436int dsi_vc_dcs_write(int channel, u8 *data, int len) 3122int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3123 int len)
2437{ 3124{
3125 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2438 int r; 3126 int r;
2439 3127
2440 r = dsi_vc_dcs_write_nosync(channel, data, len); 3128 r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len);
2441 if (r) 3129 if (r)
2442 goto err; 3130 goto err;
2443 3131
2444 r = dsi_vc_send_bta_sync(channel); 3132 r = dsi_vc_send_bta_sync(dssdev, channel);
2445 if (r) 3133 if (r)
2446 goto err; 3134 goto err;
2447 3135
2448 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ 3136 /* RX_FIFO_NOT_EMPTY */
3137 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2449 DSSERR("rx fifo not empty after write, dumping data:\n"); 3138 DSSERR("rx fifo not empty after write, dumping data:\n");
2450 dsi_vc_flush_receive_data(channel); 3139 dsi_vc_flush_receive_data(dsidev, channel);
2451 r = -EIO; 3140 r = -EIO;
2452 goto err; 3141 goto err;
2453 } 3142 }
@@ -2460,47 +3149,51 @@ err:
2460} 3149}
2461EXPORT_SYMBOL(dsi_vc_dcs_write); 3150EXPORT_SYMBOL(dsi_vc_dcs_write);
2462 3151
2463int dsi_vc_dcs_write_0(int channel, u8 dcs_cmd) 3152int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
2464{ 3153{
2465 return dsi_vc_dcs_write(channel, &dcs_cmd, 1); 3154 return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
2466} 3155}
2467EXPORT_SYMBOL(dsi_vc_dcs_write_0); 3156EXPORT_SYMBOL(dsi_vc_dcs_write_0);
2468 3157
2469int dsi_vc_dcs_write_1(int channel, u8 dcs_cmd, u8 param) 3158int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3159 u8 param)
2470{ 3160{
2471 u8 buf[2]; 3161 u8 buf[2];
2472 buf[0] = dcs_cmd; 3162 buf[0] = dcs_cmd;
2473 buf[1] = param; 3163 buf[1] = param;
2474 return dsi_vc_dcs_write(channel, buf, 2); 3164 return dsi_vc_dcs_write(dssdev, channel, buf, 2);
2475} 3165}
2476EXPORT_SYMBOL(dsi_vc_dcs_write_1); 3166EXPORT_SYMBOL(dsi_vc_dcs_write_1);
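The DCS write helpers now take the omap_dss_device as their first argument, so a panel driver passes its own dssdev straight through. A sketch of a typical power-on sequence; the DCS opcodes 0x11/0x29 are standard MIPI DCS commands and the 120 ms sleep-out delay is panel-specific, none of which comes from this patch:

	static int example_panel_power_on(struct omap_dss_device *dssdev)
	{
		int r;

		r = dsi_vc_dcs_write_0(dssdev, 0, 0x11);	/* exit_sleep_mode */
		if (r)
			return r;

		msleep(120);	/* typical sleep-out delay, panel-specific */

		return dsi_vc_dcs_write_0(dssdev, 0, 0x29);	/* set_display_on */
	}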
2477 3167
2478int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) 3168int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3169 u8 *buf, int buflen)
2479{ 3170{
3171 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3172 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2480 u32 val; 3173 u32 val;
2481 u8 dt; 3174 u8 dt;
2482 int r; 3175 int r;
2483 3176
2484 if (dsi.debug_read) 3177 if (dsi->debug_read)
2485 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd); 3178 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
2486 3179
2487 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); 3180 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
2488 if (r) 3181 if (r)
2489 goto err; 3182 goto err;
2490 3183
2491 r = dsi_vc_send_bta_sync(channel); 3184 r = dsi_vc_send_bta_sync(dssdev, channel);
2492 if (r) 3185 if (r)
2493 goto err; 3186 goto err;
2494 3187
2495 /* RX_FIFO_NOT_EMPTY */ 3188 /* RX_FIFO_NOT_EMPTY */
2496 if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) { 3189 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
2497 DSSERR("RX fifo empty when trying to read.\n"); 3190 DSSERR("RX fifo empty when trying to read.\n");
2498 r = -EIO; 3191 r = -EIO;
2499 goto err; 3192 goto err;
2500 } 3193 }
2501 3194
2502 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 3195 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2503 if (dsi.debug_read) 3196 if (dsi->debug_read)
2504 DSSDBG("\theader: %08x\n", val); 3197 DSSDBG("\theader: %08x\n", val);
2505 dt = FLD_GET(val, 5, 0); 3198 dt = FLD_GET(val, 5, 0);
2506 if (dt == DSI_DT_RX_ACK_WITH_ERR) { 3199 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
@@ -2511,7 +3204,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2511 3204
2512 } else if (dt == DSI_DT_RX_SHORT_READ_1) { 3205 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2513 u8 data = FLD_GET(val, 15, 8); 3206 u8 data = FLD_GET(val, 15, 8);
2514 if (dsi.debug_read) 3207 if (dsi->debug_read)
2515 DSSDBG("\tDCS short response, 1 byte: %02x\n", data); 3208 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
2516 3209
2517 if (buflen < 1) { 3210 if (buflen < 1) {
@@ -2524,7 +3217,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2524 return 1; 3217 return 1;
2525 } else if (dt == DSI_DT_RX_SHORT_READ_2) { 3218 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2526 u16 data = FLD_GET(val, 23, 8); 3219 u16 data = FLD_GET(val, 23, 8);
2527 if (dsi.debug_read) 3220 if (dsi->debug_read)
2528 DSSDBG("\tDCS short response, 2 byte: %04x\n", data); 3221 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
2529 3222
2530 if (buflen < 2) { 3223 if (buflen < 2) {
@@ -2539,7 +3232,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2539 } else if (dt == DSI_DT_RX_DCS_LONG_READ) { 3232 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2540 int w; 3233 int w;
2541 int len = FLD_GET(val, 23, 8); 3234 int len = FLD_GET(val, 23, 8);
2542 if (dsi.debug_read) 3235 if (dsi->debug_read)
2543 DSSDBG("\tDCS long response, len %d\n", len); 3236 DSSDBG("\tDCS long response, len %d\n", len);
2544 3237
2545 if (len > buflen) { 3238 if (len > buflen) {
@@ -2550,8 +3243,9 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2550 /* two byte checksum ends the packet, not included in len */ 3243 /* two byte checksum ends the packet, not included in len */
2551 for (w = 0; w < len + 2;) { 3244 for (w = 0; w < len + 2;) {
2552 int b; 3245 int b;
2553 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 3246 val = dsi_read_reg(dsidev,
 3247 DSI_VC_SHORT_PACKET_HEADER(channel));
2554 if (dsi.debug_read) 3248 if (dsi->debug_read)
2555 DSSDBG("\t\t%02x %02x %02x %02x\n", 3249 DSSDBG("\t\t%02x %02x %02x %02x\n",
2556 (val >> 0) & 0xff, 3250 (val >> 0) & 0xff,
2557 (val >> 8) & 0xff, 3251 (val >> 8) & 0xff,
@@ -2582,11 +3276,12 @@ err:
2582} 3276}
2583EXPORT_SYMBOL(dsi_vc_dcs_read); 3277EXPORT_SYMBOL(dsi_vc_dcs_read);
2584 3278
2585int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data) 3279int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3280 u8 *data)
2586{ 3281{
2587 int r; 3282 int r;
2588 3283
2589 r = dsi_vc_dcs_read(channel, dcs_cmd, data, 1); 3284 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, data, 1);
2590 3285
2591 if (r < 0) 3286 if (r < 0)
2592 return r; 3287 return r;
@@ -2598,12 +3293,13 @@ int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data)
2598} 3293}
2599EXPORT_SYMBOL(dsi_vc_dcs_read_1); 3294EXPORT_SYMBOL(dsi_vc_dcs_read_1);
2600 3295
2601int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2) 3296int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3297 u8 *data1, u8 *data2)
2602{ 3298{
2603 u8 buf[2]; 3299 u8 buf[2];
2604 int r; 3300 int r;
2605 3301
2606 r = dsi_vc_dcs_read(channel, dcs_cmd, buf, 2); 3302 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, buf, 2);
2607 3303
2608 if (r < 0) 3304 if (r < 0)
2609 return r; 3305 return r;
@@ -2618,14 +3314,94 @@ int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2)
2618} 3314}
2619EXPORT_SYMBOL(dsi_vc_dcs_read_2); 3315EXPORT_SYMBOL(dsi_vc_dcs_read_2);
2620 3316
2621int dsi_vc_set_max_rx_packet_size(int channel, u16 len) 3317int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3318 u16 len)
2622{ 3319{
2623 return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE, 3320 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3321
3322 return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
2624 len, 0); 3323 len, 0);
2625} 3324}
2626EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size); 3325EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
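Reads longer than two bytes first need the peripheral's maximum return packet size raised, which is what dsi_vc_set_max_rx_packet_size() is for. A sketch, assuming virtual channel 0 and a panel-specific ID register (the 0x04 opcode and the buffer length are illustrative):

	static int example_read_panel_id(struct omap_dss_device *dssdev,
			u8 *buf, int len)
	{
		int r;

		r = dsi_vc_set_max_rx_packet_size(dssdev, 0, len);
		if (r)
			return r;

		/* returns the number of bytes read, or a negative error */
		return dsi_vc_dcs_read(dssdev, 0, 0x04, buf, len);
	}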
2627 3326
2628static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) 3327static int dsi_enter_ulps(struct platform_device *dsidev)
3328{
3329 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3330 DECLARE_COMPLETION_ONSTACK(completion);
3331 int r;
3332
3333 DSSDBGF();
3334
3335 WARN_ON(!dsi_bus_is_locked(dsidev));
3336
3337 WARN_ON(dsi->ulps_enabled);
3338
3339 if (dsi->ulps_enabled)
3340 return 0;
3341
3342 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3343 DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
3344 return -EIO;
3345 }
3346
3347 dsi_sync_vc(dsidev, 0);
3348 dsi_sync_vc(dsidev, 1);
3349 dsi_sync_vc(dsidev, 2);
3350 dsi_sync_vc(dsidev, 3);
3351
3352 dsi_force_tx_stop_mode_io(dsidev);
3353
3354 dsi_vc_enable(dsidev, 0, false);
3355 dsi_vc_enable(dsidev, 1, false);
3356 dsi_vc_enable(dsidev, 2, false);
3357 dsi_vc_enable(dsidev, 3, false);
3358
3359 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
3360 DSSERR("HS busy when enabling ULPS\n");
3361 return -EIO;
3362 }
3363
3364 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
3365 DSSERR("LP busy when enabling ULPS\n");
3366 return -EIO;
3367 }
3368
3369 r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3370 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3371 if (r)
3372 return r;
3373
3374 /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3375 /* LANEx_ULPS_SIG2 */
3376 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
3377 7, 5);
3378
3379 if (wait_for_completion_timeout(&completion,
3380 msecs_to_jiffies(1000)) == 0) {
3381 DSSERR("ULPS enable timeout\n");
3382 r = -EIO;
3383 goto err;
3384 }
3385
3386 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3387 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3388
3389 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3390
3391 dsi_if_enable(dsidev, false);
3392
3393 dsi->ulps_enabled = true;
3394
3395 return 0;
3396
3397err:
3398 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3399 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3400 return r;
3401}
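dsi_enter_ulps() is not called directly by panel drivers; it runs from the disable path further down when the caller asks for ULPS. A sketch of how a driver would request it, assuming a hypothetical suspend handler (only the omapdss_dsi_display_disable() signature comes from this patch):

	static void example_panel_suspend(struct omap_dss_device *dssdev)
	{
		/* keep the lanes powered and connected, but park them in ULPS */
		omapdss_dsi_display_disable(dssdev, false, true);
	}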
3402
3403static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3404 unsigned ticks, bool x4, bool x16)
2629{ 3405{
2630 unsigned long fck; 3406 unsigned long fck;
2631 unsigned long total_ticks; 3407 unsigned long total_ticks;
@@ -2634,14 +3410,14 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16)
2634 BUG_ON(ticks > 0x1fff); 3410 BUG_ON(ticks > 0x1fff);
2635 3411
2636 /* ticks in DSI_FCK */ 3412 /* ticks in DSI_FCK */
2637 fck = dsi_fclk_rate(); 3413 fck = dsi_fclk_rate(dsidev);
2638 3414
2639 r = dsi_read_reg(DSI_TIMING2); 3415 r = dsi_read_reg(dsidev, DSI_TIMING2);
2640 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ 3416 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
2641 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ 3417 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
2642 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ 3418 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
2643 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ 3419 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
2644 dsi_write_reg(DSI_TIMING2, r); 3420 dsi_write_reg(dsidev, DSI_TIMING2, r);
2645 3421
2646 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 3422 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
2647 3423
@@ -2651,7 +3427,8 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16)
2651 (total_ticks * 1000) / (fck / 1000 / 1000)); 3427 (total_ticks * 1000) / (fck / 1000 / 1000));
2652} 3428}
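Worked example of the tick scaling used by these timeout helpers: with ticks = 0x1fff and both the x4 and x16 multipliers set, total_ticks = 0x1fff * 4 * 16 = 524224; at an illustrative 100 MHz DSI functional clock (the real rate comes from dsi_fclk_rate()) that is roughly 5.2 ms, while ticks = 0x1000 with no multipliers works out to about 41 us.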
2653 3429
2654static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) 3430static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
3431 bool x8, bool x16)
2655{ 3432{
2656 unsigned long fck; 3433 unsigned long fck;
2657 unsigned long total_ticks; 3434 unsigned long total_ticks;
@@ -2660,14 +3437,14 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16)
2660 BUG_ON(ticks > 0x1fff); 3437 BUG_ON(ticks > 0x1fff);
2661 3438
2662 /* ticks in DSI_FCK */ 3439 /* ticks in DSI_FCK */
2663 fck = dsi_fclk_rate(); 3440 fck = dsi_fclk_rate(dsidev);
2664 3441
2665 r = dsi_read_reg(DSI_TIMING1); 3442 r = dsi_read_reg(dsidev, DSI_TIMING1);
2666 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ 3443 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
2667 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ 3444 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
2668 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ 3445 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
2669 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ 3446 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
2670 dsi_write_reg(DSI_TIMING1, r); 3447 dsi_write_reg(dsidev, DSI_TIMING1, r);
2671 3448
2672 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); 3449 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
2673 3450
@@ -2677,7 +3454,8 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16)
2677 (total_ticks * 1000) / (fck / 1000 / 1000)); 3454 (total_ticks * 1000) / (fck / 1000 / 1000));
2678} 3455}
2679 3456
2680static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) 3457static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3458 unsigned ticks, bool x4, bool x16)
2681{ 3459{
2682 unsigned long fck; 3460 unsigned long fck;
2683 unsigned long total_ticks; 3461 unsigned long total_ticks;
@@ -2686,14 +3464,14 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16)
2686 BUG_ON(ticks > 0x1fff); 3464 BUG_ON(ticks > 0x1fff);
2687 3465
2688 /* ticks in DSI_FCK */ 3466 /* ticks in DSI_FCK */
2689 fck = dsi_fclk_rate(); 3467 fck = dsi_fclk_rate(dsidev);
2690 3468
2691 r = dsi_read_reg(DSI_TIMING1); 3469 r = dsi_read_reg(dsidev, DSI_TIMING1);
2692 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ 3470 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2693 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ 3471 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
2694 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ 3472 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
2695 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ 3473 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
2696 dsi_write_reg(DSI_TIMING1, r); 3474 dsi_write_reg(dsidev, DSI_TIMING1, r);
2697 3475
2698 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 3476 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
2699 3477
@@ -2703,7 +3481,8 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16)
2703 (total_ticks * 1000) / (fck / 1000 / 1000)); 3481 (total_ticks * 1000) / (fck / 1000 / 1000));
2704} 3482}
2705 3483
2706static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) 3484static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3485 unsigned ticks, bool x4, bool x16)
2707{ 3486{
2708 unsigned long fck; 3487 unsigned long fck;
2709 unsigned long total_ticks; 3488 unsigned long total_ticks;
@@ -2712,14 +3491,14 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16)
2712 BUG_ON(ticks > 0x1fff); 3491 BUG_ON(ticks > 0x1fff);
2713 3492
2714 /* ticks in TxByteClkHS */ 3493 /* ticks in TxByteClkHS */
2715 fck = dsi_get_txbyteclkhs(); 3494 fck = dsi_get_txbyteclkhs(dsidev);
2716 3495
2717 r = dsi_read_reg(DSI_TIMING2); 3496 r = dsi_read_reg(dsidev, DSI_TIMING2);
2718 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ 3497 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
2719 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ 3498 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
2720 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ 3499 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
2721 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ 3500 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
2722 dsi_write_reg(DSI_TIMING2, r); 3501 dsi_write_reg(dsidev, DSI_TIMING2, r);
2723 3502
2724 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 3503 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
2725 3504
@@ -2730,24 +3509,25 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16)
2730} 3509}
2731static int dsi_proto_config(struct omap_dss_device *dssdev) 3510static int dsi_proto_config(struct omap_dss_device *dssdev)
2732{ 3511{
3512 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2733 u32 r; 3513 u32 r;
2734 int buswidth = 0; 3514 int buswidth = 0;
2735 3515
2736 dsi_config_tx_fifo(DSI_FIFO_SIZE_32, 3516 dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
2737 DSI_FIFO_SIZE_32, 3517 DSI_FIFO_SIZE_32,
2738 DSI_FIFO_SIZE_32, 3518 DSI_FIFO_SIZE_32,
2739 DSI_FIFO_SIZE_32); 3519 DSI_FIFO_SIZE_32);
2740 3520
2741 dsi_config_rx_fifo(DSI_FIFO_SIZE_32, 3521 dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
2742 DSI_FIFO_SIZE_32, 3522 DSI_FIFO_SIZE_32,
2743 DSI_FIFO_SIZE_32, 3523 DSI_FIFO_SIZE_32,
2744 DSI_FIFO_SIZE_32); 3524 DSI_FIFO_SIZE_32);
2745 3525
2746 /* XXX what values for the timeouts? */ 3526 /* XXX what values for the timeouts? */
2747 dsi_set_stop_state_counter(0x1000, false, false); 3527 dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
2748 dsi_set_ta_timeout(0x1fff, true, true); 3528 dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
2749 dsi_set_lp_rx_timeout(0x1fff, true, true); 3529 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
2750 dsi_set_hs_tx_timeout(0x1fff, true, true); 3530 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
2751 3531
2752 switch (dssdev->ctrl.pixel_size) { 3532 switch (dssdev->ctrl.pixel_size) {
2753 case 16: 3533 case 16:
@@ -2763,7 +3543,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
2763 BUG(); 3543 BUG();
2764 } 3544 }
2765 3545
2766 r = dsi_read_reg(DSI_CTRL); 3546 r = dsi_read_reg(dsidev, DSI_CTRL);
2767 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */ 3547 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
2768 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */ 3548 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
2769 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */ 3549 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
@@ -2773,21 +3553,25 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
2773 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */ 3553 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
2774 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */ 3554 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
2775 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */ 3555 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
 3556 if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
2776 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */ 3557 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
 3558 /* DCS_CMD_CODE, 1=start, 0=continue */
2777 r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */ 3559 r = FLD_MOD(r, 0, 25, 25);
 3560 }
2778 3561
2779 dsi_write_reg(DSI_CTRL, r); 3562 dsi_write_reg(dsidev, DSI_CTRL, r);
2780 3563
2781 dsi_vc_initial_config(0); 3564 dsi_vc_initial_config(dsidev, 0);
2782 dsi_vc_initial_config(1); 3565 dsi_vc_initial_config(dsidev, 1);
2783 dsi_vc_initial_config(2); 3566 dsi_vc_initial_config(dsidev, 2);
2784 dsi_vc_initial_config(3); 3567 dsi_vc_initial_config(dsidev, 3);
2785 3568
2786 return 0; 3569 return 0;
2787} 3570}
2788 3571
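The register programming above leans on the DSS bit-field macros; for reference, a minimal equivalent of FLD_MOD(), which replaces bits [start:end] of a register value (the real macros live in the driver's headers, so these names carry an EXAMPLE_ prefix to mark them as a sketch):

	#define EXAMPLE_FLD_MASK(start, end) \
		((u32)(((1 << ((start) - (end) + 1)) - 1) << (end)))
	#define EXAMPLE_FLD_VAL(val, start, end) \
		(((val) << (end)) & EXAMPLE_FLD_MASK(start, end))
	#define EXAMPLE_FLD_MOD(orig, val, start, end) \
		(((orig) & ~EXAMPLE_FLD_MASK(start, end)) | \
		 EXAMPLE_FLD_VAL(val, start, end))

	/* e.g. EXAMPLE_FLD_MOD(r, 1, 19, 19) sets the EOT_ENABLE bit of DSI_CTRL */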
2789static void dsi_proto_timings(struct omap_dss_device *dssdev) 3572static void dsi_proto_timings(struct omap_dss_device *dssdev)
2790{ 3573{
3574 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2791 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; 3575 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
2792 unsigned tclk_pre, tclk_post; 3576 unsigned tclk_pre, tclk_post;
2793 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero; 3577 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
@@ -2797,32 +3581,27 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2797 unsigned ths_eot; 3581 unsigned ths_eot;
2798 u32 r; 3582 u32 r;
2799 3583
2800 r = dsi_read_reg(DSI_DSIPHY_CFG0); 3584 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
2801 ths_prepare = FLD_GET(r, 31, 24); 3585 ths_prepare = FLD_GET(r, 31, 24);
2802 ths_prepare_ths_zero = FLD_GET(r, 23, 16); 3586 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
2803 ths_zero = ths_prepare_ths_zero - ths_prepare; 3587 ths_zero = ths_prepare_ths_zero - ths_prepare;
2804 ths_trail = FLD_GET(r, 15, 8); 3588 ths_trail = FLD_GET(r, 15, 8);
2805 ths_exit = FLD_GET(r, 7, 0); 3589 ths_exit = FLD_GET(r, 7, 0);
2806 3590
2807 r = dsi_read_reg(DSI_DSIPHY_CFG1); 3591 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2808 tlpx = FLD_GET(r, 22, 16) * 2; 3592 tlpx = FLD_GET(r, 22, 16) * 2;
2809 tclk_trail = FLD_GET(r, 15, 8); 3593 tclk_trail = FLD_GET(r, 15, 8);
2810 tclk_zero = FLD_GET(r, 7, 0); 3594 tclk_zero = FLD_GET(r, 7, 0);
2811 3595
2812 r = dsi_read_reg(DSI_DSIPHY_CFG2); 3596 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
2813 tclk_prepare = FLD_GET(r, 7, 0); 3597 tclk_prepare = FLD_GET(r, 7, 0);
2814 3598
2815 /* min 8*UI */ 3599 /* min 8*UI */
2816 tclk_pre = 20; 3600 tclk_pre = 20;
2817 /* min 60ns + 52*UI */ 3601 /* min 60ns + 52*UI */
2818 tclk_post = ns2ddr(60) + 26; 3602 tclk_post = ns2ddr(dsidev, 60) + 26;
2819 3603
2820 /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */ 3604 ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev));
2821 if (dssdev->phy.dsi.data1_lane != 0 &&
2822 dssdev->phy.dsi.data2_lane != 0)
2823 ths_eot = 2;
2824 else
2825 ths_eot = 4;
2826 3605
2827 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare, 3606 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
2828 4); 3607 4);
@@ -2831,10 +3610,10 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2831 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); 3610 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
2832 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); 3611 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
2833 3612
2834 r = dsi_read_reg(DSI_CLK_TIMING); 3613 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
2835 r = FLD_MOD(r, ddr_clk_pre, 15, 8); 3614 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
2836 r = FLD_MOD(r, ddr_clk_post, 7, 0); 3615 r = FLD_MOD(r, ddr_clk_post, 7, 0);
2837 dsi_write_reg(DSI_CLK_TIMING, r); 3616 dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
2838 3617
2839 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n", 3618 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
2840 ddr_clk_pre, 3619 ddr_clk_pre,
@@ -2848,7 +3627,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2848 3627
2849 r = FLD_VAL(enter_hs_mode_lat, 31, 16) | 3628 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
2850 FLD_VAL(exit_hs_mode_lat, 15, 0); 3629 FLD_VAL(exit_hs_mode_lat, 15, 0);
2851 dsi_write_reg(DSI_VM_TIMING7, r); 3630 dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
2852 3631
2853 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", 3632 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
2854 enter_hs_mode_lat, exit_hs_mode_lat); 3633 enter_hs_mode_lat, exit_hs_mode_lat);
@@ -2858,25 +3637,27 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2858#define DSI_DECL_VARS \ 3637#define DSI_DECL_VARS \
2859 int __dsi_cb = 0; u32 __dsi_cv = 0; 3638 int __dsi_cb = 0; u32 __dsi_cv = 0;
2860 3639
2861#define DSI_FLUSH(ch) \ 3640#define DSI_FLUSH(dsidev, ch) \
2862 if (__dsi_cb > 0) { \ 3641 if (__dsi_cb > 0) { \
2863 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \ 3642 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
2864 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \ 3643 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
2865 __dsi_cb = __dsi_cv = 0; \ 3644 __dsi_cb = __dsi_cv = 0; \
2866 } 3645 }
2867 3646
2868#define DSI_PUSH(ch, data) \ 3647#define DSI_PUSH(dsidev, ch, data) \
2869 do { \ 3648 do { \
2870 __dsi_cv |= (data) << (__dsi_cb * 8); \ 3649 __dsi_cv |= (data) << (__dsi_cb * 8); \
2871 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \ 3650 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
2872 if (++__dsi_cb > 3) \ 3651 if (++__dsi_cb > 3) \
2873 DSI_FLUSH(ch); \ 3652 DSI_FLUSH(dsidev, ch); \
2874 } while (0) 3653 } while (0)
2875 3654
2876static int dsi_update_screen_l4(struct omap_dss_device *dssdev, 3655static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2877 int x, int y, int w, int h) 3656 int x, int y, int w, int h)
2878{ 3657{
2879 /* Note: supports only 24bit colors in 32bit container */ 3658 /* Note: supports only 24bit colors in 32bit container */
3659 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3660 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2880 int first = 1; 3661 int first = 1;
2881 int fifo_stalls = 0; 3662 int fifo_stalls = 0;
2882 int max_dsi_packet_size; 3663 int max_dsi_packet_size;
@@ -2915,7 +3696,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2915 * in fifo */ 3696 * in fifo */
2916 3697
2917 /* When using CPU, max long packet size is TX buffer size */ 3698 /* When using CPU, max long packet size is TX buffer size */
2918 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4; 3699 max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4;
2919 3700
2920 /* we seem to get better perf if we divide the tx fifo to half, 3701 /* we seem to get better perf if we divide the tx fifo to half,
2921 and while the other half is being sent, we fill the other half 3702 and while the other half is being sent, we fill the other half
@@ -2944,35 +3725,36 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2944#if 1 3725#if 1
2945 /* using fifo not empty */ 3726 /* using fifo not empty */
2946 /* TX_FIFO_NOT_EMPTY */ 3727 /* TX_FIFO_NOT_EMPTY */
2947 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { 3728 while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) {
2948 fifo_stalls++; 3729 fifo_stalls++;
2949 if (fifo_stalls > 0xfffff) { 3730 if (fifo_stalls > 0xfffff) {
2950 DSSERR("fifo stalls overflow, pixels left %d\n", 3731 DSSERR("fifo stalls overflow, pixels left %d\n",
2951 pixels_left); 3732 pixels_left);
2952 dsi_if_enable(0); 3733 dsi_if_enable(dsidev, 0);
2953 return -EIO; 3734 return -EIO;
2954 } 3735 }
2955 udelay(1); 3736 udelay(1);
2956 } 3737 }
2957#elif 1 3738#elif 1
2958 /* using fifo emptiness */ 3739 /* using fifo emptiness */
2959 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 < 3740 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
2960 max_dsi_packet_size) { 3741 max_dsi_packet_size) {
2961 fifo_stalls++; 3742 fifo_stalls++;
2962 if (fifo_stalls > 0xfffff) { 3743 if (fifo_stalls > 0xfffff) {
2963 DSSERR("fifo stalls overflow, pixels left %d\n", 3744 DSSERR("fifo stalls overflow, pixels left %d\n",
2964 pixels_left); 3745 pixels_left);
2965 dsi_if_enable(0); 3746 dsi_if_enable(dsidev, 0);
2966 return -EIO; 3747 return -EIO;
2967 } 3748 }
2968 } 3749 }
2969#else 3750#else
2970 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) { 3751 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS,
3752 7, 0) + 1) * 4 == 0) {
2971 fifo_stalls++; 3753 fifo_stalls++;
2972 if (fifo_stalls > 0xfffff) { 3754 if (fifo_stalls > 0xfffff) {
2973 DSSERR("fifo stalls overflow, pixels left %d\n", 3755 DSSERR("fifo stalls overflow, pixels left %d\n",
2974 pixels_left); 3756 pixels_left);
2975 dsi_if_enable(0); 3757 dsi_if_enable(dsidev, 0);
2976 return -EIO; 3758 return -EIO;
2977 } 3759 }
2978 } 3760 }
@@ -2981,17 +3763,17 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2981 3763
2982 pixels_left -= pixels; 3764 pixels_left -= pixels;
2983 3765
2984 dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE, 3766 dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE,
2985 1 + pixels * bytespp, 0); 3767 1 + pixels * bytespp, 0);
2986 3768
2987 DSI_PUSH(0, dcs_cmd); 3769 DSI_PUSH(dsidev, 0, dcs_cmd);
2988 3770
2989 while (pixels-- > 0) { 3771 while (pixels-- > 0) {
2990 u32 pix = __raw_readl(data++); 3772 u32 pix = __raw_readl(data++);
2991 3773
2992 DSI_PUSH(0, (pix >> 16) & 0xff); 3774 DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff);
2993 DSI_PUSH(0, (pix >> 8) & 0xff); 3775 DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff);
2994 DSI_PUSH(0, (pix >> 0) & 0xff); 3776 DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff);
2995 3777
2996 current_x++; 3778 current_x++;
2997 if (current_x == x+w) { 3779 if (current_x == x+w) {
@@ -3000,7 +3782,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3000 } 3782 }
3001 } 3783 }
3002 3784
3003 DSI_FLUSH(0); 3785 DSI_FLUSH(dsidev, 0);
3004 } 3786 }
3005 3787
3006 return 0; 3788 return 0;
@@ -3009,6 +3791,8 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3009static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, 3791static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3010 u16 x, u16 y, u16 w, u16 h) 3792 u16 x, u16 y, u16 w, u16 h)
3011{ 3793{
3794 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3795 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3012 unsigned bytespp; 3796 unsigned bytespp;
3013 unsigned bytespl; 3797 unsigned bytespl;
3014 unsigned bytespf; 3798 unsigned bytespf;
@@ -3017,16 +3801,13 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3017 unsigned packet_len; 3801 unsigned packet_len;
3018 u32 l; 3802 u32 l;
3019 int r; 3803 int r;
3020 const unsigned channel = dsi.update_channel; 3804 const unsigned channel = dsi->update_channel;
3021 /* line buffer is 1024 x 24bits */ 3805 const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
3022 /* XXX: for some reason using full buffer size causes considerable TX
3023 * slowdown with update sizes that fill the whole buffer */
3024 const unsigned line_buf_size = 1023 * 3;
3025 3806
3026 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n", 3807 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
3027 x, y, w, h); 3808 x, y, w, h);
3028 3809
3029 dsi_vc_config_vp(channel); 3810 dsi_vc_config_vp(dsidev, channel);
3030 3811
3031 bytespp = dssdev->ctrl.pixel_size / 8; 3812 bytespp = dssdev->ctrl.pixel_size / 8;
3032 bytespl = w * bytespp; 3813 bytespl = w * bytespp;
@@ -3047,15 +3828,16 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3047 total_len += (bytespf % packet_payload) + 1; 3828 total_len += (bytespf % packet_payload) + 1;
3048 3829
3049 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ 3830 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
3050 dsi_write_reg(DSI_VC_TE(channel), l); 3831 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3051 3832
3052 dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0); 3833 dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
3834 packet_len, 0);
3053 3835
3054 if (dsi.te_enabled) 3836 if (dsi->te_enabled)
3055 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ 3837 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
3056 else 3838 else
3057 l = FLD_MOD(l, 1, 31, 31); /* TE_START */ 3839 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
3058 dsi_write_reg(DSI_VC_TE(channel), l); 3840 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3059 3841
3060 /* We put SIDLEMODE to no-idle for the duration of the transfer, 3842 /* We put SIDLEMODE to no-idle for the duration of the transfer,
3061 * because DSS interrupts are not capable of waking up the CPU and the 3843 * because DSS interrupts are not capable of waking up the CPU and the
@@ -3065,23 +3847,23 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3065 */ 3847 */
3066 dispc_disable_sidle(); 3848 dispc_disable_sidle();
3067 3849
3068 dsi_perf_mark_start(); 3850 dsi_perf_mark_start(dsidev);
3069 3851
3070 r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work, 3852 r = schedule_delayed_work(&dsi->framedone_timeout_work,
3071 msecs_to_jiffies(250)); 3853 msecs_to_jiffies(250));
3072 BUG_ON(r == 0); 3854 BUG_ON(r == 0);
3073 3855
3074 dss_start_update(dssdev); 3856 dss_start_update(dssdev);
3075 3857
3076 if (dsi.te_enabled) { 3858 if (dsi->te_enabled) {
3077 /* disable LP_RX_TO, so that we can receive TE. Time to wait 3859 /* disable LP_RX_TO, so that we can receive TE. Time to wait
3078 * for TE is longer than the timer allows */ 3860 * for TE is longer than the timer allows */
3079 REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ 3861 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
3080 3862
3081 dsi_vc_send_bta(channel); 3863 dsi_vc_send_bta(dsidev, channel);
3082 3864
3083#ifdef DSI_CATCH_MISSING_TE 3865#ifdef DSI_CATCH_MISSING_TE
3084 mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250)); 3866 mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
3085#endif 3867#endif
3086 } 3868 }
3087} 3869}
@@ -3093,41 +3875,28 @@ static void dsi_te_timeout(unsigned long arg)
3093} 3875}
3094#endif 3876#endif
3095 3877
3096static void dsi_framedone_bta_callback(void *data, u32 mask); 3878static void dsi_handle_framedone(struct platform_device *dsidev, int error)
3097
3098static void dsi_handle_framedone(int error)
3099{ 3879{
3100 const int channel = dsi.update_channel; 3880 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3101
3102 dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
3103 NULL, DSI_VC_IRQ_BTA);
3104
3105 cancel_delayed_work(&dsi.framedone_timeout_work);
3106 3881
3107 /* SIDLEMODE back to smart-idle */ 3882 /* SIDLEMODE back to smart-idle */
3108 dispc_enable_sidle(); 3883 dispc_enable_sidle();
3109 3884
3110 if (dsi.te_enabled) { 3885 if (dsi->te_enabled) {
3111 /* enable LP_RX_TO again after the TE */ 3886 /* enable LP_RX_TO again after the TE */
3112 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ 3887 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3113 } 3888 }
3114 3889
3115 /* RX_FIFO_NOT_EMPTY */ 3890 dsi->framedone_callback(error, dsi->framedone_data);
3116 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
3117 DSSERR("Received error during frame transfer:\n");
3118 dsi_vc_flush_receive_data(channel);
3119 if (!error)
3120 error = -EIO;
3121 }
3122
3123 dsi.framedone_callback(error, dsi.framedone_data);
3124 3891
3125 if (!error) 3892 if (!error)
3126 dsi_perf_show("DISPC"); 3893 dsi_perf_show(dsidev, "DISPC");
3127} 3894}
3128 3895
3129static void dsi_framedone_timeout_work_callback(struct work_struct *work) 3896static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3130{ 3897{
3898 struct dsi_data *dsi = container_of(work, struct dsi_data,
3899 framedone_timeout_work.work);
3131 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after 3900 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
3132 * 250ms which would conflict with this timeout work. What should be 3901 * 250ms which would conflict with this timeout work. What should be
3133 * done is first cancel the transfer on the HW, and then cancel the 3902 * done is first cancel the transfer on the HW, and then cancel the
@@ -3137,70 +3906,34 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3137 3906
3138 DSSERR("Framedone not received for 250ms!\n"); 3907 DSSERR("Framedone not received for 250ms!\n");
3139 3908
3140 dsi_handle_framedone(-ETIMEDOUT); 3909 dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
3141}
3142
3143static void dsi_framedone_bta_callback(void *data, u32 mask)
3144{
3145 dsi_handle_framedone(0);
3146
3147#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3148 dispc_fake_vsync_irq();
3149#endif
3150} 3910}
3151 3911
3152static void dsi_framedone_irq_callback(void *data, u32 mask) 3912static void dsi_framedone_irq_callback(void *data, u32 mask)
3153{ 3913{
3154 const int channel = dsi.update_channel; 3914 struct omap_dss_device *dssdev = (struct omap_dss_device *) data;
3155 int r; 3915 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3916 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3156 3917
3157 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and 3918 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
3158 * turns itself off. However, DSI still has the pixels in its buffers, 3919 * turns itself off. However, DSI still has the pixels in its buffers,
3159 * and is sending the data. 3920 * and is sending the data.
3160 */ 3921 */
3161 3922
3162 if (dsi.te_enabled) { 3923 __cancel_delayed_work(&dsi->framedone_timeout_work);
3163 /* enable LP_RX_TO again after the TE */
3164 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3165 }
3166
3167 /* Send BTA after the frame. We need this for the TE to work, as TE
3168 * trigger is only sent for BTAs without preceding packet. Thus we need
3169 * to BTA after the pixel packets so that next BTA will cause TE
3170 * trigger.
3171 *
3172 * This is not needed when TE is not in use, but we do it anyway to
3173 * make sure that the transfer has been completed. It would be more
3174 * optimal, but more complex, to wait only just before starting next
3175 * transfer.
3176 *
3177 * Also, as there's no interrupt telling when the transfer has been
3178 * done and the channel could be reconfigured, the only way is to
3179 * busyloop until TE_SIZE is zero. With BTA we can do this
3180 * asynchronously.
3181 * */
3182
3183 r = dsi_register_isr_vc(channel, dsi_framedone_bta_callback,
3184 NULL, DSI_VC_IRQ_BTA);
3185 if (r) {
3186 DSSERR("Failed to register BTA ISR\n");
3187 dsi_handle_framedone(-EIO);
3188 return;
3189 }
3190 3924
3191 r = dsi_vc_send_bta(channel); 3925 dsi_handle_framedone(dsidev, 0);
3192 if (r) { 3926
3193 DSSERR("BTA after framedone failed\n"); 3927#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3194 dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback, 3928 dispc_fake_vsync_irq();
3195 NULL, DSI_VC_IRQ_BTA); 3929#endif
3196 dsi_handle_framedone(-EIO);
3197 }
3198} 3930}
3199 3931
3200int omap_dsi_prepare_update(struct omap_dss_device *dssdev, 3932int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3201 u16 *x, u16 *y, u16 *w, u16 *h, 3933 u16 *x, u16 *y, u16 *w, u16 *h,
3202 bool enlarge_update_area) 3934 bool enlarge_update_area)
3203{ 3935{
3936 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3204 u16 dw, dh; 3937 u16 dw, dh;
3205 3938
3206 dssdev->driver->get_resolution(dssdev, &dw, &dh); 3939 dssdev->driver->get_resolution(dssdev, &dw, &dh);
@@ -3220,7 +3953,7 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3220 if (*w == 0 || *h == 0) 3953 if (*w == 0 || *h == 0)
3221 return -EINVAL; 3954 return -EINVAL;
3222 3955
3223 dsi_perf_mark_setup(); 3956 dsi_perf_mark_setup(dsidev);
3224 3957
3225 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { 3958 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3226 dss_setup_partial_planes(dssdev, x, y, w, h, 3959 dss_setup_partial_planes(dssdev, x, y, w, h,
@@ -3237,7 +3970,10 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3237 u16 x, u16 y, u16 w, u16 h, 3970 u16 x, u16 y, u16 w, u16 h,
3238 void (*callback)(int, void *), void *data) 3971 void (*callback)(int, void *), void *data)
3239{ 3972{
3240 dsi.update_channel = channel; 3973 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3974 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3975
3976 dsi->update_channel = channel;
3241 3977
3242 /* OMAP DSS cannot send updates of odd widths. 3978 /* OMAP DSS cannot send updates of odd widths.
3243 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON 3979 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
@@ -3246,14 +3982,14 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3246 BUG_ON(x % 2 == 1); 3982 BUG_ON(x % 2 == 1);
3247 3983
3248 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { 3984 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3249 dsi.framedone_callback = callback; 3985 dsi->framedone_callback = callback;
3250 dsi.framedone_data = data; 3986 dsi->framedone_data = data;
3251 3987
3252 dsi.update_region.x = x; 3988 dsi->update_region.x = x;
3253 dsi.update_region.y = y; 3989 dsi->update_region.y = y;
3254 dsi.update_region.w = w; 3990 dsi->update_region.w = w;
3255 dsi.update_region.h = h; 3991 dsi->update_region.h = h;
3256 dsi.update_region.device = dssdev; 3992 dsi->update_region.device = dssdev;
3257 3993
3258 dsi_update_screen_dispc(dssdev, x, y, w, h); 3994 dsi_update_screen_dispc(dssdev, x, y, w, h);
3259 } else { 3995 } else {
@@ -3263,7 +3999,7 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3263 if (r) 3999 if (r)
3264 return r; 4000 return r;
3265 4001
3266 dsi_perf_show("L4"); 4002 dsi_perf_show(dsidev, "L4");
3267 callback(0, data); 4003 callback(0, data);
3268 } 4004 }
3269 4005
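From the panel driver's side the manual-update flow is unchanged apart from the extra dssdev argument: prepare the region, then kick the update with a frame-done callback. A sketch; the channel argument follows the update_channel use above, and the callback body is an assumption:

	static void example_framedone_cb(int err, void *data)
	{
		if (err)
			pr_err("DSI frame transfer failed: %d\n", err);
	}

	static int example_panel_update(struct omap_dss_device *dssdev,
			int channel, u16 w, u16 h)
	{
		u16 x = 0, y = 0;
		int r;

		r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h, false);
		if (r)
			return r;

		return omap_dsi_update(dssdev, channel, x, y, w, h,
				example_framedone_cb, NULL);
	}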
@@ -3276,9 +4012,13 @@ EXPORT_SYMBOL(omap_dsi_update);
3276static int dsi_display_init_dispc(struct omap_dss_device *dssdev) 4012static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
3277{ 4013{
3278 int r; 4014 int r;
4015 u32 irq;
4016
4017 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4018 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
3279 4019
3280 r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL, 4020 r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
3281 DISPC_IRQ_FRAMEDONE); 4021 irq);
3282 if (r) { 4022 if (r) {
3283 DSSERR("can't get FRAMEDONE irq\n"); 4023 DSSERR("can't get FRAMEDONE irq\n");
3284 return r; 4024 return r;
@@ -3311,28 +4051,34 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
3311 4051
3312static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) 4052static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
3313{ 4053{
3314 omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL, 4054 u32 irq;
3315 DISPC_IRQ_FRAMEDONE); 4055
4056 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4057 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
4058
4059 omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
4060 irq);
3316} 4061}
3317 4062
3318static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) 4063static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3319{ 4064{
4065 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3320 struct dsi_clock_info cinfo; 4066 struct dsi_clock_info cinfo;
3321 int r; 4067 int r;
3322 4068
3323 /* we always use DSS_CLK_SYSCK as input clock */ 4069 /* we always use DSS_CLK_SYSCK as input clock */
3324 cinfo.use_sys_clk = true; 4070 cinfo.use_sys_clk = true;
3325 cinfo.regn = dssdev->phy.dsi.div.regn; 4071 cinfo.regn = dssdev->clocks.dsi.regn;
3326 cinfo.regm = dssdev->phy.dsi.div.regm; 4072 cinfo.regm = dssdev->clocks.dsi.regm;
3327 cinfo.regm_dispc = dssdev->phy.dsi.div.regm_dispc; 4073 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
3328 cinfo.regm_dsi = dssdev->phy.dsi.div.regm_dsi; 4074 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
3329 r = dsi_calc_clock_rates(dssdev, &cinfo); 4075 r = dsi_calc_clock_rates(dssdev, &cinfo);
3330 if (r) { 4076 if (r) {
3331 DSSERR("Failed to calc dsi clocks\n"); 4077 DSSERR("Failed to calc dsi clocks\n");
3332 return r; 4078 return r;
3333 } 4079 }
3334 4080
3335 r = dsi_pll_set_clock_div(&cinfo); 4081 r = dsi_pll_set_clock_div(dsidev, &cinfo);
3336 if (r) { 4082 if (r) {
3337 DSSERR("Failed to set dsi clocks\n"); 4083 DSSERR("Failed to set dsi clocks\n");
3338 return r; 4084 return r;
@@ -3343,14 +4089,15 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3343 4089
3344static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) 4090static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3345{ 4091{
4092 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3346 struct dispc_clock_info dispc_cinfo; 4093 struct dispc_clock_info dispc_cinfo;
3347 int r; 4094 int r;
3348 unsigned long long fck; 4095 unsigned long long fck;
3349 4096
3350 fck = dsi_get_pll_hsdiv_dispc_rate(); 4097 fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
3351 4098
3352 dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div; 4099 dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
3353 dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div; 4100 dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
3354 4101
3355 r = dispc_calc_clock_rates(fck, &dispc_cinfo); 4102 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
3356 if (r) { 4103 if (r) {
@@ -3369,11 +4116,11 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3369 4116
3370static int dsi_display_init_dsi(struct omap_dss_device *dssdev) 4117static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3371{ 4118{
4119 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4120 int dsi_module = dsi_get_dsidev_id(dsidev);
3372 int r; 4121 int r;
3373 4122
3374 _dsi_print_reset_status(); 4123 r = dsi_pll_init(dsidev, true, true);
3375
3376 r = dsi_pll_init(dssdev, true, true);
3377 if (r) 4124 if (r)
3378 goto err0; 4125 goto err0;
3379 4126
@@ -3381,8 +4128,10 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3381 if (r) 4128 if (r)
3382 goto err1; 4129 goto err1;
3383 4130
3384 dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC); 4131 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
3385 dss_select_dsi_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI); 4132 dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
4133 dss_select_lcd_clk_source(dssdev->manager->id,
4134 dssdev->clocks.dispc.channel.lcd_clk_src);
3386 4135
3387 DSSDBG("PLL OK\n"); 4136 DSSDBG("PLL OK\n");
3388 4137
@@ -3390,82 +4139,92 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3390 if (r) 4139 if (r)
3391 goto err2; 4140 goto err2;
3392 4141
3393 r = dsi_complexio_init(dssdev); 4142 r = dsi_cio_init(dssdev);
3394 if (r) 4143 if (r)
3395 goto err2; 4144 goto err2;
3396 4145
3397 _dsi_print_reset_status(); 4146 _dsi_print_reset_status(dsidev);
3398 4147
3399 dsi_proto_timings(dssdev); 4148 dsi_proto_timings(dssdev);
3400 dsi_set_lp_clk_divisor(dssdev); 4149 dsi_set_lp_clk_divisor(dssdev);
3401 4150
3402 if (1) 4151 if (1)
3403 _dsi_print_reset_status(); 4152 _dsi_print_reset_status(dsidev);
3404 4153
3405 r = dsi_proto_config(dssdev); 4154 r = dsi_proto_config(dssdev);
3406 if (r) 4155 if (r)
3407 goto err3; 4156 goto err3;
3408 4157
3409 /* enable interface */ 4158 /* enable interface */
3410 dsi_vc_enable(0, 1); 4159 dsi_vc_enable(dsidev, 0, 1);
3411 dsi_vc_enable(1, 1); 4160 dsi_vc_enable(dsidev, 1, 1);
3412 dsi_vc_enable(2, 1); 4161 dsi_vc_enable(dsidev, 2, 1);
3413 dsi_vc_enable(3, 1); 4162 dsi_vc_enable(dsidev, 3, 1);
3414 dsi_if_enable(1); 4163 dsi_if_enable(dsidev, 1);
3415 dsi_force_tx_stop_mode_io(); 4164 dsi_force_tx_stop_mode_io(dsidev);
3416 4165
3417 return 0; 4166 return 0;
3418err3: 4167err3:
3419 dsi_complexio_uninit(); 4168 dsi_cio_uninit(dsidev);
3420err2: 4169err2:
3421 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 4170 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
3422 dss_select_dsi_clk_source(DSS_CLK_SRC_FCK); 4171 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
3423err1: 4172err1:
3424 dsi_pll_uninit(); 4173 dsi_pll_uninit(dsidev, true);
3425err0: 4174err0:
3426 return r; 4175 return r;
3427} 4176}
3428 4177
3429static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev) 4178static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4179 bool disconnect_lanes, bool enter_ulps)
3430{ 4180{
3431 /* disable interface */ 4181 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3432 dsi_if_enable(0); 4182 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3433 dsi_vc_enable(0, 0); 4183 int dsi_module = dsi_get_dsidev_id(dsidev);
3434 dsi_vc_enable(1, 0);
3435 dsi_vc_enable(2, 0);
3436 dsi_vc_enable(3, 0);
3437 4184
3438 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 4185 if (enter_ulps && !dsi->ulps_enabled)
3439 dss_select_dsi_clk_source(DSS_CLK_SRC_FCK); 4186 dsi_enter_ulps(dsidev);
3440 dsi_complexio_uninit(); 4187
3441 dsi_pll_uninit(); 4188 /* disable interface */
4189 dsi_if_enable(dsidev, 0);
4190 dsi_vc_enable(dsidev, 0, 0);
4191 dsi_vc_enable(dsidev, 1, 0);
4192 dsi_vc_enable(dsidev, 2, 0);
4193 dsi_vc_enable(dsidev, 3, 0);
4194
4195 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4196 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4197 dsi_cio_uninit(dsidev);
4198 dsi_pll_uninit(dsidev, disconnect_lanes);
3442} 4199}
3443 4200
3444static int dsi_core_init(void) 4201static int dsi_core_init(struct platform_device *dsidev)
3445{ 4202{
3446 /* Autoidle */ 4203 /* Autoidle */
3447 REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0); 4204 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
3448 4205
3449 /* ENWAKEUP */ 4206 /* ENWAKEUP */
3450 REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2); 4207 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
3451 4208
3452 /* SIDLEMODE smart-idle */ 4209 /* SIDLEMODE smart-idle */
3453 REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3); 4210 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
3454 4211
3455 _dsi_initialize_irq(); 4212 _dsi_initialize_irq(dsidev);
3456 4213
3457 return 0; 4214 return 0;
3458} 4215}
3459 4216
3460int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) 4217int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3461{ 4218{
4219 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4220 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3462 int r = 0; 4221 int r = 0;
3463 4222
3464 DSSDBG("dsi_display_enable\n"); 4223 DSSDBG("dsi_display_enable\n");
3465 4224
3466 WARN_ON(!dsi_bus_is_locked()); 4225 WARN_ON(!dsi_bus_is_locked(dsidev));
3467 4226
3468 mutex_lock(&dsi.lock); 4227 mutex_lock(&dsi->lock);
3469 4228
3470 r = omap_dss_start_device(dssdev); 4229 r = omap_dss_start_device(dssdev);
3471 if (r) { 4230 if (r) {
@@ -3474,13 +4233,13 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3474 } 4233 }
3475 4234
3476 enable_clocks(1); 4235 enable_clocks(1);
3477 dsi_enable_pll_clock(1); 4236 dsi_enable_pll_clock(dsidev, 1);
3478 4237
3479 r = _dsi_reset(); 4238 r = _dsi_reset(dsidev);
3480 if (r) 4239 if (r)
3481 goto err1; 4240 goto err1;
3482 4241
3483 dsi_core_init(); 4242 dsi_core_init(dsidev);
3484 4243
3485 r = dsi_display_init_dispc(dssdev); 4244 r = dsi_display_init_dispc(dssdev);
3486 if (r) 4245 if (r)
@@ -3490,7 +4249,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3490 if (r) 4249 if (r)
3491 goto err2; 4250 goto err2;
3492 4251
3493 mutex_unlock(&dsi.lock); 4252 mutex_unlock(&dsi->lock);
3494 4253
3495 return 0; 4254 return 0;
3496 4255
@@ -3498,39 +4257,46 @@ err2:
3498 dsi_display_uninit_dispc(dssdev); 4257 dsi_display_uninit_dispc(dssdev);
3499err1: 4258err1:
3500 enable_clocks(0); 4259 enable_clocks(0);
3501 dsi_enable_pll_clock(0); 4260 dsi_enable_pll_clock(dsidev, 0);
3502 omap_dss_stop_device(dssdev); 4261 omap_dss_stop_device(dssdev);
3503err0: 4262err0:
3504 mutex_unlock(&dsi.lock); 4263 mutex_unlock(&dsi->lock);
3505 DSSDBG("dsi_display_enable FAILED\n"); 4264 DSSDBG("dsi_display_enable FAILED\n");
3506 return r; 4265 return r;
3507} 4266}
3508EXPORT_SYMBOL(omapdss_dsi_display_enable); 4267EXPORT_SYMBOL(omapdss_dsi_display_enable);
3509 4268
3510void omapdss_dsi_display_disable(struct omap_dss_device *dssdev) 4269void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
4270 bool disconnect_lanes, bool enter_ulps)
3511{ 4271{
4272 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4273 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4274
3512 DSSDBG("dsi_display_disable\n"); 4275 DSSDBG("dsi_display_disable\n");
3513 4276
3514 WARN_ON(!dsi_bus_is_locked()); 4277 WARN_ON(!dsi_bus_is_locked(dsidev));
3515 4278
3516 mutex_lock(&dsi.lock); 4279 mutex_lock(&dsi->lock);
3517 4280
3518 dsi_display_uninit_dispc(dssdev); 4281 dsi_display_uninit_dispc(dssdev);
3519 4282
3520 dsi_display_uninit_dsi(dssdev); 4283 dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
3521 4284
3522 enable_clocks(0); 4285 enable_clocks(0);
3523 dsi_enable_pll_clock(0); 4286 dsi_enable_pll_clock(dsidev, 0);
3524 4287
3525 omap_dss_stop_device(dssdev); 4288 omap_dss_stop_device(dssdev);
3526 4289
3527 mutex_unlock(&dsi.lock); 4290 mutex_unlock(&dsi->lock);
3528} 4291}
3529EXPORT_SYMBOL(omapdss_dsi_display_disable); 4292EXPORT_SYMBOL(omapdss_dsi_display_disable);
3530 4293
3531int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable) 4294int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
3532{ 4295{
3533 dsi.te_enabled = enable; 4296 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4297 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4298
4299 dsi->te_enabled = enable;
3534 return 0; 4300 return 0;
3535} 4301}
3536EXPORT_SYMBOL(omapdss_dsi_enable_te); 4302EXPORT_SYMBOL(omapdss_dsi_enable_te);
@@ -3550,23 +4316,33 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
3550 4316
3551int dsi_init_display(struct omap_dss_device *dssdev) 4317int dsi_init_display(struct omap_dss_device *dssdev)
3552{ 4318{
4319 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4320 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4321 int dsi_module = dsi_get_dsidev_id(dsidev);
4322
3553 DSSDBG("DSI init\n"); 4323 DSSDBG("DSI init\n");
3554 4324
3555 /* XXX these should be figured out dynamically */ 4325 /* XXX these should be figured out dynamically */
3556 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | 4326 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
3557 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; 4327 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
3558 4328
3559 if (dsi.vdds_dsi_reg == NULL) { 4329 if (dsi->vdds_dsi_reg == NULL) {
3560 struct regulator *vdds_dsi; 4330 struct regulator *vdds_dsi;
3561 4331
3562 vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); 4332 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
3563 4333
3564 if (IS_ERR(vdds_dsi)) { 4334 if (IS_ERR(vdds_dsi)) {
3565 DSSERR("can't get VDDS_DSI regulator\n"); 4335 DSSERR("can't get VDDS_DSI regulator\n");
3566 return PTR_ERR(vdds_dsi); 4336 return PTR_ERR(vdds_dsi);
3567 } 4337 }
3568 4338
3569 dsi.vdds_dsi_reg = vdds_dsi; 4339 dsi->vdds_dsi_reg = vdds_dsi;
4340 }
4341
4342 if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) {
4343 DSSERR("DSI%d can't support more than %d data lanes\n",
4344 dsi_module + 1, dsi->num_data_lanes);
4345 return -EINVAL;
3570 } 4346 }
3571 4347
3572 return 0; 4348 return 0;
@@ -3574,11 +4350,13 @@ int dsi_init_display(struct omap_dss_device *dssdev)
3574 4350
3575int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel) 4351int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
3576{ 4352{
4353 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4354 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3577 int i; 4355 int i;
3578 4356
3579 for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { 4357 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
3580 if (!dsi.vc[i].dssdev) { 4358 if (!dsi->vc[i].dssdev) {
3581 dsi.vc[i].dssdev = dssdev; 4359 dsi->vc[i].dssdev = dssdev;
3582 *channel = i; 4360 *channel = i;
3583 return 0; 4361 return 0;
3584 } 4362 }
@@ -3591,6 +4369,9 @@ EXPORT_SYMBOL(omap_dsi_request_vc);
3591 4369
3592int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id) 4370int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
3593{ 4371{
4372 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4373 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4374
3594 if (vc_id < 0 || vc_id > 3) { 4375 if (vc_id < 0 || vc_id > 3) {
3595 DSSERR("VC ID out of range\n"); 4376 DSSERR("VC ID out of range\n");
3596 return -EINVAL; 4377 return -EINVAL;
@@ -3601,13 +4382,13 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
3601 return -EINVAL; 4382 return -EINVAL;
3602 } 4383 }
3603 4384
3604 if (dsi.vc[channel].dssdev != dssdev) { 4385 if (dsi->vc[channel].dssdev != dssdev) {
3605 DSSERR("Virtual Channel not allocated to display %s\n", 4386 DSSERR("Virtual Channel not allocated to display %s\n",
3606 dssdev->name); 4387 dssdev->name);
3607 return -EINVAL; 4388 return -EINVAL;
3608 } 4389 }
3609 4390
3610 dsi.vc[channel].vc_id = vc_id; 4391 dsi->vc[channel].vc_id = vc_id;
3611 4392
3612 return 0; 4393 return 0;
3613} 4394}
@@ -3615,143 +4396,172 @@ EXPORT_SYMBOL(omap_dsi_set_vc_id);
3615 4396
3616void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel) 4397void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
3617{ 4398{
4399 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4400 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4401
3618 if ((channel >= 0 && channel <= 3) && 4402 if ((channel >= 0 && channel <= 3) &&
3619 dsi.vc[channel].dssdev == dssdev) { 4403 dsi->vc[channel].dssdev == dssdev) {
3620 dsi.vc[channel].dssdev = NULL; 4404 dsi->vc[channel].dssdev = NULL;
3621 dsi.vc[channel].vc_id = 0; 4405 dsi->vc[channel].vc_id = 0;
3622 } 4406 }
3623} 4407}
3624EXPORT_SYMBOL(omap_dsi_release_vc); 4408EXPORT_SYMBOL(omap_dsi_release_vc);
3625 4409
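Note: omap_dsi_request_vc() earlier in this hunk hands out the first free entry of the fixed dsi->vc[] array, and omap_dsi_release_vc() above clears that entry again. A minimal userspace sketch of the same first-free-slot scheme; the names (vc_table, vc_request, vc_release) are made up for illustration and are not the driver's structures:

#include <stdio.h>
#include <stddef.h>

#define NUM_VC 4

struct vc_table {
	const void *owner[NUM_VC];	/* NULL means the channel is free */
};

/* returns the claimed channel index, or -1 if all four are busy */
static int vc_request(struct vc_table *t, const void *owner)
{
	int i;

	for (i = 0; i < NUM_VC; i++) {
		if (!t->owner[i]) {
			t->owner[i] = owner;
			return i;
		}
	}
	return -1;
}

static void vc_release(struct vc_table *t, const void *owner, int channel)
{
	if (channel >= 0 && channel < NUM_VC && t->owner[channel] == owner)
		t->owner[channel] = NULL;
}

int main(void)
{
	struct vc_table t = { { NULL } };
	int display_token = 0;
	int ch = vc_request(&t, &display_token);

	printf("display got channel %d\n", ch);
	vc_release(&t, &display_token, ch);
	return 0;
}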
3626void dsi_wait_pll_hsdiv_dispc_active(void) 4410void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
3627{ 4411{
3628 if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1) 4412 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
3629 DSSERR("%s (%s) not active\n", 4413 DSSERR("%s (%s) not active\n",
3630 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), 4414 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
3631 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC)); 4415 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
3632} 4416}
3633 4417
3634void dsi_wait_pll_hsdiv_dsi_active(void) 4418void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
3635{ 4419{
3636 if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1) 4420 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
3637 DSSERR("%s (%s) not active\n", 4421 DSSERR("%s (%s) not active\n",
3638 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), 4422 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
3639 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI)); 4423 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
3640} 4424}
3641 4425
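Note: dsi_wait_pll_hsdiv_dispc_active() and dsi_wait_pll_hsdiv_dsi_active() above just poll one DSI_PLL_STATUS bit via wait_for_bit_change() and print a DSSERR if the bit never reaches 1. A rough standalone sketch of that poll-until-set-or-give-up idiom; the real wait_for_bit_change() is not shown in this hunk, so the retry limit and fake register below are guessed for illustration:

#include <stdio.h>
#include <stdint.h>

/* pretend status register; bit 7 comes up after a few reads */
static uint32_t read_status(void)
{
	static int reads;
	return (++reads > 3) ? (1u << 7) : 0;
}

/* poll bit `bit` until it equals `value`; return the final bit state */
static int wait_for_bit(int bit, int value, int max_tries)
{
	int t;

	for (t = 0; t < max_tries; t++) {
		int cur = (read_status() >> bit) & 1;
		if (cur == value)
			return cur;
	}
	return (read_status() >> bit) & 1;	/* caller checks for mismatch */
}

int main(void)
{
	if (wait_for_bit(7, 1, 100) != 1)
		fprintf(stderr, "HSDIV clock not active\n");
	else
		printf("HSDIV clock active\n");
	return 0;
}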
3642static void dsi_calc_clock_param_ranges(void) 4426static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
3643{ 4427{
3644 dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN); 4428 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3645 dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM); 4429
3646 dsi.regm_dispc_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC); 4430 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
3647 dsi.regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI); 4431 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
3648 dsi.fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT); 4432 dsi->regm_dispc_max =
3649 dsi.fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT); 4433 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
3650 dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); 4434 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
4435 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
4436 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
4437 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
3651} 4438}
3652 4439
3653static int dsi_init(struct platform_device *pdev) 4440static int dsi_init(struct platform_device *dsidev)
3654{ 4441{
4442 struct omap_display_platform_data *dss_plat_data;
4443 struct omap_dss_board_info *board_info;
3655 u32 rev; 4444 u32 rev;
3656 int r, i; 4445 int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
3657 struct resource *dsi_mem; 4446 struct resource *dsi_mem;
4447 struct dsi_data *dsi;
4448
4449 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
4450 if (!dsi) {
4451 r = -ENOMEM;
4452 goto err0;
4453 }
4454
4455 dsi->pdev = dsidev;
4456 dsi_pdev_map[dsi_module] = dsidev;
4457 dev_set_drvdata(&dsidev->dev, dsi);
4458
4459 dss_plat_data = dsidev->dev.platform_data;
4460 board_info = dss_plat_data->board_data;
4461 dsi->dsi_mux_pads = board_info->dsi_mux_pads;
3658 4462
3659 spin_lock_init(&dsi.irq_lock); 4463 spin_lock_init(&dsi->irq_lock);
3660 spin_lock_init(&dsi.errors_lock); 4464 spin_lock_init(&dsi->errors_lock);
3661 dsi.errors = 0; 4465 dsi->errors = 0;
3662 4466
3663#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 4467#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
3664 spin_lock_init(&dsi.irq_stats_lock); 4468 spin_lock_init(&dsi->irq_stats_lock);
3665 dsi.irq_stats.last_reset = jiffies; 4469 dsi->irq_stats.last_reset = jiffies;
3666#endif 4470#endif
3667 4471
3668 mutex_init(&dsi.lock); 4472 mutex_init(&dsi->lock);
3669 sema_init(&dsi.bus_lock, 1); 4473 sema_init(&dsi->bus_lock, 1);
3670 4474
3671 dsi.workqueue = create_singlethread_workqueue("dsi"); 4475 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
3672 if (dsi.workqueue == NULL)
3673 return -ENOMEM;
3674
3675 INIT_DELAYED_WORK_DEFERRABLE(&dsi.framedone_timeout_work,
3676 dsi_framedone_timeout_work_callback); 4476 dsi_framedone_timeout_work_callback);
3677 4477
3678#ifdef DSI_CATCH_MISSING_TE 4478#ifdef DSI_CATCH_MISSING_TE
3679 init_timer(&dsi.te_timer); 4479 init_timer(&dsi->te_timer);
3680 dsi.te_timer.function = dsi_te_timeout; 4480 dsi->te_timer.function = dsi_te_timeout;
3681 dsi.te_timer.data = 0; 4481 dsi->te_timer.data = 0;
3682#endif 4482#endif
3683 dsi_mem = platform_get_resource(dsi.pdev, IORESOURCE_MEM, 0); 4483 dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
3684 if (!dsi_mem) { 4484 if (!dsi_mem) {
3685 DSSERR("can't get IORESOURCE_MEM DSI\n"); 4485 DSSERR("can't get IORESOURCE_MEM DSI\n");
3686 r = -EINVAL; 4486 r = -EINVAL;
3687 goto err1; 4487 goto err1;
3688 } 4488 }
3689 dsi.base = ioremap(dsi_mem->start, resource_size(dsi_mem)); 4489 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
3690 if (!dsi.base) { 4490 if (!dsi->base) {
3691 DSSERR("can't ioremap DSI\n"); 4491 DSSERR("can't ioremap DSI\n");
3692 r = -ENOMEM; 4492 r = -ENOMEM;
3693 goto err1; 4493 goto err1;
3694 } 4494 }
3695 dsi.irq = platform_get_irq(dsi.pdev, 0); 4495 dsi->irq = platform_get_irq(dsi->pdev, 0);
3696 if (dsi.irq < 0) { 4496 if (dsi->irq < 0) {
3697 DSSERR("platform_get_irq failed\n"); 4497 DSSERR("platform_get_irq failed\n");
3698 r = -ENODEV; 4498 r = -ENODEV;
3699 goto err2; 4499 goto err2;
3700 } 4500 }
3701 4501
3702 r = request_irq(dsi.irq, omap_dsi_irq_handler, IRQF_SHARED, 4502 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
3703 "OMAP DSI1", dsi.pdev); 4503 dev_name(&dsidev->dev), dsi->pdev);
3704 if (r < 0) { 4504 if (r < 0) {
3705 DSSERR("request_irq failed\n"); 4505 DSSERR("request_irq failed\n");
3706 goto err2; 4506 goto err2;
3707 } 4507 }
3708 4508
3709 /* DSI VCs initialization */ 4509 /* DSI VCs initialization */
3710 for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { 4510 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
3711 dsi.vc[i].mode = DSI_VC_MODE_L4; 4511 dsi->vc[i].mode = DSI_VC_MODE_L4;
3712 dsi.vc[i].dssdev = NULL; 4512 dsi->vc[i].dssdev = NULL;
3713 dsi.vc[i].vc_id = 0; 4513 dsi->vc[i].vc_id = 0;
3714 } 4514 }
3715 4515
3716 dsi_calc_clock_param_ranges(); 4516 dsi_calc_clock_param_ranges(dsidev);
3717 4517
3718 enable_clocks(1); 4518 enable_clocks(1);
3719 4519
3720 rev = dsi_read_reg(DSI_REVISION); 4520 rev = dsi_read_reg(dsidev, DSI_REVISION);
3721 dev_dbg(&pdev->dev, "OMAP DSI rev %d.%d\n", 4521 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
3722 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); 4522 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3723 4523
4524 dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
4525
3724 enable_clocks(0); 4526 enable_clocks(0);
3725 4527
3726 return 0; 4528 return 0;
3727err2: 4529err2:
3728 iounmap(dsi.base); 4530 iounmap(dsi->base);
3729err1: 4531err1:
3730 destroy_workqueue(dsi.workqueue); 4532 kfree(dsi);
4533err0:
3731 return r; 4534 return r;
3732} 4535}
3733 4536
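Note: the dsi_init() hunk above converts the driver from one file-scope dsi struct to a per-device struct dsi_data that is allocated with kzalloc(), attached to the platform device through dev_set_drvdata(), and read back via dsi_get_dsidrv_data(), so two DSI modules can coexist. The standalone sketch below mimics that pattern outside the kernel; struct fake_device and the helper names are invented stand-ins, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* stand-in for struct device's driver_data pointer */
struct fake_device {
	const char *name;
	void *driver_data;
};

struct fake_dsi_data {
	int module_id;
	int errors;
};

static void fake_set_drvdata(struct fake_device *dev, void *data)
{
	dev->driver_data = data;
}

static struct fake_dsi_data *fake_get_dsidrv_data(struct fake_device *dev)
{
	return dev->driver_data;
}

static int fake_probe(struct fake_device *dev, int module_id)
{
	struct fake_dsi_data *dsi = calloc(1, sizeof(*dsi));

	if (!dsi)
		return -1;
	dsi->module_id = module_id;
	fake_set_drvdata(dev, dsi);	/* per-device state, no globals */
	return 0;
}

int main(void)
{
	struct fake_device dsi1 = { .name = "dsi1" }, dsi2 = { .name = "dsi2" };

	fake_probe(&dsi1, 0);
	fake_probe(&dsi2, 1);
	printf("%s -> module %d\n", dsi2.name,
	       fake_get_dsidrv_data(&dsi2)->module_id);
	free(dsi1.driver_data);
	free(dsi2.driver_data);
	return 0;
}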
3734static void dsi_exit(void) 4537static void dsi_exit(struct platform_device *dsidev)
3735{ 4538{
3736 if (dsi.vdds_dsi_reg != NULL) { 4539 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3737 regulator_put(dsi.vdds_dsi_reg); 4540
3738 dsi.vdds_dsi_reg = NULL; 4541 if (dsi->vdds_dsi_reg != NULL) {
4542 if (dsi->vdds_dsi_enabled) {
4543 regulator_disable(dsi->vdds_dsi_reg);
4544 dsi->vdds_dsi_enabled = false;
4545 }
4546
4547 regulator_put(dsi->vdds_dsi_reg);
4548 dsi->vdds_dsi_reg = NULL;
3739 } 4549 }
3740 4550
3741 free_irq(dsi.irq, dsi.pdev); 4551 free_irq(dsi->irq, dsi->pdev);
3742 iounmap(dsi.base); 4552 iounmap(dsi->base);
3743 4553
3744 destroy_workqueue(dsi.workqueue); 4554 kfree(dsi);
3745 4555
3746 DSSDBG("omap_dsi_exit\n"); 4556 DSSDBG("omap_dsi_exit\n");
3747} 4557}
3748 4558
3749/* DSI1 HW IP initialisation */ 4559/* DSI1 HW IP initialisation */
3750static int omap_dsi1hw_probe(struct platform_device *pdev) 4560static int omap_dsi1hw_probe(struct platform_device *dsidev)
3751{ 4561{
3752 int r; 4562 int r;
3753 dsi.pdev = pdev; 4563
3754 r = dsi_init(pdev); 4564 r = dsi_init(dsidev);
3755 if (r) { 4565 if (r) {
3756 DSSERR("Failed to initialize DSI\n"); 4566 DSSERR("Failed to initialize DSI\n");
3757 goto err_dsi; 4567 goto err_dsi;
@@ -3760,9 +4570,12 @@ err_dsi:
3760 return r; 4570 return r;
3761} 4571}
3762 4572
3763static int omap_dsi1hw_remove(struct platform_device *pdev) 4573static int omap_dsi1hw_remove(struct platform_device *dsidev)
3764{ 4574{
3765 dsi_exit(); 4575 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4576
4577 dsi_exit(dsidev);
4578 WARN_ON(dsi->scp_clk_refcount > 0);
3766 return 0; 4579 return 0;
3767} 4580}
3768 4581
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 3f1fee63c678..d9489d5c4f08 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -29,7 +29,7 @@
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33#include <plat/clock.h> 33#include <plat/clock.h>
34#include "dss.h" 34#include "dss.h"
35#include "dss_features.h" 35#include "dss_features.h"
@@ -45,7 +45,6 @@ struct dss_reg {
45#define DSS_REVISION DSS_REG(0x0000) 45#define DSS_REVISION DSS_REG(0x0000)
46#define DSS_SYSCONFIG DSS_REG(0x0010) 46#define DSS_SYSCONFIG DSS_REG(0x0010)
47#define DSS_SYSSTATUS DSS_REG(0x0014) 47#define DSS_SYSSTATUS DSS_REG(0x0014)
48#define DSS_IRQSTATUS DSS_REG(0x0018)
49#define DSS_CONTROL DSS_REG(0x0040) 48#define DSS_CONTROL DSS_REG(0x0040)
50#define DSS_SDI_CONTROL DSS_REG(0x0044) 49#define DSS_SDI_CONTROL DSS_REG(0x0044)
51#define DSS_PLL_CONTROL DSS_REG(0x0048) 50#define DSS_PLL_CONTROL DSS_REG(0x0048)
@@ -75,17 +74,17 @@ static struct {
75 struct dss_clock_info cache_dss_cinfo; 74 struct dss_clock_info cache_dss_cinfo;
76 struct dispc_clock_info cache_dispc_cinfo; 75 struct dispc_clock_info cache_dispc_cinfo;
77 76
78 enum dss_clk_source dsi_clk_source; 77 enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
79 enum dss_clk_source dispc_clk_source; 78 enum omap_dss_clk_source dispc_clk_source;
80 enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; 79 enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
81 80
82 u32 ctx[DSS_SZ_REGS / sizeof(u32)]; 81 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
83} dss; 82} dss;
84 83
85static const char * const dss_generic_clk_source_names[] = { 84static const char * const dss_generic_clk_source_names[] = {
86 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", 85 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
87 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", 86 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
88 [DSS_CLK_SRC_FCK] = "DSS_FCK", 87 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
89}; 88};
90 89
91static void dss_clk_enable_all_no_ctx(void); 90static void dss_clk_enable_all_no_ctx(void);
@@ -230,7 +229,7 @@ void dss_sdi_disable(void)
230 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ 229 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
231} 230}
232 231
233const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src) 232const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
234{ 233{
235 return dss_generic_clk_source_names[clk_src]; 234 return dss_generic_clk_source_names[clk_src];
236} 235}
@@ -246,8 +245,8 @@ void dss_dump_clocks(struct seq_file *s)
246 245
247 seq_printf(s, "- DSS -\n"); 246 seq_printf(s, "- DSS -\n");
248 247
249 fclk_name = dss_get_generic_clk_source_name(DSS_CLK_SRC_FCK); 248 fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
250 fclk_real_name = dss_feat_get_clk_source_name(DSS_CLK_SRC_FCK); 249 fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
251 fclk_rate = dss_clk_get_rate(DSS_CLK_FCK); 250 fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
252 251
253 if (dss.dpll4_m4_ck) { 252 if (dss.dpll4_m4_ck) {
@@ -286,7 +285,6 @@ void dss_dump_regs(struct seq_file *s)
286 DUMPREG(DSS_REVISION); 285 DUMPREG(DSS_REVISION);
287 DUMPREG(DSS_SYSCONFIG); 286 DUMPREG(DSS_SYSCONFIG);
288 DUMPREG(DSS_SYSSTATUS); 287 DUMPREG(DSS_SYSSTATUS);
289 DUMPREG(DSS_IRQSTATUS);
290 DUMPREG(DSS_CONTROL); 288 DUMPREG(DSS_CONTROL);
291 289
292 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & 290 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -300,18 +298,25 @@ void dss_dump_regs(struct seq_file *s)
300#undef DUMPREG 298#undef DUMPREG
301} 299}
302 300
303void dss_select_dispc_clk_source(enum dss_clk_source clk_src) 301void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
304{ 302{
303 struct platform_device *dsidev;
305 int b; 304 int b;
306 u8 start, end; 305 u8 start, end;
307 306
308 switch (clk_src) { 307 switch (clk_src) {
309 case DSS_CLK_SRC_FCK: 308 case OMAP_DSS_CLK_SRC_FCK:
310 b = 0; 309 b = 0;
311 break; 310 break;
312 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 311 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
313 b = 1; 312 b = 1;
314 dsi_wait_pll_hsdiv_dispc_active(); 313 dsidev = dsi_get_dsidev_from_id(0);
314 dsi_wait_pll_hsdiv_dispc_active(dsidev);
315 break;
316 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
317 b = 2;
318 dsidev = dsi_get_dsidev_from_id(1);
319 dsi_wait_pll_hsdiv_dispc_active(dsidev);
315 break; 320 break;
316 default: 321 default:
317 BUG(); 322 BUG();
@@ -324,17 +329,27 @@ void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
324 dss.dispc_clk_source = clk_src; 329 dss.dispc_clk_source = clk_src;
325} 330}
326 331
327void dss_select_dsi_clk_source(enum dss_clk_source clk_src) 332void dss_select_dsi_clk_source(int dsi_module,
333 enum omap_dss_clk_source clk_src)
328{ 334{
335 struct platform_device *dsidev;
329 int b; 336 int b;
330 337
331 switch (clk_src) { 338 switch (clk_src) {
332 case DSS_CLK_SRC_FCK: 339 case OMAP_DSS_CLK_SRC_FCK:
333 b = 0; 340 b = 0;
334 break; 341 break;
335 case DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: 342 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
343 BUG_ON(dsi_module != 0);
344 b = 1;
345 dsidev = dsi_get_dsidev_from_id(0);
346 dsi_wait_pll_hsdiv_dsi_active(dsidev);
347 break;
348 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
349 BUG_ON(dsi_module != 1);
336 b = 1; 350 b = 1;
337 dsi_wait_pll_hsdiv_dsi_active(); 351 dsidev = dsi_get_dsidev_from_id(1);
352 dsi_wait_pll_hsdiv_dsi_active(dsidev);
338 break; 353 break;
339 default: 354 default:
340 BUG(); 355 BUG();
@@ -342,25 +357,33 @@ void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
342 357
343 REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */ 358 REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
344 359
345 dss.dsi_clk_source = clk_src; 360 dss.dsi_clk_source[dsi_module] = clk_src;
346} 361}
347 362
348void dss_select_lcd_clk_source(enum omap_channel channel, 363void dss_select_lcd_clk_source(enum omap_channel channel,
349 enum dss_clk_source clk_src) 364 enum omap_dss_clk_source clk_src)
350{ 365{
366 struct platform_device *dsidev;
351 int b, ix, pos; 367 int b, ix, pos;
352 368
353 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) 369 if (!dss_has_feature(FEAT_LCD_CLK_SRC))
354 return; 370 return;
355 371
356 switch (clk_src) { 372 switch (clk_src) {
357 case DSS_CLK_SRC_FCK: 373 case OMAP_DSS_CLK_SRC_FCK:
358 b = 0; 374 b = 0;
359 break; 375 break;
360 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 376 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
361 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); 377 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
362 b = 1; 378 b = 1;
363 dsi_wait_pll_hsdiv_dispc_active(); 379 dsidev = dsi_get_dsidev_from_id(0);
380 dsi_wait_pll_hsdiv_dispc_active(dsidev);
381 break;
382 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
383 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2);
384 b = 1;
385 dsidev = dsi_get_dsidev_from_id(1);
386 dsi_wait_pll_hsdiv_dispc_active(dsidev);
364 break; 387 break;
365 default: 388 default:
366 BUG(); 389 BUG();
@@ -373,20 +396,26 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
373 dss.lcd_clk_source[ix] = clk_src; 396 dss.lcd_clk_source[ix] = clk_src;
374} 397}
375 398
376enum dss_clk_source dss_get_dispc_clk_source(void) 399enum omap_dss_clk_source dss_get_dispc_clk_source(void)
377{ 400{
378 return dss.dispc_clk_source; 401 return dss.dispc_clk_source;
379} 402}
380 403
381enum dss_clk_source dss_get_dsi_clk_source(void) 404enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module)
382{ 405{
383 return dss.dsi_clk_source; 406 return dss.dsi_clk_source[dsi_module];
384} 407}
385 408
386enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) 409enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
387{ 410{
388 int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; 411 if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
389 return dss.lcd_clk_source[ix]; 412 int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
413 return dss.lcd_clk_source[ix];
414 } else {
415 /* LCD_CLK source is the same as DISPC_FCLK source for
416 * OMAP2 and OMAP3 */
417 return dss.dispc_clk_source;
418 }
390} 419}
391 420
392/* calculate clock rates using dividers in cinfo */ 421/* calculate clock rates using dividers in cinfo */
@@ -659,13 +688,18 @@ static int dss_init(void)
659 * the kernel resets it */ 688 * the kernel resets it */
660 omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440); 689 omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
661 690
691#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
662 /* We need to wait here a bit, otherwise we sometimes start to 692 /* We need to wait here a bit, otherwise we sometimes start to
663 * get synclost errors, and after that only power cycle will 693 * get synclost errors, and after that only power cycle will
664 * restore DSS functionality. I have no idea why this happens. 694 * restore DSS functionality. I have no idea why this happens.
665 * And we have to wait _before_ resetting the DSS, but after 695 * And we have to wait _before_ resetting the DSS, but after
666 * enabling clocks. 696 * enabling clocks.
697 *
698 * This bug was at least present on OMAP3430. It's unknown
699 * if it happens on OMAP2 or OMAP3630.
667 */ 700 */
668 msleep(50); 701 msleep(50);
702#endif
669 703
670 _omap_dss_reset(); 704 _omap_dss_reset();
671 705
@@ -700,10 +734,11 @@ static int dss_init(void)
700 734
701 dss.dpll4_m4_ck = dpll4_m4_ck; 735 dss.dpll4_m4_ck = dpll4_m4_ck;
702 736
703 dss.dsi_clk_source = DSS_CLK_SRC_FCK; 737 dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
704 dss.dispc_clk_source = DSS_CLK_SRC_FCK; 738 dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
705 dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; 739 dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
706 dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; 740 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
741 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
707 742
708 dss_save_context(); 743 dss_save_context();
709 744
@@ -1015,6 +1050,14 @@ static void core_dump_clocks(struct seq_file *s)
1015 dss.dss_video_fck 1050 dss.dss_video_fck
1016 }; 1051 };
1017 1052
1053 const char *names[5] = {
1054 "ick",
1055 "fck",
1056 "sys_clk",
1057 "tv_fck",
1058 "video_fck"
1059 };
1060
1018 seq_printf(s, "- CORE -\n"); 1061 seq_printf(s, "- CORE -\n");
1019 1062
1020 seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled); 1063 seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
@@ -1022,8 +1065,11 @@ static void core_dump_clocks(struct seq_file *s)
1022 for (i = 0; i < 5; i++) { 1065 for (i = 0; i < 5; i++) {
1023 if (!clocks[i]) 1066 if (!clocks[i])
1024 continue; 1067 continue;
1025 seq_printf(s, "%-15s\t%lu\t%d\n", 1068 seq_printf(s, "%s (%s)%*s\t%lu\t%d\n",
1069 names[i],
1026 clocks[i]->name, 1070 clocks[i]->name,
1071 24 - strlen(names[i]) - strlen(clocks[i]->name),
1072 "",
1027 clk_get_rate(clocks[i]), 1073 clk_get_rate(clocks[i]),
1028 clocks[i]->usecount); 1074 clocks[i]->usecount);
1029 } 1075 }
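Note: the core_dump_clocks() change above prints "name (clk_name)" and then uses "%*s" with an empty string and a computed width (24 - strlen(names[i]) - strlen(clocks[i]->name)) to pad with spaces so the rate and use-count columns line up. The same printf feature in isolation, with made-up clock values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "fck";
	const char *clk  = "dss1_fck";
	unsigned long rate = 172800000;
	int count = 2;

	/* "%*s" with "" emits (24 - strlen(name) - strlen(clk)) spaces */
	printf("%s (%s)%*s\t%lu\t%d\n",
	       name, clk,
	       (int)(24 - strlen(name) - strlen(clk)), "",
	       rate, count);
	return 0;
}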
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index c2f582bb19c0..8ab6d43329bb 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -117,15 +117,6 @@ enum dss_clock {
117 DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/ 117 DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
118}; 118};
119 119
120enum dss_clk_source {
121 DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
122 * OMAP4: PLL1_CLK1 */
123 DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
124 * OMAP4: PLL1_CLK2 */
125 DSS_CLK_SRC_FCK, /* OMAP2/3: DSS1_ALWON_FCLK
126 * OMAP4: DSS_FCLK */
127};
128
129enum dss_hdmi_venc_clk_source_select { 120enum dss_hdmi_venc_clk_source_select {
130 DSS_VENC_TV_CLK = 0, 121 DSS_VENC_TV_CLK = 0,
131 DSS_HDMI_M_PCLK = 1, 122 DSS_HDMI_M_PCLK = 1,
@@ -236,7 +227,7 @@ void dss_clk_enable(enum dss_clock clks);
236void dss_clk_disable(enum dss_clock clks); 227void dss_clk_disable(enum dss_clock clks);
237unsigned long dss_clk_get_rate(enum dss_clock clk); 228unsigned long dss_clk_get_rate(enum dss_clock clk);
238int dss_need_ctx_restore(void); 229int dss_need_ctx_restore(void);
239const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src); 230const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
240void dss_dump_clocks(struct seq_file *s); 231void dss_dump_clocks(struct seq_file *s);
241 232
242void dss_dump_regs(struct seq_file *s); 233void dss_dump_regs(struct seq_file *s);
@@ -248,13 +239,14 @@ void dss_sdi_init(u8 datapairs);
248int dss_sdi_enable(void); 239int dss_sdi_enable(void);
249void dss_sdi_disable(void); 240void dss_sdi_disable(void);
250 241
251void dss_select_dispc_clk_source(enum dss_clk_source clk_src); 242void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src);
252void dss_select_dsi_clk_source(enum dss_clk_source clk_src); 243void dss_select_dsi_clk_source(int dsi_module,
244 enum omap_dss_clk_source clk_src);
253void dss_select_lcd_clk_source(enum omap_channel channel, 245void dss_select_lcd_clk_source(enum omap_channel channel,
254 enum dss_clk_source clk_src); 246 enum omap_dss_clk_source clk_src);
255enum dss_clk_source dss_get_dispc_clk_source(void); 247enum omap_dss_clk_source dss_get_dispc_clk_source(void);
256enum dss_clk_source dss_get_dsi_clk_source(void); 248enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module);
257enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); 249enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
258 250
259void dss_set_venc_output(enum omap_dss_venc_type type); 251void dss_set_venc_output(enum omap_dss_venc_type type);
260void dss_set_dac_pwrdn_bgz(bool enable); 252void dss_set_dac_pwrdn_bgz(bool enable);
@@ -284,31 +276,39 @@ static inline void sdi_exit(void)
284 276
285/* DSI */ 277/* DSI */
286#ifdef CONFIG_OMAP2_DSS_DSI 278#ifdef CONFIG_OMAP2_DSS_DSI
279
280struct dentry;
281struct file_operations;
282
287int dsi_init_platform_driver(void); 283int dsi_init_platform_driver(void);
288void dsi_uninit_platform_driver(void); 284void dsi_uninit_platform_driver(void);
289 285
290void dsi_dump_clocks(struct seq_file *s); 286void dsi_dump_clocks(struct seq_file *s);
291void dsi_dump_irqs(struct seq_file *s); 287void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
292void dsi_dump_regs(struct seq_file *s); 288 const struct file_operations *debug_fops);
289void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
290 const struct file_operations *debug_fops);
293 291
294void dsi_save_context(void); 292void dsi_save_context(void);
295void dsi_restore_context(void); 293void dsi_restore_context(void);
296 294
297int dsi_init_display(struct omap_dss_device *display); 295int dsi_init_display(struct omap_dss_device *display);
298void dsi_irq_handler(void); 296void dsi_irq_handler(void);
299unsigned long dsi_get_pll_hsdiv_dispc_rate(void); 297unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
300int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo); 298int dsi_pll_set_clock_div(struct platform_device *dsidev,
301int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, 299 struct dsi_clock_info *cinfo);
302 struct dsi_clock_info *cinfo, 300int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
301 unsigned long req_pck, struct dsi_clock_info *cinfo,
303 struct dispc_clock_info *dispc_cinfo); 302 struct dispc_clock_info *dispc_cinfo);
304int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, 303int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
305 bool enable_hsdiv); 304 bool enable_hsdiv);
306void dsi_pll_uninit(void); 305void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
307void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, 306void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
308 u32 fifo_size, enum omap_burst_size *burst_size, 307 u32 fifo_size, enum omap_burst_size *burst_size,
309 u32 *fifo_low, u32 *fifo_high); 308 u32 *fifo_low, u32 *fifo_high);
310void dsi_wait_pll_hsdiv_dispc_active(void); 309void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
311void dsi_wait_pll_hsdiv_dsi_active(void); 310void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
311struct platform_device *dsi_get_dsidev_from_id(int module);
312#else 312#else
313static inline int dsi_init_platform_driver(void) 313static inline int dsi_init_platform_driver(void)
314{ 314{
@@ -317,17 +317,47 @@ static inline int dsi_init_platform_driver(void)
317static inline void dsi_uninit_platform_driver(void) 317static inline void dsi_uninit_platform_driver(void)
318{ 318{
319} 319}
320static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(void) 320static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
321{ 321{
322 WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); 322 WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
323 return 0; 323 return 0;
324} 324}
325static inline void dsi_wait_pll_hsdiv_dispc_active(void) 325static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
326 struct dsi_clock_info *cinfo)
327{
328 WARN("%s: DSI not compiled in\n", __func__);
329 return -ENODEV;
330}
331static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
332 bool is_tft, unsigned long req_pck,
333 struct dsi_clock_info *dsi_cinfo,
334 struct dispc_clock_info *dispc_cinfo)
335{
336 WARN("%s: DSI not compiled in\n", __func__);
337 return -ENODEV;
338}
339static inline int dsi_pll_init(struct platform_device *dsidev,
340 bool enable_hsclk, bool enable_hsdiv)
326{ 341{
342 WARN("%s: DSI not compiled in\n", __func__);
343 return -ENODEV;
327} 344}
328static inline void dsi_wait_pll_hsdiv_dsi_active(void) 345static inline void dsi_pll_uninit(struct platform_device *dsidev,
346 bool disconnect_lanes)
329{ 347{
330} 348}
349static inline void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
350{
351}
352static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
353{
354}
355static inline struct platform_device *dsi_get_dsidev_from_id(int module)
356{
357 WARN("%s: DSI not compiled in, returning platform device as NULL\n",
358 __func__);
359 return NULL;
360}
331#endif 361#endif
332 362
333/* DPI */ 363/* DPI */
@@ -391,7 +421,8 @@ int dispc_setup_plane(enum omap_plane plane,
391 enum omap_dss_rotation_type rotation_type, 421 enum omap_dss_rotation_type rotation_type,
392 u8 rotation, bool mirror, 422 u8 rotation, bool mirror,
393 u8 global_alpha, u8 pre_mult_alpha, 423 u8 global_alpha, u8 pre_mult_alpha,
394 enum omap_channel channel); 424 enum omap_channel channel,
425 u32 puv_addr);
395 426
396bool dispc_go_busy(enum omap_channel channel); 427bool dispc_go_busy(enum omap_channel channel);
397void dispc_go(enum omap_channel channel); 428void dispc_go(enum omap_channel channel);
@@ -485,13 +516,6 @@ void hdmi_panel_exit(void);
485int rfbi_init_platform_driver(void); 516int rfbi_init_platform_driver(void);
486void rfbi_uninit_platform_driver(void); 517void rfbi_uninit_platform_driver(void);
487void rfbi_dump_regs(struct seq_file *s); 518void rfbi_dump_regs(struct seq_file *s);
488
489int rfbi_configure(int rfbi_module, int bpp, int lines);
490void rfbi_enable_rfbi(bool enable);
491void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
492 u16 height, void (callback)(void *data), void *data);
493void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
494unsigned long rfbi_get_max_tx_rate(void);
495int rfbi_init_display(struct omap_dss_device *display); 519int rfbi_init_display(struct omap_dss_device *display);
496#else 520#else
497static inline int rfbi_init_platform_driver(void) 521static inline int rfbi_init_platform_driver(void)
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index aa1622241d0d..1c18888e5df3 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -22,7 +22,7 @@
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24 24
25#include <plat/display.h> 25#include <video/omapdss.h>
26#include <plat/cpu.h> 26#include <plat/cpu.h>
27 27
28#include "dss.h" 28#include "dss.h"
@@ -52,7 +52,7 @@ struct omap_dss_features {
52}; 52};
53 53
54/* This struct is assigned to one of the below during initialization */ 54/* This struct is assigned to one of the below during initialization */
55static struct omap_dss_features *omap_current_dss_features; 55static const struct omap_dss_features *omap_current_dss_features;
56 56
57static const struct dss_reg_field omap2_dss_reg_fields[] = { 57static const struct dss_reg_field omap2_dss_reg_fields[] = {
58 [FEAT_REG_FIRHINC] = { 11, 0 }, 58 [FEAT_REG_FIRHINC] = { 11, 0 },
@@ -177,22 +177,55 @@ static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
177 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32, 177 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
178}; 178};
179 179
180static const enum omap_color_mode omap4_dss_supported_color_modes[] = {
181 /* OMAP_DSS_GFX */
182 OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
183 OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
184 OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
185 OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
186 OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
187 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32 |
188 OMAP_DSS_COLOR_ARGB16_1555,
189
190 /* OMAP_DSS_VIDEO1 */
191 OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
192 OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
193 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
194 OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
195 OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
196 OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
197 OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
198 OMAP_DSS_COLOR_RGBX32,
199
200 /* OMAP_DSS_VIDEO2 */
201 OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
202 OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
203 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
204 OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
205 OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
206 OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
207 OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
208 OMAP_DSS_COLOR_RGBX32,
209};
210
180static const char * const omap2_dss_clk_source_names[] = { 211static const char * const omap2_dss_clk_source_names[] = {
181 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", 212 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
182 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", 213 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
183 [DSS_CLK_SRC_FCK] = "DSS_FCLK1", 214 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1",
184}; 215};
185 216
186static const char * const omap3_dss_clk_source_names[] = { 217static const char * const omap3_dss_clk_source_names[] = {
187 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", 218 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
188 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", 219 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
189 [DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", 220 [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
190}; 221};
191 222
192static const char * const omap4_dss_clk_source_names[] = { 223static const char * const omap4_dss_clk_source_names[] = {
193 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", 224 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
194 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2", 225 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
195 [DSS_CLK_SRC_FCK] = "DSS_FCLK", 226 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK",
227 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1",
228 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2",
196}; 229};
197 230
198static const struct dss_param_range omap2_dss_param_range[] = { 231static const struct dss_param_range omap2_dss_param_range[] = {
@@ -226,7 +259,7 @@ static const struct dss_param_range omap4_dss_param_range[] = {
226}; 259};
227 260
228/* OMAP2 DSS Features */ 261/* OMAP2 DSS Features */
229static struct omap_dss_features omap2_dss_features = { 262static const struct omap_dss_features omap2_dss_features = {
230 .reg_fields = omap2_dss_reg_fields, 263 .reg_fields = omap2_dss_reg_fields,
231 .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields), 264 .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
232 265
@@ -244,7 +277,7 @@ static struct omap_dss_features omap2_dss_features = {
244}; 277};
245 278
246/* OMAP3 DSS Features */ 279/* OMAP3 DSS Features */
247static struct omap_dss_features omap3430_dss_features = { 280static const struct omap_dss_features omap3430_dss_features = {
248 .reg_fields = omap3_dss_reg_fields, 281 .reg_fields = omap3_dss_reg_fields,
249 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), 282 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
250 283
@@ -252,7 +285,8 @@ static struct omap_dss_features omap3430_dss_features = {
252 FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL | 285 FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
253 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | 286 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
254 FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | 287 FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
255 FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF, 288 FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
289 FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC,
256 290
257 .num_mgrs = 2, 291 .num_mgrs = 2,
258 .num_ovls = 3, 292 .num_ovls = 3,
@@ -262,7 +296,7 @@ static struct omap_dss_features omap3430_dss_features = {
262 .dss_params = omap3_dss_param_range, 296 .dss_params = omap3_dss_param_range,
263}; 297};
264 298
265static struct omap_dss_features omap3630_dss_features = { 299static const struct omap_dss_features omap3630_dss_features = {
266 .reg_fields = omap3_dss_reg_fields, 300 .reg_fields = omap3_dss_reg_fields,
267 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), 301 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
268 302
@@ -271,7 +305,8 @@ static struct omap_dss_features omap3630_dss_features = {
271 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | 305 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
272 FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED | 306 FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
273 FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | 307 FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
274 FEAT_RESIZECONF, 308 FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
309 FEAT_DSI_PLL_FREQSEL,
275 310
276 .num_mgrs = 2, 311 .num_mgrs = 2,
277 .num_ovls = 3, 312 .num_ovls = 3,
@@ -282,19 +317,43 @@ static struct omap_dss_features omap3630_dss_features = {
282}; 317};
283 318
284/* OMAP4 DSS Features */ 319/* OMAP4 DSS Features */
285static struct omap_dss_features omap4_dss_features = { 320/* For OMAP4430 ES 1.0 revision */
321static const struct omap_dss_features omap4430_es1_0_dss_features = {
286 .reg_fields = omap4_dss_reg_fields, 322 .reg_fields = omap4_dss_reg_fields,
287 .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields), 323 .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
288 324
289 .has_feature = 325 .has_feature =
290 FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA | 326 FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
291 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | 327 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
292 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC, 328 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
329 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
330 FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
293 331
294 .num_mgrs = 3, 332 .num_mgrs = 3,
295 .num_ovls = 3, 333 .num_ovls = 3,
296 .supported_displays = omap4_dss_supported_displays, 334 .supported_displays = omap4_dss_supported_displays,
297 .supported_color_modes = omap3_dss_supported_color_modes, 335 .supported_color_modes = omap4_dss_supported_color_modes,
336 .clksrc_names = omap4_dss_clk_source_names,
337 .dss_params = omap4_dss_param_range,
338};
339
340/* For all the other OMAP4 versions */
341static const struct omap_dss_features omap4_dss_features = {
342 .reg_fields = omap4_dss_reg_fields,
343 .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
344
345 .has_feature =
346 FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
347 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
348 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
349 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
350 FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
351 FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
352
353 .num_mgrs = 3,
354 .num_ovls = 3,
355 .supported_displays = omap4_dss_supported_displays,
356 .supported_color_modes = omap4_dss_supported_color_modes,
298 .clksrc_names = omap4_dss_clk_source_names, 357 .clksrc_names = omap4_dss_clk_source_names,
299 .dss_params = omap4_dss_param_range, 358 .dss_params = omap4_dss_param_range,
300}; 359};
@@ -337,7 +396,7 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
337 color_mode; 396 color_mode;
338} 397}
339 398
340const char *dss_feat_get_clk_source_name(enum dss_clk_source id) 399const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
341{ 400{
342 return omap_current_dss_features->clksrc_names[id]; 401 return omap_current_dss_features->clksrc_names[id];
343} 402}
@@ -365,6 +424,10 @@ void dss_features_init(void)
365 omap_current_dss_features = &omap3630_dss_features; 424 omap_current_dss_features = &omap3630_dss_features;
366 else if (cpu_is_omap34xx()) 425 else if (cpu_is_omap34xx())
367 omap_current_dss_features = &omap3430_dss_features; 426 omap_current_dss_features = &omap3430_dss_features;
368 else 427 else if (omap_rev() == OMAP4430_REV_ES1_0)
428 omap_current_dss_features = &omap4430_es1_0_dss_features;
429 else if (cpu_is_omap44xx())
369 omap_current_dss_features = &omap4_dss_features; 430 omap_current_dss_features = &omap4_dss_features;
431 else
432 DSSWARN("Unsupported OMAP version");
370} 433}
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 12e9c4ef0dec..07b346f7d916 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -23,23 +23,34 @@
23#define MAX_DSS_MANAGERS 3 23#define MAX_DSS_MANAGERS 3
24#define MAX_DSS_OVERLAYS 3 24#define MAX_DSS_OVERLAYS 3
25#define MAX_DSS_LCD_MANAGERS 2 25#define MAX_DSS_LCD_MANAGERS 2
26#define MAX_NUM_DSI 2
26 27
27/* DSS has feature id */ 28/* DSS has feature id */
28enum dss_feat_id { 29enum dss_feat_id {
29 FEAT_GLOBAL_ALPHA = 1 << 0, 30 FEAT_GLOBAL_ALPHA = 1 << 0,
30 FEAT_GLOBAL_ALPHA_VID1 = 1 << 1, 31 FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
31 FEAT_PRE_MULT_ALPHA = 1 << 2, 32 FEAT_PRE_MULT_ALPHA = 1 << 2,
32 FEAT_LCDENABLEPOL = 1 << 3, 33 FEAT_LCDENABLEPOL = 1 << 3,
33 FEAT_LCDENABLESIGNAL = 1 << 4, 34 FEAT_LCDENABLESIGNAL = 1 << 4,
34 FEAT_PCKFREEENABLE = 1 << 5, 35 FEAT_PCKFREEENABLE = 1 << 5,
35 FEAT_FUNCGATED = 1 << 6, 36 FEAT_FUNCGATED = 1 << 6,
36 FEAT_MGR_LCD2 = 1 << 7, 37 FEAT_MGR_LCD2 = 1 << 7,
37 FEAT_LINEBUFFERSPLIT = 1 << 8, 38 FEAT_LINEBUFFERSPLIT = 1 << 8,
38 FEAT_ROWREPEATENABLE = 1 << 9, 39 FEAT_ROWREPEATENABLE = 1 << 9,
39 FEAT_RESIZECONF = 1 << 10, 40 FEAT_RESIZECONF = 1 << 10,
40 /* Independent core clk divider */ 41 /* Independent core clk divider */
41 FEAT_CORE_CLK_DIV = 1 << 11, 42 FEAT_CORE_CLK_DIV = 1 << 11,
42 FEAT_LCD_CLK_SRC = 1 << 12, 43 FEAT_LCD_CLK_SRC = 1 << 12,
44 /* DSI-PLL power command 0x3 is not working */
45 FEAT_DSI_PLL_PWR_BUG = 1 << 13,
46 FEAT_DSI_PLL_FREQSEL = 1 << 14,
47 FEAT_DSI_DCS_CMD_CONFIG_VC = 1 << 15,
48 FEAT_DSI_VC_OCP_WIDTH = 1 << 16,
49 FEAT_DSI_REVERSE_TXCLKESC = 1 << 17,
50 FEAT_DSI_GNQ = 1 << 18,
51 FEAT_HDMI_CTS_SWMODE = 1 << 19,
52 FEAT_HANDLE_UV_SEPARATE = 1 << 20,
53 FEAT_ATTR2 = 1 << 21,
43}; 54};
44 55
45/* DSS register field id */ 56/* DSS register field id */
@@ -77,7 +88,7 @@ enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel
77enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); 88enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
78bool dss_feat_color_mode_supported(enum omap_plane plane, 89bool dss_feat_color_mode_supported(enum omap_plane plane,
79 enum omap_color_mode color_mode); 90 enum omap_color_mode color_mode);
80const char *dss_feat_get_clk_source_name(enum dss_clk_source id); 91const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
81 92
82bool dss_has_feature(enum dss_feat_id id); 93bool dss_has_feature(enum dss_feat_id id);
83void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); 94void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index a981def8099a..b0555f4f0a78 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -29,10 +29,16 @@
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <plat/display.h> 32#include <video/omapdss.h>
33#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
34 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
35#include <sound/soc.h>
36#include <sound/pcm_params.h>
37#endif
33 38
34#include "dss.h" 39#include "dss.h"
35#include "hdmi.h" 40#include "hdmi.h"
41#include "dss_features.h"
36 42
37static struct { 43static struct {
38 struct mutex lock; 44 struct mutex lock;
@@ -1052,25 +1058,26 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
1052 cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol; 1058 cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol;
1053} 1059}
1054 1060
1055static void hdmi_compute_pll(unsigned long clkin, int phy, 1061static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
1056 int n, struct hdmi_pll_info *pi) 1062 struct hdmi_pll_info *pi)
1057{ 1063{
1058 unsigned long refclk; 1064 unsigned long clkin, refclk;
1059 u32 mf; 1065 u32 mf;
1060 1066
1067 clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000;
1061 /* 1068 /*
1062 * Input clock is predivided by N + 1 1069 * Input clock is predivided by N + 1
1063 * out put of which is reference clk 1070 * out put of which is reference clk
1064 */ 1071 */
1065 refclk = clkin / (n + 1); 1072 pi->regn = dssdev->clocks.hdmi.regn;
1066 pi->regn = n; 1073 refclk = clkin / (pi->regn + 1);
1067 1074
1068 /* 1075 /*
1069 * multiplier is pixel_clk/ref_clk 1076 * multiplier is pixel_clk/ref_clk
1070 * Multiplying by 100 to avoid fractional part removal 1077 * Multiplying by 100 to avoid fractional part removal
1071 */ 1078 */
1072 pi->regm = (phy * 100/(refclk))/100; 1079 pi->regm = (phy * 100 / (refclk)) / 100;
1073 pi->regm2 = 1; 1080 pi->regm2 = dssdev->clocks.hdmi.regm2;
1074 1081
1075 /* 1082 /*
1076 * fractional multiplier is remainder of the difference between 1083 * fractional multiplier is remainder of the difference between
@@ -1078,14 +1085,14 @@ static void hdmi_compute_pll(unsigned long clkin, int phy,
1078 * multiplied by 2^18(262144) divided by the reference clock 1085 * multiplied by 2^18(262144) divided by the reference clock
1079 */ 1086 */
1080 mf = (phy - pi->regm * refclk) * 262144; 1087 mf = (phy - pi->regm * refclk) * 262144;
1081 pi->regmf = mf/(refclk); 1088 pi->regmf = mf / (refclk);
1082 1089
1083 /* 1090 /*
1084 * Dcofreq should be set to 1 if required pixel clock 1091 * Dcofreq should be set to 1 if required pixel clock
1085 * is greater than 1000MHz 1092 * is greater than 1000MHz
1086 */ 1093 */
1087 pi->dcofreq = phy > 1000 * 100; 1094 pi->dcofreq = phy > 1000 * 100;
1088 pi->regsd = ((pi->regm * clkin / 10) / ((n + 1) * 250) + 5) / 10; 1095 pi->regsd = ((pi->regm * clkin / 10) / ((pi->regn + 1) * 250) + 5) / 10;
1089 1096
1090 DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf); 1097 DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
1091 DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd); 1098 DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
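Note: hdmi_compute_pll() above derives the reference clock as clkin / (regn + 1), takes regm as the integer part of the target-to-refclk ratio, and keeps the remainder scaled by 2^18 (262144) in regmf. Ignoring the driver's unit conventions, the integer/fractional split works like the sketch below; the input rates are made-up example values, not taken from the commit:

#include <stdio.h>

int main(void)
{
	/* made-up example rates, both in the same unit (Hz) */
	unsigned long clkin  = 38400000UL;	/* input clock */
	unsigned long target = 742500000UL;	/* wanted PLL output */
	unsigned int  regn   = 15;

	unsigned long refclk = clkin / (regn + 1);	/* 2.4 MHz here */
	unsigned long regm   = target / refclk;		/* integer multiplier */
	unsigned long long regmf =
		(unsigned long long)(target - regm * refclk) * 262144ULL / refclk;

	printf("refclk=%lu regm=%lu regmf=%llu\n", refclk, regm, regmf);
	return 0;
}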
@@ -1106,7 +1113,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1106 int r, code = 0; 1113 int r, code = 0;
1107 struct hdmi_pll_info pll_data; 1114 struct hdmi_pll_info pll_data;
1108 struct omap_video_timings *p; 1115 struct omap_video_timings *p;
1109 int clkin, n, phy; 1116 unsigned long phy;
1110 1117
1111 hdmi_enable_clocks(1); 1118 hdmi_enable_clocks(1);
1112 1119
@@ -1126,11 +1133,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1126 dssdev->panel.timings = cea_vesa_timings[code].timings; 1133 dssdev->panel.timings = cea_vesa_timings[code].timings;
1127 update_hdmi_timings(&hdmi.cfg, p, code); 1134 update_hdmi_timings(&hdmi.cfg, p, code);
1128 1135
1129 clkin = 3840; /* 38.4 MHz */
1130 n = 15; /* this is a constant for our math */
1131 phy = p->pixel_clock; 1136 phy = p->pixel_clock;
1132 1137
1133 hdmi_compute_pll(clkin, phy, n, &pll_data); 1138 hdmi_compute_pll(dssdev, phy, &pll_data);
1134 1139
1135 hdmi_wp_video_start(0); 1140 hdmi_wp_video_start(0);
1136 1141
@@ -1160,7 +1165,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1160 * dynamically by user. This can be moved to single location , say 1165 * dynamically by user. This can be moved to single location , say
1161 * Boardfile. 1166 * Boardfile.
1162 */ 1167 */
1163 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 1168 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
1164 1169
1165 /* bypass TV gamma table */ 1170 /* bypass TV gamma table */
1166 dispc_enable_gamma_table(0); 1171 dispc_enable_gamma_table(0);
@@ -1275,10 +1280,420 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
1275 mutex_unlock(&hdmi.lock); 1280 mutex_unlock(&hdmi.lock);
1276} 1281}
1277 1282
1283#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1284 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1285static void hdmi_wp_audio_config_format(
1286 struct hdmi_audio_format *aud_fmt)
1287{
1288 u32 r;
1289
1290 DSSDBG("Enter hdmi_wp_audio_config_format\n");
1291
1292 r = hdmi_read_reg(HDMI_WP_AUDIO_CFG);
1293 r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
1294 r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
1295 r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
1296 r = FLD_MOD(r, aud_fmt->type, 4, 4);
1297 r = FLD_MOD(r, aud_fmt->justification, 3, 3);
1298 r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
1299 r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
1300 r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
1301 hdmi_write_reg(HDMI_WP_AUDIO_CFG, r);
1302}
1303
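Note: hdmi_wp_audio_config_format() above and the routines after it follow the same read-modify-write shape: read a register, patch individual bit fields with FLD_MOD(value, field, msb, lsb), write the result back. Below is a self-contained sketch of that kind of field helper; the macro is a generic re-implementation written for illustration under the assumed msb/lsb argument order, not a copy of the kernel's FLD_MOD:

#include <stdio.h>
#include <stdint.h>

/* mask covering bits msb..lsb, and a helper that replaces just that field */
#define FLD_MASK(msb, lsb)	(((1u << ((msb) - (lsb) + 1)) - 1) << (lsb))
#define FLD_SET(val, field, msb, lsb) \
	(((val) & ~FLD_MASK(msb, lsb)) | (((field) << (lsb)) & FLD_MASK(msb, lsb)))

int main(void)
{
	uint32_t r = 0xffffffffu;	/* pretend register contents */

	r = FLD_SET(r, 2, 26, 24);	/* e.g. channel count into bits 26..24 */
	r = FLD_SET(r, 0, 4, 4);	/* clear a single-bit flag at bit 4 */

	printf("r = 0x%08x\n", r);
	return 0;
}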
1304static void hdmi_wp_audio_config_dma(struct hdmi_audio_dma *aud_dma)
1305{
1306 u32 r;
1307
1308 DSSDBG("Enter hdmi_wp_audio_config_dma\n");
1309
1310 r = hdmi_read_reg(HDMI_WP_AUDIO_CFG2);
1311 r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
1312 r = FLD_MOD(r, aud_dma->block_size, 7, 0);
1313 hdmi_write_reg(HDMI_WP_AUDIO_CFG2, r);
1314
1315 r = hdmi_read_reg(HDMI_WP_AUDIO_CTRL);
1316 r = FLD_MOD(r, aud_dma->mode, 9, 9);
1317 r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
1318 hdmi_write_reg(HDMI_WP_AUDIO_CTRL, r);
1319}
1320
1321static void hdmi_core_audio_config(struct hdmi_core_audio_config *cfg)
1322{
1323 u32 r;
1324
1325 /* audio clock recovery parameters */
1326 r = hdmi_read_reg(HDMI_CORE_AV_ACR_CTRL);
1327 r = FLD_MOD(r, cfg->use_mclk, 2, 2);
1328 r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
1329 r = FLD_MOD(r, cfg->cts_mode, 0, 0);
1330 hdmi_write_reg(HDMI_CORE_AV_ACR_CTRL, r);
1331
1332 REG_FLD_MOD(HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
1333 REG_FLD_MOD(HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
1334 REG_FLD_MOD(HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
1335
1336 if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
1337 REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
1338 REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
1339 REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
1340 } else {
1341 /*
1342 * HDMI IP uses this configuration to divide the MCLK to
1343 * update CTS value.
1344 */
1345 REG_FLD_MOD(HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
1346
1347 /* Configure clock for audio packets */
1348 REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
1349 cfg->aud_par_busclk, 7, 0);
1350 REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
1351 (cfg->aud_par_busclk >> 8), 7, 0);
1352 REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
1353 (cfg->aud_par_busclk >> 16), 7, 0);
1354 }
1355
1356 /* Override of SPDIF sample frequency with value in I2S_CHST4 */
1357 REG_FLD_MOD(HDMI_CORE_AV_SPDIF_CTRL, cfg->fs_override, 1, 1);
1358
1359 /* I2S parameters */
1360 REG_FLD_MOD(HDMI_CORE_AV_I2S_CHST4, cfg->freq_sample, 3, 0);
1361
1362 r = hdmi_read_reg(HDMI_CORE_AV_I2S_IN_CTRL);
1363 r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
1364 r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
1365 r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
1366 r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
1367 r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
1368 r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
1369 r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
1370 r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
1371 hdmi_write_reg(HDMI_CORE_AV_I2S_IN_CTRL, r);
1372
1373 r = hdmi_read_reg(HDMI_CORE_AV_I2S_CHST5);
1374 r = FLD_MOD(r, cfg->freq_sample, 7, 4);
1375 r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
1376 r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
1377 hdmi_write_reg(HDMI_CORE_AV_I2S_CHST5, r);
1378
1379 REG_FLD_MOD(HDMI_CORE_AV_I2S_IN_LEN, cfg->i2s_cfg.in_length_bits, 3, 0);
1380
1381 /* Audio channels and mode parameters */
1382 REG_FLD_MOD(HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
1383 r = hdmi_read_reg(HDMI_CORE_AV_AUD_MODE);
1384 r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
1385 r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
1386 r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
1387 r = FLD_MOD(r, cfg->en_spdif, 1, 1);
1388 hdmi_write_reg(HDMI_CORE_AV_AUD_MODE, r);
1389}
1390
1391static void hdmi_core_audio_infoframe_config(
1392 struct hdmi_core_infoframe_audio *info_aud)
1393{
1394 u8 val;
1395 u8 sum = 0, checksum = 0;
1396
1397 /*
1398 * Set audio info frame type, version and length as
1399 * described in HDMI 1.4a Section 8.2.2 specification.
1400 * Checksum calculation is defined in Section 5.3.5.
1401 */
1402 hdmi_write_reg(HDMI_CORE_AV_AUDIO_TYPE, 0x84);
1403 hdmi_write_reg(HDMI_CORE_AV_AUDIO_VERS, 0x01);
1404 hdmi_write_reg(HDMI_CORE_AV_AUDIO_LEN, 0x0a);
 1405	sum += 0x84 + 0x01 + 0x0a;
1406
1407 val = (info_aud->db1_coding_type << 4)
1408 | (info_aud->db1_channel_count - 1);
1409 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(0), val);
1410 sum += val;
1411
1412 val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
1413 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(1), val);
1414 sum += val;
1415
1416 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
1417
1418 val = info_aud->db4_channel_alloc;
1419 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(3), val);
1420 sum += val;
1421
1422 val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
1423 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(4), val);
1424 sum += val;
1425
1426 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
1427 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
1428 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
1429 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
1430 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
1431
1432 checksum = 0x100 - sum;
1433 hdmi_write_reg(HDMI_CORE_AV_AUDIO_CHSUM, checksum);
1434
1435 /*
1436 * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
1437 * is available.
1438 */
1439}
1440
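The checksum written above follows the general CEA-861-D InfoFrame rule: the three header bytes (type, version, length), the payload bytes, and the checksum byte itself must sum to zero modulo 256. A standalone sketch of that rule, assuming nothing beyond standard C (infoframe_checksum() is an illustrative name, not a driver function):

#include <stdint.h>
#include <stddef.h>

/* Return the byte that makes header + payload + checksum == 0 (mod 256). */
static uint8_t infoframe_checksum(const uint8_t *hdr, size_t hdr_len,
				  const uint8_t *payload, size_t payload_len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < hdr_len; i++)
		sum += hdr[i];
	for (i = 0; i < payload_len; i++)
		sum += payload[i];

	return (uint8_t)(0x100 - sum);	/* same "0x100 - sum" step as above */
}
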
1441static int hdmi_config_audio_acr(u32 sample_freq, u32 *n, u32 *cts)
1442{
1443 u32 r;
1444 u32 deep_color = 0;
1445 u32 pclk = hdmi.cfg.timings.timings.pixel_clock;
1446
1447 if (n == NULL || cts == NULL)
1448 return -EINVAL;
1449 /*
 1450	 * Obtain the current deep color configuration. This is needed
 1451	 * to calculate the TMDS clock based on the pixel clock.
1452 */
1453 r = REG_GET(HDMI_WP_VIDEO_CFG, 1, 0);
1454 switch (r) {
1455 case 1: /* No deep color selected */
1456 deep_color = 100;
1457 break;
1458 case 2: /* 10-bit deep color selected */
1459 deep_color = 125;
1460 break;
1461 case 3: /* 12-bit deep color selected */
1462 deep_color = 150;
1463 break;
1464 default:
1465 return -EINVAL;
1466 }
1467
1468 switch (sample_freq) {
1469 case 32000:
1470 if ((deep_color == 125) && ((pclk == 54054)
1471 || (pclk == 74250)))
1472 *n = 8192;
1473 else
1474 *n = 4096;
1475 break;
1476 case 44100:
1477 *n = 6272;
1478 break;
1479 case 48000:
1480 if ((deep_color == 125) && ((pclk == 54054)
1481 || (pclk == 74250)))
1482 *n = 8192;
1483 else
1484 *n = 6144;
1485 break;
1486 default:
1487 *n = 0;
1488 return -EINVAL;
1489 }
1490
1491 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
1492 *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
1493
1494 return 0;
1495}
1496
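As a sanity check on the ACR arithmetic above: with the pixel clock in kHz, deep_color as the TMDS-to-pixel-clock ratio times 100, and the sample frequency in Hz, the expression reduces to the familiar CTS = f_TMDS * N / (128 * fs). A standalone sketch that mirrors the same integer arithmetic (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the driver's integer arithmetic: pclk in kHz, deep_color in
 * percent (100/125/150), sample_freq in Hz. */
static uint32_t acr_cts(uint32_t pclk, uint32_t n, uint32_t deep_color,
			uint32_t sample_freq)
{
	return pclk * (n / 128) * deep_color / (sample_freq / 10);
}

int main(void)
{
	/* 1080p60 (148.5 MHz pixel clock), 48 kHz audio, no deep color:
	 * N = 6144 and CTS comes out as 148500, i.e. the TMDS clock in kHz. */
	printf("CTS = %u\n", (unsigned int)acr_cts(148500, 6144, 100, 48000));
	return 0;
}
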
1497static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
1498 struct snd_pcm_hw_params *params,
1499 struct snd_soc_dai *dai)
1500{
1501 struct hdmi_audio_format audio_format;
1502 struct hdmi_audio_dma audio_dma;
1503 struct hdmi_core_audio_config core_cfg;
1504 struct hdmi_core_infoframe_audio aud_if_cfg;
1505 int err, n, cts;
1506 enum hdmi_core_audio_sample_freq sample_freq;
1507
1508 switch (params_format(params)) {
1509 case SNDRV_PCM_FORMAT_S16_LE:
1510 core_cfg.i2s_cfg.word_max_length =
1511 HDMI_AUDIO_I2S_MAX_WORD_20BITS;
1512 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
1513 core_cfg.i2s_cfg.in_length_bits =
1514 HDMI_AUDIO_I2S_INPUT_LENGTH_16;
1515 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1516 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
1517 audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
1518 audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1519 audio_dma.transfer_size = 0x10;
1520 break;
1521 case SNDRV_PCM_FORMAT_S24_LE:
1522 core_cfg.i2s_cfg.word_max_length =
1523 HDMI_AUDIO_I2S_MAX_WORD_24BITS;
1524 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
1525 core_cfg.i2s_cfg.in_length_bits =
1526 HDMI_AUDIO_I2S_INPUT_LENGTH_24;
1527 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
1528 audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
1529 audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1530 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1531 audio_dma.transfer_size = 0x20;
1532 break;
1533 default:
1534 return -EINVAL;
1535 }
1536
1537 switch (params_rate(params)) {
1538 case 32000:
1539 sample_freq = HDMI_AUDIO_FS_32000;
1540 break;
1541 case 44100:
1542 sample_freq = HDMI_AUDIO_FS_44100;
1543 break;
1544 case 48000:
1545 sample_freq = HDMI_AUDIO_FS_48000;
1546 break;
1547 default:
1548 return -EINVAL;
1549 }
1550
1551 err = hdmi_config_audio_acr(params_rate(params), &n, &cts);
1552 if (err < 0)
1553 return err;
1554
1555 /* Audio wrapper config */
1556 audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
1557 audio_format.active_chnnls_msk = 0x03;
1558 audio_format.type = HDMI_AUDIO_TYPE_LPCM;
1559 audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
1560 /* Disable start/stop signals of IEC 60958 blocks */
1561 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
1562
1563 audio_dma.block_size = 0xC0;
1564 audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
1565 audio_dma.fifo_threshold = 0x20; /* in number of samples */
1566
1567 hdmi_wp_audio_config_dma(&audio_dma);
1568 hdmi_wp_audio_config_format(&audio_format);
1569
1570 /*
1571 * I2S config
1572 */
1573 core_cfg.i2s_cfg.en_high_bitrate_aud = false;
1574 /* Only used with high bitrate audio */
1575 core_cfg.i2s_cfg.cbit_order = false;
1576 /* Serial data and word select should change on sck rising edge */
1577 core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
1578 core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
1579 /* Set I2S word select polarity */
1580 core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
1581 core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
 1582	/* Set the serial data to word select shift. See the Philips I2S spec. */
1583 core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
1584 /* Enable one of the four available serial data channels */
1585 core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
1586
1587 /* Core audio config */
1588 core_cfg.freq_sample = sample_freq;
1589 core_cfg.n = n;
1590 core_cfg.cts = cts;
1591 if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
1592 core_cfg.aud_par_busclk = 0;
1593 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
1594 core_cfg.use_mclk = false;
1595 } else {
1596 core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
1597 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
1598 core_cfg.use_mclk = true;
1599 core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
1600 }
1601 core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
1602 core_cfg.en_spdif = false;
1603 /* Use sample frequency from channel status word */
1604 core_cfg.fs_override = true;
1605 /* Enable ACR packets */
1606 core_cfg.en_acr_pkt = true;
1607 /* Disable direct streaming digital audio */
1608 core_cfg.en_dsd_audio = false;
1609 /* Use parallel audio interface */
1610 core_cfg.en_parallel_aud_input = true;
1611
1612 hdmi_core_audio_config(&core_cfg);
1613
1614 /*
 1615	 * Configure the audio InfoFrame packet;
 1616	 * see CEA-861-D, page 74, for details.
1617 */
1618 aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
1619 aud_if_cfg.db1_channel_count = 2;
1620 aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
1621 aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
1622 aud_if_cfg.db4_channel_alloc = 0x00;
1623 aud_if_cfg.db5_downmix_inh = false;
1624 aud_if_cfg.db5_lsv = 0;
1625
1626 hdmi_core_audio_infoframe_config(&aud_if_cfg);
1627 return 0;
1628}
1629
1630static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
1631 struct snd_soc_dai *dai)
1632{
1633 int err = 0;
1634 switch (cmd) {
1635 case SNDRV_PCM_TRIGGER_START:
1636 case SNDRV_PCM_TRIGGER_RESUME:
1637 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1638 REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 1, 0, 0);
1639 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 31, 31);
1640 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 30, 30);
1641 break;
1642
1643 case SNDRV_PCM_TRIGGER_STOP:
1644 case SNDRV_PCM_TRIGGER_SUSPEND:
1645 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1646 REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 0, 0, 0);
1647 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 30, 30);
1648 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 31, 31);
1649 break;
1650 default:
1651 err = -EINVAL;
1652 }
1653 return err;
1654}
1655
1656static int hdmi_audio_startup(struct snd_pcm_substream *substream,
1657 struct snd_soc_dai *dai)
1658{
1659 if (!hdmi.mode) {
1660 pr_err("Current video settings do not support audio.\n");
1661 return -EIO;
1662 }
1663 return 0;
1664}
1665
1666static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
1667};
1668
1669static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
1670 .hw_params = hdmi_audio_hw_params,
1671 .trigger = hdmi_audio_trigger,
1672 .startup = hdmi_audio_startup,
1673};
1674
1675static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
1676 .name = "hdmi-audio-codec",
1677 .playback = {
1678 .channels_min = 2,
1679 .channels_max = 2,
1680 .rates = SNDRV_PCM_RATE_32000 |
1681 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
1682 .formats = SNDRV_PCM_FMTBIT_S16_LE |
1683 SNDRV_PCM_FMTBIT_S24_LE,
1684 },
1685 .ops = &hdmi_audio_codec_ops,
1686};
1687#endif
1688
1278/* HDMI HW IP initialisation */ 1689/* HDMI HW IP initialisation */
1279static int omapdss_hdmihw_probe(struct platform_device *pdev) 1690static int omapdss_hdmihw_probe(struct platform_device *pdev)
1280{ 1691{
1281 struct resource *hdmi_mem; 1692 struct resource *hdmi_mem;
1693#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1694 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1695 int ret;
1696#endif
1282 1697
1283 hdmi.pdata = pdev->dev.platform_data; 1698 hdmi.pdata = pdev->dev.platform_data;
1284 hdmi.pdev = pdev; 1699 hdmi.pdev = pdev;
@@ -1300,6 +1715,17 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
1300 1715
1301 hdmi_panel_init(); 1716 hdmi_panel_init();
1302 1717
1718#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1719 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1720
1721 /* Register ASoC codec DAI */
1722 ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
1723 &hdmi_codec_dai_drv, 1);
1724 if (ret) {
1725 DSSERR("can't register ASoC HDMI audio codec\n");
1726 return ret;
1727 }
1728#endif
1303 return 0; 1729 return 0;
1304} 1730}
1305 1731
@@ -1307,6 +1733,11 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev)
1307{ 1733{
1308 hdmi_panel_exit(); 1734 hdmi_panel_exit();
1309 1735
1736#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1737 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1738 snd_soc_unregister_codec(&pdev->dev);
1739#endif
1740
1310 iounmap(hdmi.base_wp); 1741 iounmap(hdmi.base_wp);
1311 1742
1312 return 0; 1743 return 0;
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
index 9887ab96da3c..c885f9cb0659 100644
--- a/drivers/video/omap2/dss/hdmi.h
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -22,7 +22,7 @@
22#define _OMAP4_DSS_HDMI_H_ 22#define _OMAP4_DSS_HDMI_H_
23 23
24#include <linux/string.h> 24#include <linux/string.h>
25#include <plat/display.h> 25#include <video/omapdss.h>
26 26
27#define HDMI_WP 0x0 27#define HDMI_WP 0x0
28#define HDMI_CORE_SYS 0x400 28#define HDMI_CORE_SYS 0x400
@@ -48,6 +48,10 @@ struct hdmi_reg { u16 idx; };
48#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68) 48#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68)
49#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C) 49#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C)
50#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70) 50#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70)
51#define HDMI_WP_AUDIO_CFG HDMI_WP_REG(0x80)
52#define HDMI_WP_AUDIO_CFG2 HDMI_WP_REG(0x84)
53#define HDMI_WP_AUDIO_CTRL HDMI_WP_REG(0x88)
54#define HDMI_WP_AUDIO_DATA HDMI_WP_REG(0x8C)
51 55
52/* HDMI IP Core System */ 56/* HDMI IP Core System */
53#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx) 57#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx)
@@ -105,6 +109,8 @@ struct hdmi_reg { u16 idx; };
105#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15) 109#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15)
106#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190) 110#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190)
107#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27) 111#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
112#define HDMI_CORE_AV_AUD_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x210)
113#define HDMI_CORE_AV_AUD_DBYTE_NELEMS HDMI_CORE_AV_REG(10)
108#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290) 114#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290)
109#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27) 115#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
110#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300) 116#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300)
@@ -153,6 +159,10 @@ struct hdmi_reg { u16 idx; };
153#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184) 159#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184)
154#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188) 160#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188)
155#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C) 161#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C)
162#define HDMI_CORE_AV_AUDIO_TYPE HDMI_CORE_AV_REG(0x200)
163#define HDMI_CORE_AV_AUDIO_VERS HDMI_CORE_AV_REG(0x204)
164#define HDMI_CORE_AV_AUDIO_LEN HDMI_CORE_AV_REG(0x208)
165#define HDMI_CORE_AV_AUDIO_CHSUM HDMI_CORE_AV_REG(0x20C)
156#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280) 166#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280)
157#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284) 167#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284)
158#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288) 168#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288)
@@ -272,7 +282,7 @@ enum hdmi_core_packet_ctrl {
272 HDMI_PACKETREPEATOFF = 0 282 HDMI_PACKETREPEATOFF = 0
273}; 283};
274 284
275/* INFOFRAME_AVI_ definitions */ 285/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
276enum hdmi_core_infoframe { 286enum hdmi_core_infoframe {
277 HDMI_INFOFRAME_AVI_DB1Y_RGB = 0, 287 HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
278 HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1, 288 HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
@@ -317,7 +327,36 @@ enum hdmi_core_infoframe {
317 HDMI_INFOFRAME_AVI_DB5PR_7 = 6, 327 HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
318 HDMI_INFOFRAME_AVI_DB5PR_8 = 7, 328 HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
319 HDMI_INFOFRAME_AVI_DB5PR_9 = 8, 329 HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
320 HDMI_INFOFRAME_AVI_DB5PR_10 = 9 330 HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
331 HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
332 HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
333 HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
334 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
335 HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
336 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
337 HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
338 HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
339 HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
340 HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
341 HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
342 HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
343 HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
344 HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
345 HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
346 HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
347 HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
348 HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
349 HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
350 HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
351 HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
352 HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
353 HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
354 HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
355 HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
356 HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
357 HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
358 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
359 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
321}; 360};
322 361
323enum hdmi_packing_mode { 362enum hdmi_packing_mode {
@@ -327,6 +366,121 @@ enum hdmi_packing_mode {
327 HDMI_PACK_ALREADYPACKED = 7 366 HDMI_PACK_ALREADYPACKED = 7
328}; 367};
329 368
369enum hdmi_core_audio_sample_freq {
370 HDMI_AUDIO_FS_32000 = 0x3,
371 HDMI_AUDIO_FS_44100 = 0x0,
372 HDMI_AUDIO_FS_48000 = 0x2,
373 HDMI_AUDIO_FS_88200 = 0x8,
374 HDMI_AUDIO_FS_96000 = 0xA,
375 HDMI_AUDIO_FS_176400 = 0xC,
376 HDMI_AUDIO_FS_192000 = 0xE,
377 HDMI_AUDIO_FS_NOT_INDICATED = 0x1
378};
379
380enum hdmi_core_audio_layout {
381 HDMI_AUDIO_LAYOUT_2CH = 0,
382 HDMI_AUDIO_LAYOUT_8CH = 1
383};
384
385enum hdmi_core_cts_mode {
386 HDMI_AUDIO_CTS_MODE_HW = 0,
387 HDMI_AUDIO_CTS_MODE_SW = 1
388};
389
390enum hdmi_stereo_channels {
391 HDMI_AUDIO_STEREO_NOCHANNELS = 0,
392 HDMI_AUDIO_STEREO_ONECHANNEL = 1,
393 HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
394 HDMI_AUDIO_STEREO_THREECHANNELS = 3,
395 HDMI_AUDIO_STEREO_FOURCHANNELS = 4
396};
397
398enum hdmi_audio_type {
399 HDMI_AUDIO_TYPE_LPCM = 0,
400 HDMI_AUDIO_TYPE_IEC = 1
401};
402
403enum hdmi_audio_justify {
404 HDMI_AUDIO_JUSTIFY_LEFT = 0,
405 HDMI_AUDIO_JUSTIFY_RIGHT = 1
406};
407
408enum hdmi_audio_sample_order {
409 HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
410 HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
411};
412
413enum hdmi_audio_samples_perword {
414 HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
415 HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
416};
417
418enum hdmi_audio_sample_size {
419 HDMI_AUDIO_SAMPLE_16BITS = 0,
420 HDMI_AUDIO_SAMPLE_24BITS = 1
421};
422
423enum hdmi_audio_transf_mode {
424 HDMI_AUDIO_TRANSF_DMA = 0,
425 HDMI_AUDIO_TRANSF_IRQ = 1
426};
427
428enum hdmi_audio_blk_strt_end_sig {
429 HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
430 HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
431};
432
433enum hdmi_audio_i2s_config {
434 HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
 435	HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_RIGHT = 1,
436 HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
437 HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
438 HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
439 HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
440 HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
441 HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
442 HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
443 HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
444 HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
445 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
446 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
447 HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
448 HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
449 HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
450 HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
451 HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
452 HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
453 HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
454 HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
455 HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
456 HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
457 HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
458 HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
459 HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
460 HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
461 HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
462 HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
463 HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
464 HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
465 HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
466 HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
467 HDMI_AUDIO_I2S_SD0_EN = 1,
468 HDMI_AUDIO_I2S_SD1_EN = 1 << 1,
469 HDMI_AUDIO_I2S_SD2_EN = 1 << 2,
470 HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
471};
472
473enum hdmi_audio_mclk_mode {
474 HDMI_AUDIO_MCLK_128FS = 0,
475 HDMI_AUDIO_MCLK_256FS = 1,
476 HDMI_AUDIO_MCLK_384FS = 2,
477 HDMI_AUDIO_MCLK_512FS = 3,
478 HDMI_AUDIO_MCLK_768FS = 4,
479 HDMI_AUDIO_MCLK_1024FS = 5,
480 HDMI_AUDIO_MCLK_1152FS = 6,
481 HDMI_AUDIO_MCLK_192FS = 7
482};
483
330struct hdmi_core_video_config { 484struct hdmi_core_video_config {
331 enum hdmi_core_inputbus_width ip_bus_width; 485 enum hdmi_core_inputbus_width ip_bus_width;
332 enum hdmi_core_dither_trunc op_dither_truc; 486 enum hdmi_core_dither_trunc op_dither_truc;
@@ -376,6 +530,19 @@ struct hdmi_core_infoframe_avi {
376 u16 db12_13_pixel_sofright; 530 u16 db12_13_pixel_sofright;
377 /* Pixel number start of right bar */ 531 /* Pixel number start of right bar */
378}; 532};
533/*
 534 * Refer to Section 8.2 of the HDMI 1.3 specification for
 535 * details about the InfoFrame data bytes
536 */
537struct hdmi_core_infoframe_audio {
538 u8 db1_coding_type;
539 u8 db1_channel_count;
540 u8 db2_sample_freq;
541 u8 db2_sample_size;
542 u8 db4_channel_alloc;
543 bool db5_downmix_inh;
544 u8 db5_lsv; /* Level shift values for downmix */
545};
379 546
380struct hdmi_core_packet_enable_repeat { 547struct hdmi_core_packet_enable_repeat {
381 u32 audio_pkt; 548 u32 audio_pkt;
@@ -412,4 +579,53 @@ struct hdmi_config {
412 struct hdmi_cm cm; 579 struct hdmi_cm cm;
413}; 580};
414 581
582struct hdmi_audio_format {
583 enum hdmi_stereo_channels stereo_channels;
584 u8 active_chnnls_msk;
585 enum hdmi_audio_type type;
586 enum hdmi_audio_justify justification;
587 enum hdmi_audio_sample_order sample_order;
588 enum hdmi_audio_samples_perword samples_per_word;
589 enum hdmi_audio_sample_size sample_size;
590 enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
591};
592
593struct hdmi_audio_dma {
594 u8 transfer_size;
595 u8 block_size;
596 enum hdmi_audio_transf_mode mode;
597 u16 fifo_threshold;
598};
599
600struct hdmi_core_audio_i2s_config {
601 u8 word_max_length;
602 u8 word_length;
603 u8 in_length_bits;
604 u8 justification;
605 u8 en_high_bitrate_aud;
606 u8 sck_edge_mode;
607 u8 cbit_order;
608 u8 vbit;
609 u8 ws_polarity;
610 u8 direction;
611 u8 shift;
612 u8 active_sds;
613};
614
615struct hdmi_core_audio_config {
616 struct hdmi_core_audio_i2s_config i2s_cfg;
617 enum hdmi_core_audio_sample_freq freq_sample;
618 bool fs_override;
619 u32 n;
620 u32 cts;
621 u32 aud_par_busclk;
622 enum hdmi_core_audio_layout layout;
623 enum hdmi_core_cts_mode cts_mode;
624 bool use_mclk;
625 enum hdmi_audio_mclk_mode mclk_mode;
626 bool en_acr_pkt;
627 bool en_dsd_audio;
628 bool en_parallel_aud_input;
629 bool en_spdif;
630};
415#endif 631#endif
diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_omap4_panel.c
index ffb5de94131f..7d4f2bd7c506 100644
--- a/drivers/video/omap2/dss/hdmi_omap4_panel.c
+++ b/drivers/video/omap2/dss/hdmi_omap4_panel.c
@@ -24,7 +24,7 @@
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <plat/display.h> 27#include <video/omapdss.h>
28 28
29#include "dss.h" 29#include "dss.h"
30 30
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index bcd37ec86952..9aeea50e33ff 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -29,7 +29,7 @@
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/jiffies.h> 30#include <linux/jiffies.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33#include <plat/cpu.h> 33#include <plat/cpu.h>
34 34
35#include "dss.h" 35#include "dss.h"
@@ -393,6 +393,7 @@ struct overlay_cache_data {
393 393
394 u32 paddr; 394 u32 paddr;
395 void __iomem *vaddr; 395 void __iomem *vaddr;
396 u32 p_uv_addr; /* relevant for NV12 format only */
396 u16 screen_width; 397 u16 screen_width;
397 u16 width; 398 u16 width;
398 u16 height; 399 u16 height;
@@ -775,10 +776,17 @@ static int configure_overlay(enum omap_plane plane)
775 } 776 }
776 777
777 switch (c->color_mode) { 778 switch (c->color_mode) {
779 case OMAP_DSS_COLOR_NV12:
780 bpp = 8;
781 break;
778 case OMAP_DSS_COLOR_RGB16: 782 case OMAP_DSS_COLOR_RGB16:
779 case OMAP_DSS_COLOR_ARGB16: 783 case OMAP_DSS_COLOR_ARGB16:
780 case OMAP_DSS_COLOR_YUV2: 784 case OMAP_DSS_COLOR_YUV2:
781 case OMAP_DSS_COLOR_UYVY: 785 case OMAP_DSS_COLOR_UYVY:
786 case OMAP_DSS_COLOR_RGBA16:
787 case OMAP_DSS_COLOR_RGBX16:
788 case OMAP_DSS_COLOR_ARGB16_1555:
789 case OMAP_DSS_COLOR_XRGB16_1555:
782 bpp = 16; 790 bpp = 16;
783 break; 791 break;
784 792
@@ -854,7 +862,8 @@ static int configure_overlay(enum omap_plane plane)
854 c->mirror, 862 c->mirror,
855 c->global_alpha, 863 c->global_alpha,
856 c->pre_mult_alpha, 864 c->pre_mult_alpha,
857 c->channel); 865 c->channel,
866 c->p_uv_addr);
858 867
859 if (r) { 868 if (r) {
860 /* this shouldn't happen */ 869 /* this shouldn't happen */
@@ -1269,6 +1278,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1269 1278
1270 oc->paddr = ovl->info.paddr; 1279 oc->paddr = ovl->info.paddr;
1271 oc->vaddr = ovl->info.vaddr; 1280 oc->vaddr = ovl->info.vaddr;
1281 oc->p_uv_addr = ovl->info.p_uv_addr;
1272 oc->screen_width = ovl->info.screen_width; 1282 oc->screen_width = ovl->info.screen_width;
1273 oc->width = ovl->info.width; 1283 oc->width = ovl->info.width;
1274 oc->height = ovl->info.height; 1284 oc->height = ovl->info.height;
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index f1aca6d04011..0f08025b1f0e 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -31,7 +31,7 @@
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33 33
34#include <plat/display.h> 34#include <video/omapdss.h>
35#include <plat/cpu.h> 35#include <plat/cpu.h>
36 36
37#include "dss.h" 37#include "dss.h"
@@ -201,12 +201,16 @@ static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
201static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf, 201static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
202 size_t size) 202 size_t size)
203{ 203{
204 int r; 204 int r, enable;
205 struct omap_overlay_info info; 205 struct omap_overlay_info info;
206 206
207 ovl->get_overlay_info(ovl, &info); 207 ovl->get_overlay_info(ovl, &info);
208 208
209 info.enabled = simple_strtoul(buf, NULL, 10); 209 r = kstrtoint(buf, 0, &enable);
210 if (r)
211 return r;
212
213 info.enabled = !!enable;
210 214
211 r = ovl->set_overlay_info(ovl, &info); 215 r = ovl->set_overlay_info(ovl, &info);
212 if (r) 216 if (r)
@@ -231,8 +235,13 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
231 const char *buf, size_t size) 235 const char *buf, size_t size)
232{ 236{
233 int r; 237 int r;
238 u8 alpha;
234 struct omap_overlay_info info; 239 struct omap_overlay_info info;
235 240
241 r = kstrtou8(buf, 0, &alpha);
242 if (r)
243 return r;
244
236 ovl->get_overlay_info(ovl, &info); 245 ovl->get_overlay_info(ovl, &info);
237 246
238 /* Video1 plane does not support global alpha 247 /* Video1 plane does not support global alpha
@@ -242,7 +251,7 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
242 ovl->id == OMAP_DSS_VIDEO1) 251 ovl->id == OMAP_DSS_VIDEO1)
243 info.global_alpha = 255; 252 info.global_alpha = 255;
244 else 253 else
245 info.global_alpha = simple_strtoul(buf, NULL, 10); 254 info.global_alpha = alpha;
246 255
247 r = ovl->set_overlay_info(ovl, &info); 256 r = ovl->set_overlay_info(ovl, &info);
248 if (r) 257 if (r)
@@ -268,8 +277,13 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
268 const char *buf, size_t size) 277 const char *buf, size_t size)
269{ 278{
270 int r; 279 int r;
280 u8 alpha;
271 struct omap_overlay_info info; 281 struct omap_overlay_info info;
272 282
283 r = kstrtou8(buf, 0, &alpha);
284 if (r)
285 return r;
286
273 ovl->get_overlay_info(ovl, &info); 287 ovl->get_overlay_info(ovl, &info);
274 288
275 /* only GFX and Video2 plane support pre alpha multiplied 289 /* only GFX and Video2 plane support pre alpha multiplied
@@ -279,7 +293,7 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
279 ovl->id == OMAP_DSS_VIDEO1) 293 ovl->id == OMAP_DSS_VIDEO1)
280 info.pre_mult_alpha = 0; 294 info.pre_mult_alpha = 0;
281 else 295 else
282 info.pre_mult_alpha = simple_strtoul(buf, NULL, 10); 296 info.pre_mult_alpha = alpha;
283 297
284 r = ovl->set_overlay_info(ovl, &info); 298 r = ovl->set_overlay_info(ovl, &info);
285 if (r) 299 if (r)
@@ -491,13 +505,18 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
491 ovl->manager = mgr; 505 ovl->manager = mgr;
492 506
493 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 507 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
494 /* XXX: on manual update display, in auto update mode, a bug happens 508 /* XXX: When there is an overlay on a DSI manual update display, and
495 * here. When an overlay is first enabled on LCD, then it's disabled, 509 * the overlay is first disabled, then moved to tv, and enabled, we
496 * and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT 510 * seem to get SYNC_LOST_DIGIT error.
497 * errors. Waiting before changing the channel_out fixes it. I'm 511 *
498 * guessing that the overlay is still somehow being used for the LCD, 512 * Waiting doesn't seem to help, but updating the manual update display
499 * but I don't understand how or why. */ 513 * after disabling the overlay seems to fix this. This hints that the
500 msleep(40); 514 * overlay is perhaps somehow tied to the LCD output until the output
515 * is updated.
516 *
517 * Userspace workaround for this is to update the LCD after disabling
518 * the overlay, but before moving the overlay to TV.
519 */
501 dispc_set_channel_out(ovl->id, mgr->id); 520 dispc_set_channel_out(ovl->id, mgr->id);
502 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 521 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
503 522
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 5ea17f49c611..c06fbe0bc678 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -32,8 +32,9 @@
32#include <linux/ktime.h> 32#include <linux/ktime.h>
33#include <linux/hrtimer.h> 33#include <linux/hrtimer.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/semaphore.h>
35 36
36#include <plat/display.h> 37#include <video/omapdss.h>
37#include "dss.h" 38#include "dss.h"
38 39
39struct rfbi_reg { u16 idx; }; 40struct rfbi_reg { u16 idx; };
@@ -65,9 +66,6 @@ struct rfbi_reg { u16 idx; };
65#define REG_FLD_MOD(idx, val, start, end) \ 66#define REG_FLD_MOD(idx, val, start, end) \
66 rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end)) 67 rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
67 68
68/* To work around an RFBI transfer rate limitation */
69#define OMAP_RFBI_RATE_LIMIT 1
70
71enum omap_rfbi_cycleformat { 69enum omap_rfbi_cycleformat {
72 OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0, 70 OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0,
73 OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1, 71 OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1,
@@ -89,11 +87,6 @@ enum omap_rfbi_parallelmode {
89 OMAP_DSS_RFBI_PARALLELMODE_16 = 3, 87 OMAP_DSS_RFBI_PARALLELMODE_16 = 3,
90}; 88};
91 89
92enum update_cmd {
93 RFBI_CMD_UPDATE = 0,
94 RFBI_CMD_SYNC = 1,
95};
96
97static int rfbi_convert_timings(struct rfbi_timings *t); 90static int rfbi_convert_timings(struct rfbi_timings *t);
98static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div); 91static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
99 92
@@ -114,20 +107,9 @@ static struct {
114 107
115 struct omap_dss_device *dssdev[2]; 108 struct omap_dss_device *dssdev[2];
116 109
117 struct kfifo cmd_fifo; 110 struct semaphore bus_lock;
118 spinlock_t cmd_lock;
119 struct completion cmd_done;
120 atomic_t cmd_fifo_full;
121 atomic_t cmd_pending;
122} rfbi; 111} rfbi;
123 112
124struct update_region {
125 u16 x;
126 u16 y;
127 u16 w;
128 u16 h;
129};
130
131static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val) 113static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
132{ 114{
133 __raw_writel(val, rfbi.base + idx.idx); 115 __raw_writel(val, rfbi.base + idx.idx);
@@ -146,9 +128,20 @@ static void rfbi_enable_clocks(bool enable)
146 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 128 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
147} 129}
148 130
131void rfbi_bus_lock(void)
132{
133 down(&rfbi.bus_lock);
134}
135EXPORT_SYMBOL(rfbi_bus_lock);
136
137void rfbi_bus_unlock(void)
138{
139 up(&rfbi.bus_lock);
140}
141EXPORT_SYMBOL(rfbi_bus_unlock);
142
149void omap_rfbi_write_command(const void *buf, u32 len) 143void omap_rfbi_write_command(const void *buf, u32 len)
150{ 144{
151 rfbi_enable_clocks(1);
152 switch (rfbi.parallelmode) { 145 switch (rfbi.parallelmode) {
153 case OMAP_DSS_RFBI_PARALLELMODE_8: 146 case OMAP_DSS_RFBI_PARALLELMODE_8:
154 { 147 {
@@ -172,13 +165,11 @@ void omap_rfbi_write_command(const void *buf, u32 len)
172 default: 165 default:
173 BUG(); 166 BUG();
174 } 167 }
175 rfbi_enable_clocks(0);
176} 168}
177EXPORT_SYMBOL(omap_rfbi_write_command); 169EXPORT_SYMBOL(omap_rfbi_write_command);
178 170
179void omap_rfbi_read_data(void *buf, u32 len) 171void omap_rfbi_read_data(void *buf, u32 len)
180{ 172{
181 rfbi_enable_clocks(1);
182 switch (rfbi.parallelmode) { 173 switch (rfbi.parallelmode) {
183 case OMAP_DSS_RFBI_PARALLELMODE_8: 174 case OMAP_DSS_RFBI_PARALLELMODE_8:
184 { 175 {
@@ -206,13 +197,11 @@ void omap_rfbi_read_data(void *buf, u32 len)
206 default: 197 default:
207 BUG(); 198 BUG();
208 } 199 }
209 rfbi_enable_clocks(0);
210} 200}
211EXPORT_SYMBOL(omap_rfbi_read_data); 201EXPORT_SYMBOL(omap_rfbi_read_data);
212 202
213void omap_rfbi_write_data(const void *buf, u32 len) 203void omap_rfbi_write_data(const void *buf, u32 len)
214{ 204{
215 rfbi_enable_clocks(1);
216 switch (rfbi.parallelmode) { 205 switch (rfbi.parallelmode) {
217 case OMAP_DSS_RFBI_PARALLELMODE_8: 206 case OMAP_DSS_RFBI_PARALLELMODE_8:
218 { 207 {
@@ -237,7 +226,6 @@ void omap_rfbi_write_data(const void *buf, u32 len)
237 BUG(); 226 BUG();
238 227
239 } 228 }
240 rfbi_enable_clocks(0);
241} 229}
242EXPORT_SYMBOL(omap_rfbi_write_data); 230EXPORT_SYMBOL(omap_rfbi_write_data);
243 231
@@ -249,8 +237,6 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
249 int horiz_offset = scr_width - w; 237 int horiz_offset = scr_width - w;
250 int i; 238 int i;
251 239
252 rfbi_enable_clocks(1);
253
254 if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && 240 if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
255 rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { 241 rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
256 const u16 __iomem *pd = buf; 242 const u16 __iomem *pd = buf;
@@ -295,12 +281,10 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
295 } else { 281 } else {
296 BUG(); 282 BUG();
297 } 283 }
298
299 rfbi_enable_clocks(0);
300} 284}
301EXPORT_SYMBOL(omap_rfbi_write_pixels); 285EXPORT_SYMBOL(omap_rfbi_write_pixels);
302 286
303void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, 287static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
304 u16 height, void (*callback)(void *data), void *data) 288 u16 height, void (*callback)(void *data), void *data)
305{ 289{
306 u32 l; 290 u32 l;
@@ -317,8 +301,6 @@ void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
317 rfbi.framedone_callback = callback; 301 rfbi.framedone_callback = callback;
318 rfbi.framedone_callback_data = data; 302 rfbi.framedone_callback_data = data;
319 303
320 rfbi_enable_clocks(1);
321
322 rfbi_write_reg(RFBI_PIXEL_CNT, width * height); 304 rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
323 305
324 l = rfbi_read_reg(RFBI_CONTROL); 306 l = rfbi_read_reg(RFBI_CONTROL);
@@ -337,15 +319,11 @@ static void framedone_callback(void *data, u32 mask)
337 319
338 REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0); 320 REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
339 321
340 rfbi_enable_clocks(0);
341
342 callback = rfbi.framedone_callback; 322 callback = rfbi.framedone_callback;
343 rfbi.framedone_callback = NULL; 323 rfbi.framedone_callback = NULL;
344 324
345 if (callback != NULL) 325 if (callback != NULL)
346 callback(rfbi.framedone_callback_data); 326 callback(rfbi.framedone_callback_data);
347
348 atomic_set(&rfbi.cmd_pending, 0);
349} 327}
350 328
351#if 1 /* VERBOSE */ 329#if 1 /* VERBOSE */
@@ -435,7 +413,7 @@ static int calc_extif_timings(struct rfbi_timings *t)
435} 413}
436 414
437 415
438void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) 416static void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
439{ 417{
440 int r; 418 int r;
441 419
@@ -447,7 +425,6 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
447 425
448 BUG_ON(!t->converted); 426 BUG_ON(!t->converted);
449 427
450 rfbi_enable_clocks(1);
451 rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]); 428 rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]);
452 rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]); 429 rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]);
453 430
@@ -456,7 +433,6 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
456 (t->tim[2] ? 1 : 0), 4, 4); 433 (t->tim[2] ? 1 : 0), 4, 4);
457 434
458 rfbi_print_timings(); 435 rfbi_print_timings();
459 rfbi_enable_clocks(0);
460} 436}
461 437
462static int ps_to_rfbi_ticks(int time, int div) 438static int ps_to_rfbi_ticks(int time, int div)
@@ -472,59 +448,6 @@ static int ps_to_rfbi_ticks(int time, int div)
472 return ret; 448 return ret;
473} 449}
474 450
475#ifdef OMAP_RFBI_RATE_LIMIT
476unsigned long rfbi_get_max_tx_rate(void)
477{
478 unsigned long l4_rate, dss1_rate;
479 int min_l4_ticks = 0;
480 int i;
481
482 /* According to TI this can't be calculated so make the
483 * adjustments for a couple of known frequencies and warn for
484 * others.
485 */
486 static const struct {
487 unsigned long l4_clk; /* HZ */
488 unsigned long dss1_clk; /* HZ */
489 unsigned long min_l4_ticks;
490 } ftab[] = {
491 { 55, 132, 7, }, /* 7.86 MPix/s */
492 { 110, 110, 12, }, /* 9.16 MPix/s */
493 { 110, 132, 10, }, /* 11 Mpix/s */
494 { 120, 120, 10, }, /* 12 Mpix/s */
495 { 133, 133, 10, }, /* 13.3 Mpix/s */
496 };
497
498 l4_rate = rfbi.l4_khz / 1000;
499 dss1_rate = dss_clk_get_rate(DSS_CLK_FCK) / 1000000;
500
501 for (i = 0; i < ARRAY_SIZE(ftab); i++) {
502 /* Use a window instead of an exact match, to account
503 * for different DPLL multiplier / divider pairs.
504 */
505 if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
506 abs(ftab[i].dss1_clk - dss1_rate) < 3) {
507 min_l4_ticks = ftab[i].min_l4_ticks;
508 break;
509 }
510 }
511 if (i == ARRAY_SIZE(ftab)) {
512 /* Can't be sure, return anyway the maximum not
513 * rate-limited. This might cause a problem only for the
514 * tearing synchronisation.
515 */
516 DSSERR("can't determine maximum RFBI transfer rate\n");
517 return rfbi.l4_khz * 1000;
518 }
519 return rfbi.l4_khz * 1000 / min_l4_ticks;
520}
521#else
522int rfbi_get_max_tx_rate(void)
523{
524 return rfbi.l4_khz * 1000;
525}
526#endif
527
528static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div) 451static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
529{ 452{
530 *clk_period = 1000000000 / rfbi.l4_khz; 453 *clk_period = 1000000000 / rfbi.l4_khz;
@@ -644,7 +567,6 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
644 DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n", 567 DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n",
645 mode, hs, vs, hs_pol_inv, vs_pol_inv); 568 mode, hs, vs, hs_pol_inv, vs_pol_inv);
646 569
647 rfbi_enable_clocks(1);
648 rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); 570 rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
649 rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); 571 rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
650 572
@@ -657,7 +579,6 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
657 l &= ~(1 << 20); 579 l &= ~(1 << 20);
658 else 580 else
659 l |= 1 << 20; 581 l |= 1 << 20;
660 rfbi_enable_clocks(0);
661 582
662 return 0; 583 return 0;
663} 584}
@@ -672,7 +593,6 @@ int omap_rfbi_enable_te(bool enable, unsigned line)
672 if (line > (1 << 11) - 1) 593 if (line > (1 << 11) - 1)
673 return -EINVAL; 594 return -EINVAL;
674 595
675 rfbi_enable_clocks(1);
676 l = rfbi_read_reg(RFBI_CONFIG(0)); 596 l = rfbi_read_reg(RFBI_CONFIG(0));
677 l &= ~(0x3 << 2); 597 l &= ~(0x3 << 2);
678 if (enable) { 598 if (enable) {
@@ -682,50 +602,12 @@ int omap_rfbi_enable_te(bool enable, unsigned line)
682 rfbi.te_enabled = 0; 602 rfbi.te_enabled = 0;
683 rfbi_write_reg(RFBI_CONFIG(0), l); 603 rfbi_write_reg(RFBI_CONFIG(0), l);
684 rfbi_write_reg(RFBI_LINE_NUMBER, line); 604 rfbi_write_reg(RFBI_LINE_NUMBER, line);
685 rfbi_enable_clocks(0);
686 605
687 return 0; 606 return 0;
688} 607}
689EXPORT_SYMBOL(omap_rfbi_enable_te); 608EXPORT_SYMBOL(omap_rfbi_enable_te);
690 609
691#if 0 610static int rfbi_configure(int rfbi_module, int bpp, int lines)
692static void rfbi_enable_config(int enable1, int enable2)
693{
694 u32 l;
695 int cs = 0;
696
697 if (enable1)
698 cs |= 1<<0;
699 if (enable2)
700 cs |= 1<<1;
701
702 rfbi_enable_clocks(1);
703
704 l = rfbi_read_reg(RFBI_CONTROL);
705
706 l = FLD_MOD(l, cs, 3, 2);
707 l = FLD_MOD(l, 0, 1, 1);
708
709 rfbi_write_reg(RFBI_CONTROL, l);
710
711
712 l = rfbi_read_reg(RFBI_CONFIG(0));
713 l = FLD_MOD(l, 0, 3, 2); /* TRIGGERMODE: ITE */
714 /*l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
715 /*l |= FLD_VAL(0, 8, 7); */ /* L4FORMAT, 1pix/L4 */
716
717 l = FLD_MOD(l, 0, 16, 16); /* A0POLARITY */
718 l = FLD_MOD(l, 1, 20, 20); /* TE_VSYNC_POLARITY */
719 l = FLD_MOD(l, 1, 21, 21); /* HSYNCPOLARITY */
720
721 l = FLD_MOD(l, OMAP_DSS_RFBI_PARALLELMODE_8, 1, 0);
722 rfbi_write_reg(RFBI_CONFIG(0), l);
723
724 rfbi_enable_clocks(0);
725}
726#endif
727
728int rfbi_configure(int rfbi_module, int bpp, int lines)
729{ 611{
730 u32 l; 612 u32 l;
731 int cycle1 = 0, cycle2 = 0, cycle3 = 0; 613 int cycle1 = 0, cycle2 = 0, cycle3 = 0;
@@ -821,8 +703,6 @@ int rfbi_configure(int rfbi_module, int bpp, int lines)
821 break; 703 break;
822 } 704 }
823 705
824 rfbi_enable_clocks(1);
825
826 REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */ 706 REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */
827 707
828 l = 0; 708 l = 0;
@@ -856,11 +736,15 @@ int rfbi_configure(int rfbi_module, int bpp, int lines)
856 DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n", 736 DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n",
857 bpp, lines, cycle1, cycle2, cycle3); 737 bpp, lines, cycle1, cycle2, cycle3);
858 738
859 rfbi_enable_clocks(0);
860
861 return 0; 739 return 0;
862} 740}
863EXPORT_SYMBOL(rfbi_configure); 741
742int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size,
743 int data_lines)
744{
745 return rfbi_configure(dssdev->phy.rfbi.channel, pixel_size, data_lines);
746}
747EXPORT_SYMBOL(omap_rfbi_configure);
864 748
865int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, 749int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
866 u16 *x, u16 *y, u16 *w, u16 *h) 750 u16 *x, u16 *y, u16 *w, u16 *h)
@@ -960,6 +844,8 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
960{ 844{
961 int r; 845 int r;
962 846
847 rfbi_enable_clocks(1);
848
963 r = omap_dss_start_device(dssdev); 849 r = omap_dss_start_device(dssdev);
964 if (r) { 850 if (r) {
965 DSSERR("failed to start device\n"); 851 DSSERR("failed to start device\n");
@@ -1002,6 +888,8 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
1002 omap_dispc_unregister_isr(framedone_callback, NULL, 888 omap_dispc_unregister_isr(framedone_callback, NULL,
1003 DISPC_IRQ_FRAMEDONE); 889 DISPC_IRQ_FRAMEDONE);
1004 omap_dss_stop_device(dssdev); 890 omap_dss_stop_device(dssdev);
891
892 rfbi_enable_clocks(0);
1005} 893}
1006EXPORT_SYMBOL(omapdss_rfbi_display_disable); 894EXPORT_SYMBOL(omapdss_rfbi_display_disable);
1007 895
@@ -1021,11 +909,7 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
1021 909
1022 rfbi.pdev = pdev; 910 rfbi.pdev = pdev;
1023 911
1024 spin_lock_init(&rfbi.cmd_lock); 912 sema_init(&rfbi.bus_lock, 1);
1025
1026 init_completion(&rfbi.cmd_done);
1027 atomic_set(&rfbi.cmd_fifo_full, 0);
1028 atomic_set(&rfbi.cmd_pending, 0);
1029 913
1030 rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); 914 rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
1031 if (!rfbi_mem) { 915 if (!rfbi_mem) {
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 54a53e648180..0bd4b0350f80 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -25,7 +25,7 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
27 27
28#include <plat/display.h> 28#include <video/omapdss.h>
29#include <plat/cpu.h> 29#include <plat/cpu.h>
30#include "dss.h" 30#include "dss.h"
31 31
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 8e35a5bae429..980f919ed987 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -34,7 +34,7 @@
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/regulator/consumer.h> 35#include <linux/regulator/consumer.h>
36 36
37#include <plat/display.h> 37#include <video/omapdss.h>
38#include <plat/cpu.h> 38#include <plat/cpu.h>
39 39
40#include "dss.h" 40#include "dss.h"
@@ -373,8 +373,11 @@ static void venc_reset(void)
373 } 373 }
374 } 374 }
375 375
376#ifdef CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET
376 /* the magical sleep that makes things work */ 377 /* the magical sleep that makes things work */
378 /* XXX more info? What bug this circumvents? */
377 msleep(20); 379 msleep(20);
380#endif
378} 381}
379 382
380static void venc_enable_clocks(int enable) 383static void venc_enable_clocks(int enable)
@@ -473,6 +476,12 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
473 476
474 mutex_lock(&venc.venc_lock); 477 mutex_lock(&venc.venc_lock);
475 478
479 r = omap_dss_start_device(dssdev);
480 if (r) {
481 DSSERR("failed to start device\n");
482 goto err0;
483 }
484
476 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { 485 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
477 r = -EINVAL; 486 r = -EINVAL;
478 goto err1; 487 goto err1;
@@ -484,10 +493,11 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
484 493
485 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 494 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
486 495
487 /* wait couple of vsyncs until enabling the LCD */ 496 mutex_unlock(&venc.venc_lock);
488 msleep(50); 497 return 0;
489
490err1: 498err1:
499 omap_dss_stop_device(dssdev);
500err0:
491 mutex_unlock(&venc.venc_lock); 501 mutex_unlock(&venc.venc_lock);
492 502
493 return r; 503 return r;
@@ -510,10 +520,9 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
510 520
511 venc_power_off(dssdev); 521 venc_power_off(dssdev);
512 522
513 /* wait at least 5 vsyncs after disabling the LCD */
514 msleep(100);
515
516 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 523 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
524
525 omap_dss_stop_device(dssdev);
517end: 526end:
518 mutex_unlock(&venc.venc_lock); 527 mutex_unlock(&venc.venc_lock);
519} 528}
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6f435450987e..cff450392b79 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -28,7 +28,7 @@
28#include <linux/omapfb.h> 28#include <linux/omapfb.h>
29#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
30 30
31#include <plat/display.h> 31#include <video/omapdss.h>
32#include <plat/vrfb.h> 32#include <plat/vrfb.h>
33#include <plat/vram.h> 33#include <plat/vram.h>
34 34
@@ -895,8 +895,16 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
895 895
896 p.display_info.xres = xres; 896 p.display_info.xres = xres;
897 p.display_info.yres = yres; 897 p.display_info.yres = yres;
898 p.display_info.width = 0; 898
899 p.display_info.height = 0; 899 if (display->driver->get_dimensions) {
900 u32 w, h;
901 display->driver->get_dimensions(display, &w, &h);
902 p.display_info.width = w;
903 p.display_info.height = h;
904 } else {
905 p.display_info.width = 0;
906 p.display_info.height = 0;
907 }
900 908
901 if (copy_to_user((void __user *)arg, &p.display_info, 909 if (copy_to_user((void __user *)arg, &p.display_info,
902 sizeof(p.display_info))) 910 sizeof(p.display_info)))
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 505ec6672049..505bc12a3031 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/omapfb.h> 31#include <linux/omapfb.h>
32 32
33#include <plat/display.h> 33#include <video/omapdss.h>
34#include <plat/vram.h> 34#include <plat/vram.h>
35#include <plat/vrfb.h> 35#include <plat/vrfb.h>
36 36
@@ -702,8 +702,16 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
702 var->xres, var->yres, 702 var->xres, var->yres,
703 var->xres_virtual, var->yres_virtual); 703 var->xres_virtual, var->yres_virtual);
704 704
705 var->height = -1; 705 if (display && display->driver->get_dimensions) {
706 var->width = -1; 706 u32 w, h;
707 display->driver->get_dimensions(display, &w, &h);
708 var->width = DIV_ROUND_CLOSEST(w, 1000);
709 var->height = DIV_ROUND_CLOSEST(h, 1000);
710 } else {
711 var->height = -1;
712 var->width = -1;
713 }
714
707 var->grayscale = 0; 715 var->grayscale = 0;
708 716
709 if (display && display->driver->get_timings) { 717 if (display && display->driver->get_timings) {
@@ -749,35 +757,6 @@ static int omapfb_open(struct fb_info *fbi, int user)
749 757
750static int omapfb_release(struct fb_info *fbi, int user) 758static int omapfb_release(struct fb_info *fbi, int user)
751{ 759{
752#if 0
753 struct omapfb_info *ofbi = FB2OFB(fbi);
754 struct omapfb2_device *fbdev = ofbi->fbdev;
755 struct omap_dss_device *display = fb2display(fbi);
756
757 DBG("Closing fb with plane index %d\n", ofbi->id);
758
759 omapfb_lock(fbdev);
760
761 if (display && display->get_update_mode && display->update) {
762 /* XXX this update should be removed, I think. But it's
763 * good for debugging */
764 if (display->get_update_mode(display) ==
765 OMAP_DSS_UPDATE_MANUAL) {
766 u16 w, h;
767
768 if (display->sync)
769 display->sync(display);
770
771 display->get_resolution(display, &w, &h);
772 display->update(display, 0, 0, w, h);
773 }
774 }
775
776 if (display && display->sync)
777 display->sync(display);
778
779 omapfb_unlock(fbdev);
780#endif
781 return 0; 760 return 0;
782} 761}
783 762
@@ -1263,7 +1242,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1263 struct omapfb_info *ofbi = FB2OFB(fbi); 1242 struct omapfb_info *ofbi = FB2OFB(fbi);
1264 struct omapfb2_device *fbdev = ofbi->fbdev; 1243 struct omapfb2_device *fbdev = ofbi->fbdev;
1265 struct omap_dss_device *display = fb2display(fbi); 1244 struct omap_dss_device *display = fb2display(fbi);
1266 int do_update = 0;
1267 int r = 0; 1245 int r = 0;
1268 1246
1269 if (!display) 1247 if (!display)
@@ -1279,11 +1257,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1279 if (display->driver->resume) 1257 if (display->driver->resume)
1280 r = display->driver->resume(display); 1258 r = display->driver->resume(display);
1281 1259
1282 if (r == 0 && display->driver->get_update_mode &&
1283 display->driver->get_update_mode(display) ==
1284 OMAP_DSS_UPDATE_MANUAL)
1285 do_update = 1;
1286
1287 break; 1260 break;
1288 1261
1289 case FB_BLANK_NORMAL: 1262 case FB_BLANK_NORMAL:
@@ -1307,13 +1280,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1307exit: 1280exit:
1308 omapfb_unlock(fbdev); 1281 omapfb_unlock(fbdev);
1309 1282
1310 if (r == 0 && do_update && display->driver->update) {
1311 u16 w, h;
1312 display->driver->get_resolution(display, &w, &h);
1313
1314 r = display->driver->update(display, 0, 0, w, h);
1315 }
1316
1317 return r; 1283 return r;
1318} 1284}
1319 1285
@@ -2030,9 +1996,9 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
2030static int omapfb_mode_to_timings(const char *mode_str, 1996static int omapfb_mode_to_timings(const char *mode_str,
2031 struct omap_video_timings *timings, u8 *bpp) 1997 struct omap_video_timings *timings, u8 *bpp)
2032{ 1998{
2033 struct fb_info fbi; 1999 struct fb_info *fbi;
2034 struct fb_var_screeninfo var; 2000 struct fb_var_screeninfo *var;
2035 struct fb_ops fbops; 2001 struct fb_ops *fbops;
2036 int r; 2002 int r;
2037 2003
2038#ifdef CONFIG_OMAP2_DSS_VENC 2004#ifdef CONFIG_OMAP2_DSS_VENC
@@ -2050,39 +2016,66 @@ static int omapfb_mode_to_timings(const char *mode_str,
2050 /* this is quite a hack, but I wanted to use the modedb and for 2016 /* this is quite a hack, but I wanted to use the modedb and for
2051 * that we need fb_info and var, so we create dummy ones */ 2017 * that we need fb_info and var, so we create dummy ones */
2052 2018
2053 memset(&fbi, 0, sizeof(fbi)); 2019 *bpp = 0;
2054 memset(&var, 0, sizeof(var)); 2020 fbi = NULL;
2055 memset(&fbops, 0, sizeof(fbops)); 2021 var = NULL;
2056 fbi.fbops = &fbops; 2022 fbops = NULL;
2057
2058 r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24);
2059
2060 if (r != 0) {
2061 timings->pixel_clock = PICOS2KHZ(var.pixclock);
2062 timings->hbp = var.left_margin;
2063 timings->hfp = var.right_margin;
2064 timings->vbp = var.upper_margin;
2065 timings->vfp = var.lower_margin;
2066 timings->hsw = var.hsync_len;
2067 timings->vsw = var.vsync_len;
2068 timings->x_res = var.xres;
2069 timings->y_res = var.yres;
2070
2071 switch (var.bits_per_pixel) {
2072 case 16:
2073 *bpp = 16;
2074 break;
2075 case 24:
2076 case 32:
2077 default:
2078 *bpp = 24;
2079 break;
2080 }
2081 2023
2082 return 0; 2024 fbi = kzalloc(sizeof(*fbi), GFP_KERNEL);
2083 } else { 2025 if (fbi == NULL) {
2084 return -EINVAL; 2026 r = -ENOMEM;
2027 goto err;
2028 }
2029
2030 var = kzalloc(sizeof(*var), GFP_KERNEL);
2031 if (var == NULL) {
2032 r = -ENOMEM;
2033 goto err;
2034 }
2035
2036 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
2037 if (fbops == NULL) {
2038 r = -ENOMEM;
2039 goto err;
2040 }
2041
2042 fbi->fbops = fbops;
2043
2044 r = fb_find_mode(var, fbi, mode_str, NULL, 0, NULL, 24);
2045 if (r == 0) {
2046 r = -EINVAL;
2047 goto err;
2048 }
2049
2050 timings->pixel_clock = PICOS2KHZ(var->pixclock);
2051 timings->hbp = var->left_margin;
2052 timings->hfp = var->right_margin;
2053 timings->vbp = var->upper_margin;
2054 timings->vfp = var->lower_margin;
2055 timings->hsw = var->hsync_len;
2056 timings->vsw = var->vsync_len;
2057 timings->x_res = var->xres;
2058 timings->y_res = var->yres;
2059
2060 switch (var->bits_per_pixel) {
2061 case 16:
2062 *bpp = 16;
2063 break;
2064 case 24:
2065 case 32:
2066 default:
2067 *bpp = 24;
2068 break;
2085 } 2069 }
2070
2071 r = 0;
2072
2073err:
2074 kfree(fbi);
2075 kfree(var);
2076 kfree(fbops);
2077
2078 return r;
2086} 2079}
2087 2080
2088static int omapfb_set_def_mode(struct omapfb2_device *fbdev, 2081static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
@@ -2185,6 +2178,61 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
2185 return r; 2178 return r;
2186} 2179}
2187 2180
2181static int omapfb_init_display(struct omapfb2_device *fbdev,
2182 struct omap_dss_device *dssdev)
2183{
2184 struct omap_dss_driver *dssdrv = dssdev->driver;
2185 int r;
2186
2187 r = dssdrv->enable(dssdev);
2188 if (r) {
2189 dev_warn(fbdev->dev, "Failed to enable display '%s'\n",
2190 dssdev->name);
2191 return r;
2192 }
2193
2194 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2195 u16 w, h;
2196 if (dssdrv->enable_te) {
2197 r = dssdrv->enable_te(dssdev, 1);
2198 if (r) {
2199 dev_err(fbdev->dev, "Failed to set TE\n");
2200 return r;
2201 }
2202 }
2203
2204 if (dssdrv->set_update_mode) {
2205 r = dssdrv->set_update_mode(dssdev,
2206 OMAP_DSS_UPDATE_MANUAL);
2207 if (r) {
2208 dev_err(fbdev->dev,
2209 "Failed to set update mode\n");
2210 return r;
2211 }
2212 }
2213
2214 dssdrv->get_resolution(dssdev, &w, &h);
2215 r = dssdrv->update(dssdev, 0, 0, w, h);
2216 if (r) {
2217 dev_err(fbdev->dev,
2218 "Failed to update display\n");
2219 return r;
2220 }
2221 } else {
2222 if (dssdrv->set_update_mode) {
2223 r = dssdrv->set_update_mode(dssdev,
2224 OMAP_DSS_UPDATE_AUTO);
2225 if (r) {
2226 dev_err(fbdev->dev,
2227 "Failed to set update mode\n");
2228 return r;
2229 }
2230 }
2231 }
2232
2233 return 0;
2234}
2235
2188static int omapfb_probe(struct platform_device *pdev) 2236static int omapfb_probe(struct platform_device *pdev)
2189{ 2237{
2190 struct omapfb2_device *fbdev = NULL; 2238 struct omapfb2_device *fbdev = NULL;
@@ -2284,30 +2332,13 @@ static int omapfb_probe(struct platform_device *pdev)
2284 } 2332 }
2285 2333
2286 if (def_display) { 2334 if (def_display) {
2287 struct omap_dss_driver *dssdrv = def_display->driver; 2335 r = omapfb_init_display(fbdev, def_display);
2288
2289 r = def_display->driver->enable(def_display);
2290 if (r) { 2336 if (r) {
2291 dev_warn(fbdev->dev, "Failed to enable display '%s'\n", 2337 dev_err(fbdev->dev,
2292 def_display->name); 2338 "failed to initialize default "
2339 "display\n");
2293 goto cleanup; 2340 goto cleanup;
2294 } 2341 }
2295
2296 if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2297 u16 w, h;
2298 if (dssdrv->enable_te)
2299 dssdrv->enable_te(def_display, 1);
2300 if (dssdrv->set_update_mode)
2301 dssdrv->set_update_mode(def_display,
2302 OMAP_DSS_UPDATE_MANUAL);
2303
2304 dssdrv->get_resolution(def_display, &w, &h);
2305 def_display->driver->update(def_display, 0, 0, w, h);
2306 } else {
2307 if (dssdrv->set_update_mode)
2308 dssdrv->set_update_mode(def_display,
2309 OMAP_DSS_UPDATE_AUTO);
2310 }
2311 } 2342 }
2312 2343
2313 DBG("create sysfs for fbs\n"); 2344 DBG("create sysfs for fbs\n");
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 6f9c72cd6bb0..2f5e817b2a9a 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/omapfb.h> 30#include <linux/omapfb.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33#include <plat/vrfb.h> 33#include <plat/vrfb.h>
34 34
35#include "omapfb.h" 35#include "omapfb.h"
@@ -50,10 +50,12 @@ static ssize_t store_rotate_type(struct device *dev,
50 struct fb_info *fbi = dev_get_drvdata(dev); 50 struct fb_info *fbi = dev_get_drvdata(dev);
51 struct omapfb_info *ofbi = FB2OFB(fbi); 51 struct omapfb_info *ofbi = FB2OFB(fbi);
52 struct omapfb2_mem_region *rg; 52 struct omapfb2_mem_region *rg;
53 enum omap_dss_rotation_type rot_type; 53 int rot_type;
54 int r; 54 int r;
55 55
56 rot_type = simple_strtoul(buf, NULL, 0); 56 r = kstrtoint(buf, 0, &rot_type);
57 if (r)
58 return r;
57 59
58 if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) 60 if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
59 return -EINVAL; 61 return -EINVAL;
@@ -102,14 +104,15 @@ static ssize_t store_mirror(struct device *dev,
102{ 104{
103 struct fb_info *fbi = dev_get_drvdata(dev); 105 struct fb_info *fbi = dev_get_drvdata(dev);
104 struct omapfb_info *ofbi = FB2OFB(fbi); 106 struct omapfb_info *ofbi = FB2OFB(fbi);
105 unsigned long mirror; 107 int mirror;
106 int r; 108 int r;
107 struct fb_var_screeninfo new_var; 109 struct fb_var_screeninfo new_var;
108 110
109 mirror = simple_strtoul(buf, NULL, 0); 111 r = kstrtoint(buf, 0, &mirror);
112 if (r)
113 return r;
110 114
111 if (mirror != 0 && mirror != 1) 115 mirror = !!mirror;
112 return -EINVAL;
113 116
114 if (!lock_fb_info(fbi)) 117 if (!lock_fb_info(fbi))
115 return -ENODEV; 118 return -ENODEV;
@@ -445,7 +448,11 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
445 int r; 448 int r;
446 int i; 449 int i;
447 450
448 size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0)); 451 r = kstrtoul(buf, 0, &size);
452 if (r)
453 return r;
454
455 size = PAGE_ALIGN(size);
449 456
450 if (!lock_fb_info(fbi)) 457 if (!lock_fb_info(fbi))
451 return -ENODEV; 458 return -ENODEV;
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index 1305fc9880ba..aa1b1d974276 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -29,13 +29,15 @@
29 29
30#include <linux/rwsem.h> 30#include <linux/rwsem.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33 33
34#ifdef DEBUG 34#ifdef DEBUG
35extern unsigned int omapfb_debug; 35extern unsigned int omapfb_debug;
36#define DBG(format, ...) \ 36#define DBG(format, ...) \
37 if (omapfb_debug) \ 37 do { \
38 printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__) 38 if (omapfb_debug) \
39 printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__); \
40 } while (0)
39#else 41#else
40#define DBG(format, ...) 42#define DBG(format, ...)
41#endif 43#endif
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 3b6cdcac8f1a..0352afa49a39 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -182,6 +182,7 @@ struct s3c_fb_vsync {
182 182
183/** 183/**
184 * struct s3c_fb - overall hardware state of the hardware 184 * struct s3c_fb - overall hardware state of the hardware
 185 * @slock: The spinlock protection for this data structure.
185 * @dev: The device that we bound to, for printing, etc. 186 * @dev: The device that we bound to, for printing, etc.
186 * @regs_res: The resource we claimed for the IO registers. 187 * @regs_res: The resource we claimed for the IO registers.
187 * @bus_clk: The clk (hclk) feeding our interface and possibly pixclk. 188 * @bus_clk: The clk (hclk) feeding our interface and possibly pixclk.
@@ -195,6 +196,7 @@ struct s3c_fb_vsync {
195 * @vsync_info: VSYNC-related information (count, queues...) 196 * @vsync_info: VSYNC-related information (count, queues...)
196 */ 197 */
197struct s3c_fb { 198struct s3c_fb {
199 spinlock_t slock;
198 struct device *dev; 200 struct device *dev;
199 struct resource *regs_res; 201 struct resource *regs_res;
200 struct clk *bus_clk; 202 struct clk *bus_clk;
@@ -300,6 +302,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
300 var->blue.length = 5; 302 var->blue.length = 5;
301 break; 303 break;
302 304
305 case 32:
303 case 28: 306 case 28:
304 case 25: 307 case 25:
305 var->transp.length = var->bits_per_pixel - 24; 308 var->transp.length = var->bits_per_pixel - 24;
@@ -308,7 +311,6 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
308 case 24: 311 case 24:
309 /* our 24bpp is unpacked, so 32bpp */ 312 /* our 24bpp is unpacked, so 32bpp */
310 var->bits_per_pixel = 32; 313 var->bits_per_pixel = 32;
311 case 32:
312 var->red.offset = 16; 314 var->red.offset = 16;
313 var->red.length = 8; 315 var->red.length = 8;
314 var->green.offset = 8; 316 var->green.offset = 8;
@@ -947,6 +949,8 @@ static irqreturn_t s3c_fb_irq(int irq, void *dev_id)
947 void __iomem *regs = sfb->regs; 949 void __iomem *regs = sfb->regs;
948 u32 irq_sts_reg; 950 u32 irq_sts_reg;
949 951
952 spin_lock(&sfb->slock);
953
950 irq_sts_reg = readl(regs + VIDINTCON1); 954 irq_sts_reg = readl(regs + VIDINTCON1);
951 955
952 if (irq_sts_reg & VIDINTCON1_INT_FRAME) { 956 if (irq_sts_reg & VIDINTCON1_INT_FRAME) {
@@ -963,6 +967,7 @@ static irqreturn_t s3c_fb_irq(int irq, void *dev_id)
963 */ 967 */
964 s3c_fb_disable_irq(sfb); 968 s3c_fb_disable_irq(sfb);
965 969
970 spin_unlock(&sfb->slock);
966 return IRQ_HANDLED; 971 return IRQ_HANDLED;
967} 972}
968 973
@@ -1339,6 +1344,8 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
1339 sfb->pdata = pd; 1344 sfb->pdata = pd;
1340 sfb->variant = fbdrv->variant; 1345 sfb->variant = fbdrv->variant;
1341 1346
1347 spin_lock_init(&sfb->slock);
1348
1342 sfb->bus_clk = clk_get(dev, "lcd"); 1349 sfb->bus_clk = clk_get(dev, "lcd");
1343 if (IS_ERR(sfb->bus_clk)) { 1350 if (IS_ERR(sfb->bus_clk)) {
1344 dev_err(dev, "failed to get bus clock\n"); 1351 dev_err(dev, "failed to get bus clock\n");
@@ -1442,8 +1449,7 @@ err_ioremap:
1442 iounmap(sfb->regs); 1449 iounmap(sfb->regs);
1443 1450
1444err_req_region: 1451err_req_region:
1445 release_resource(sfb->regs_res); 1452 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
1446 kfree(sfb->regs_res);
1447 1453
1448err_clk: 1454err_clk:
1449 clk_disable(sfb->bus_clk); 1455 clk_disable(sfb->bus_clk);
@@ -1479,8 +1485,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
1479 clk_disable(sfb->bus_clk); 1485 clk_disable(sfb->bus_clk);
1480 clk_put(sfb->bus_clk); 1486 clk_put(sfb->bus_clk);
1481 1487
1482 release_resource(sfb->regs_res); 1488 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
1483 kfree(sfb->regs_res);
1484 1489
1485 kfree(sfb); 1490 kfree(sfb);
1486 1491
@@ -1521,7 +1526,8 @@ static int s3c_fb_resume(struct device *dev)
1521 1526
1522 clk_enable(sfb->bus_clk); 1527 clk_enable(sfb->bus_clk);
1523 1528
1524 /* setup registers */ 1529 /* setup gpio and output polarity controls */
1530 pd->setup_gpio();
1525 writel(pd->vidcon1, sfb->regs + VIDCON1); 1531 writel(pd->vidcon1, sfb->regs + VIDCON1);
1526 1532
1527 /* zero all windows before we do anything */ 1533 /* zero all windows before we do anything */
@@ -1549,7 +1555,7 @@ static int s3c_fb_resume(struct device *dev)
1549 return 0; 1555 return 0;
1550} 1556}
1551 1557
1552int s3c_fb_runtime_suspend(struct device *dev) 1558static int s3c_fb_runtime_suspend(struct device *dev)
1553{ 1559{
1554 struct platform_device *pdev = to_platform_device(dev); 1560 struct platform_device *pdev = to_platform_device(dev);
1555 struct s3c_fb *sfb = platform_get_drvdata(pdev); 1561 struct s3c_fb *sfb = platform_get_drvdata(pdev);
@@ -1569,7 +1575,7 @@ int s3c_fb_runtime_suspend(struct device *dev)
1569 return 0; 1575 return 0;
1570} 1576}
1571 1577
1572int s3c_fb_runtime_resume(struct device *dev) 1578static int s3c_fb_runtime_resume(struct device *dev)
1573{ 1579{
1574 struct platform_device *pdev = to_platform_device(dev); 1580 struct platform_device *pdev = to_platform_device(dev);
1575 struct s3c_fb *sfb = platform_get_drvdata(pdev); 1581 struct s3c_fb *sfb = platform_get_drvdata(pdev);
@@ -1579,7 +1585,8 @@ int s3c_fb_runtime_resume(struct device *dev)
1579 1585
1580 clk_enable(sfb->bus_clk); 1586 clk_enable(sfb->bus_clk);
1581 1587
1582 /* setup registers */ 1588 /* setup gpio and output polarity controls */
1589 pd->setup_gpio();
1583 writel(pd->vidcon1, sfb->regs + VIDCON1); 1590 writel(pd->vidcon1, sfb->regs + VIDCON1);
1584 1591
1585 /* zero all windows before we do anything */ 1592 /* zero all windows before we do anything */
@@ -1623,28 +1630,31 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
1623 .has_osd_c = 1, 1630 .has_osd_c = 1,
1624 .osd_size_off = 0x8, 1631 .osd_size_off = 0x8,
1625 .palette_sz = 256, 1632 .palette_sz = 256,
1626 .valid_bpp = VALID_BPP1248 | VALID_BPP(16) | VALID_BPP(24), 1633 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
1634 VALID_BPP(18) | VALID_BPP(24)),
1627 }, 1635 },
1628 [1] = { 1636 [1] = {
1629 .has_osd_c = 1, 1637 .has_osd_c = 1,
1630 .has_osd_d = 1, 1638 .has_osd_d = 1,
1631 .osd_size_off = 0x12, 1639 .osd_size_off = 0xc,
1632 .has_osd_alpha = 1, 1640 .has_osd_alpha = 1,
1633 .palette_sz = 256, 1641 .palette_sz = 256,
1634 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) | 1642 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
1635 VALID_BPP(18) | VALID_BPP(19) | 1643 VALID_BPP(18) | VALID_BPP(19) |
1636 VALID_BPP(24) | VALID_BPP(25)), 1644 VALID_BPP(24) | VALID_BPP(25) |
1645 VALID_BPP(28)),
1637 }, 1646 },
1638 [2] = { 1647 [2] = {
1639 .has_osd_c = 1, 1648 .has_osd_c = 1,
1640 .has_osd_d = 1, 1649 .has_osd_d = 1,
1641 .osd_size_off = 0x12, 1650 .osd_size_off = 0xc,
1642 .has_osd_alpha = 1, 1651 .has_osd_alpha = 1,
1643 .palette_sz = 16, 1652 .palette_sz = 16,
1644 .palette_16bpp = 1, 1653 .palette_16bpp = 1,
1645 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) | 1654 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
1646 VALID_BPP(18) | VALID_BPP(19) | 1655 VALID_BPP(18) | VALID_BPP(19) |
1647 VALID_BPP(24) | VALID_BPP(25)), 1656 VALID_BPP(24) | VALID_BPP(25) |
1657 VALID_BPP(28)),
1648 }, 1658 },
1649 [3] = { 1659 [3] = {
1650 .has_osd_c = 1, 1660 .has_osd_c = 1,
@@ -1653,7 +1663,8 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
1653 .palette_16bpp = 1, 1663 .palette_16bpp = 1,
1654 .valid_bpp = (VALID_BPP124 | VALID_BPP(16) | 1664 .valid_bpp = (VALID_BPP124 | VALID_BPP(16) |
1655 VALID_BPP(18) | VALID_BPP(19) | 1665 VALID_BPP(18) | VALID_BPP(19) |
1656 VALID_BPP(24) | VALID_BPP(25)), 1666 VALID_BPP(24) | VALID_BPP(25) |
1667 VALID_BPP(28)),
1657 }, 1668 },
1658 [4] = { 1669 [4] = {
1659 .has_osd_c = 1, 1670 .has_osd_c = 1,
@@ -1662,7 +1673,65 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
1662 .palette_16bpp = 1, 1673 .palette_16bpp = 1,
1663 .valid_bpp = (VALID_BPP(1) | VALID_BPP(2) | 1674 .valid_bpp = (VALID_BPP(1) | VALID_BPP(2) |
1664 VALID_BPP(16) | VALID_BPP(18) | 1675 VALID_BPP(16) | VALID_BPP(18) |
1665 VALID_BPP(24) | VALID_BPP(25)), 1676 VALID_BPP(19) | VALID_BPP(24) |
1677 VALID_BPP(25) | VALID_BPP(28)),
1678 },
1679};
1680
1681static struct s3c_fb_win_variant s3c_fb_data_s5p_wins[] = {
1682 [0] = {
1683 .has_osd_c = 1,
1684 .osd_size_off = 0x8,
1685 .palette_sz = 256,
1686 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1687 VALID_BPP(15) | VALID_BPP(16) |
1688 VALID_BPP(18) | VALID_BPP(19) |
1689 VALID_BPP(24) | VALID_BPP(25) |
1690 VALID_BPP(32)),
1691 },
1692 [1] = {
1693 .has_osd_c = 1,
1694 .has_osd_d = 1,
1695 .osd_size_off = 0xc,
1696 .has_osd_alpha = 1,
1697 .palette_sz = 256,
1698 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1699 VALID_BPP(15) | VALID_BPP(16) |
1700 VALID_BPP(18) | VALID_BPP(19) |
1701 VALID_BPP(24) | VALID_BPP(25) |
1702 VALID_BPP(32)),
1703 },
1704 [2] = {
1705 .has_osd_c = 1,
1706 .has_osd_d = 1,
1707 .osd_size_off = 0xc,
1708 .has_osd_alpha = 1,
1709 .palette_sz = 256,
1710 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1711 VALID_BPP(15) | VALID_BPP(16) |
1712 VALID_BPP(18) | VALID_BPP(19) |
1713 VALID_BPP(24) | VALID_BPP(25) |
1714 VALID_BPP(32)),
1715 },
1716 [3] = {
1717 .has_osd_c = 1,
1718 .has_osd_alpha = 1,
1719 .palette_sz = 256,
1720 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1721 VALID_BPP(15) | VALID_BPP(16) |
1722 VALID_BPP(18) | VALID_BPP(19) |
1723 VALID_BPP(24) | VALID_BPP(25) |
1724 VALID_BPP(32)),
1725 },
1726 [4] = {
1727 .has_osd_c = 1,
1728 .has_osd_alpha = 1,
1729 .palette_sz = 256,
1730 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1731 VALID_BPP(15) | VALID_BPP(16) |
1732 VALID_BPP(18) | VALID_BPP(19) |
1733 VALID_BPP(24) | VALID_BPP(25) |
1734 VALID_BPP(32)),
1666 }, 1735 },
1667}; 1736};
1668 1737
@@ -1719,11 +1788,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = {
1719 1788
1720 .has_prtcon = 1, 1789 .has_prtcon = 1,
1721 }, 1790 },
1722 .win[0] = &s3c_fb_data_64xx_wins[0], 1791 .win[0] = &s3c_fb_data_s5p_wins[0],
1723 .win[1] = &s3c_fb_data_64xx_wins[1], 1792 .win[1] = &s3c_fb_data_s5p_wins[1],
1724 .win[2] = &s3c_fb_data_64xx_wins[2], 1793 .win[2] = &s3c_fb_data_s5p_wins[2],
1725 .win[3] = &s3c_fb_data_64xx_wins[3], 1794 .win[3] = &s3c_fb_data_s5p_wins[3],
1726 .win[4] = &s3c_fb_data_64xx_wins[4], 1795 .win[4] = &s3c_fb_data_s5p_wins[4],
1727}; 1796};
1728 1797
1729static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = { 1798static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
@@ -1749,11 +1818,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
1749 1818
1750 .has_shadowcon = 1, 1819 .has_shadowcon = 1,
1751 }, 1820 },
1752 .win[0] = &s3c_fb_data_64xx_wins[0], 1821 .win[0] = &s3c_fb_data_s5p_wins[0],
1753 .win[1] = &s3c_fb_data_64xx_wins[1], 1822 .win[1] = &s3c_fb_data_s5p_wins[1],
1754 .win[2] = &s3c_fb_data_64xx_wins[2], 1823 .win[2] = &s3c_fb_data_s5p_wins[2],
1755 .win[3] = &s3c_fb_data_64xx_wins[3], 1824 .win[3] = &s3c_fb_data_s5p_wins[3],
1756 .win[4] = &s3c_fb_data_64xx_wins[4], 1825 .win[4] = &s3c_fb_data_s5p_wins[4],
1757}; 1826};
1758 1827
1759/* S3C2443/S3C2416 style hardware */ 1828/* S3C2443/S3C2416 style hardware */
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 61c819e35f7f..0aa13761de6e 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -867,7 +867,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
867 goto dealloc_fb; 867 goto dealloc_fb;
868 } 868 }
869 869
870 size = (res->end - res->start) + 1; 870 size = resource_size(res);
871 info->mem = request_mem_region(res->start, size, pdev->name); 871 info->mem = request_mem_region(res->start, size, pdev->name);
872 if (info->mem == NULL) { 872 if (info->mem == NULL) {
873 dev_err(&pdev->dev, "failed to get memory region\n"); 873 dev_err(&pdev->dev, "failed to get memory region\n");
@@ -997,8 +997,7 @@ release_irq:
997release_regs: 997release_regs:
998 iounmap(info->io); 998 iounmap(info->io);
999release_mem: 999release_mem:
1000 release_resource(info->mem); 1000 release_mem_region(res->start, size);
1001 kfree(info->mem);
1002dealloc_fb: 1001dealloc_fb:
1003 platform_set_drvdata(pdev, NULL); 1002 platform_set_drvdata(pdev, NULL);
1004 framebuffer_release(fbinfo); 1003 framebuffer_release(fbinfo);
@@ -1044,8 +1043,7 @@ static int __devexit s3c2410fb_remove(struct platform_device *pdev)
1044 1043
1045 iounmap(info->io); 1044 iounmap(info->io);
1046 1045
1047 release_resource(info->mem); 1046 release_mem_region(info->mem->start, resource_size(info->mem));
1048 kfree(info->mem);
1049 1047
1050 platform_set_drvdata(pdev, NULL); 1048 platform_set_drvdata(pdev, NULL);
1051 framebuffer_release(fbinfo); 1049 framebuffer_release(fbinfo);
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index c4482f2e5799..4ca5d0c8fe84 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -25,6 +25,9 @@
25#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */ 25#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
26#include <video/vga.h> 26#include <video/vga.h>
27 27
28#include <linux/i2c.h>
29#include <linux/i2c-algo-bit.h>
30
28#ifdef CONFIG_MTRR 31#ifdef CONFIG_MTRR
29#include <asm/mtrr.h> 32#include <asm/mtrr.h>
30#endif 33#endif
@@ -36,6 +39,12 @@ struct s3fb_info {
36 struct mutex open_lock; 39 struct mutex open_lock;
37 unsigned int ref_count; 40 unsigned int ref_count;
38 u32 pseudo_palette[16]; 41 u32 pseudo_palette[16];
42#ifdef CONFIG_FB_S3_DDC
43 u8 __iomem *mmio;
44 bool ddc_registered;
45 struct i2c_adapter ddc_adapter;
46 struct i2c_algo_bit_data ddc_algo;
47#endif
39}; 48};
40 49
41 50
@@ -105,6 +114,9 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
105#define CHIP_UNDECIDED_FLAG 0x80 114#define CHIP_UNDECIDED_FLAG 0x80
106#define CHIP_MASK 0xFF 115#define CHIP_MASK 0xFF
107 116
117#define MMIO_OFFSET 0x1000000
118#define MMIO_SIZE 0x10000
119
108/* CRT timing register sets */ 120/* CRT timing register sets */
109 121
110static const struct vga_regset s3_h_total_regs[] = {{0x00, 0, 7}, {0x5D, 0, 0}, VGA_REGSET_END}; 122static const struct vga_regset s3_h_total_regs[] = {{0x00, 0, 7}, {0x5D, 0, 0}, VGA_REGSET_END};
@@ -140,7 +152,7 @@ static const struct svga_timing_regs s3_timing_regs = {
140/* Module parameters */ 152/* Module parameters */
141 153
142 154
143static char *mode_option __devinitdata = "640x480-8@60"; 155static char *mode_option __devinitdata;
144 156
145#ifdef CONFIG_MTRR 157#ifdef CONFIG_MTRR
146static int mtrr __devinitdata = 1; 158static int mtrr __devinitdata = 1;
@@ -169,6 +181,119 @@ MODULE_PARM_DESC(fasttext, "Enable S3 fast text mode (1=enable, 0=disable, defau
169 181
170/* ------------------------------------------------------------------------- */ 182/* ------------------------------------------------------------------------- */
171 183
184#ifdef CONFIG_FB_S3_DDC
185
186#define DDC_REG 0xaa /* Trio 3D/1X/2X */
187#define DDC_MMIO_REG 0xff20 /* all other chips */
188#define DDC_SCL_OUT (1 << 0)
189#define DDC_SDA_OUT (1 << 1)
190#define DDC_SCL_IN (1 << 2)
191#define DDC_SDA_IN (1 << 3)
192#define DDC_DRIVE_EN (1 << 4)
193
194static bool s3fb_ddc_needs_mmio(int chip)
195{
196 return !(chip == CHIP_360_TRIO3D_1X ||
197 chip == CHIP_362_TRIO3D_2X ||
198 chip == CHIP_368_TRIO3D_2X);
199}
200
201static u8 s3fb_ddc_read(struct s3fb_info *par)
202{
203 if (s3fb_ddc_needs_mmio(par->chip))
204 return readb(par->mmio + DDC_MMIO_REG);
205 else
206 return vga_rcrt(par->state.vgabase, DDC_REG);
207}
208
209static void s3fb_ddc_write(struct s3fb_info *par, u8 val)
210{
211 if (s3fb_ddc_needs_mmio(par->chip))
212 writeb(val, par->mmio + DDC_MMIO_REG);
213 else
214 vga_wcrt(par->state.vgabase, DDC_REG, val);
215}
216
217static void s3fb_ddc_setscl(void *data, int val)
218{
219 struct s3fb_info *par = data;
220 unsigned char reg;
221
222 reg = s3fb_ddc_read(par) | DDC_DRIVE_EN;
223 if (val)
224 reg |= DDC_SCL_OUT;
225 else
226 reg &= ~DDC_SCL_OUT;
227 s3fb_ddc_write(par, reg);
228}
229
230static void s3fb_ddc_setsda(void *data, int val)
231{
232 struct s3fb_info *par = data;
233 unsigned char reg;
234
235 reg = s3fb_ddc_read(par) | DDC_DRIVE_EN;
236 if (val)
237 reg |= DDC_SDA_OUT;
238 else
239 reg &= ~DDC_SDA_OUT;
240 s3fb_ddc_write(par, reg);
241}
242
243static int s3fb_ddc_getscl(void *data)
244{
245 struct s3fb_info *par = data;
246
247 return !!(s3fb_ddc_read(par) & DDC_SCL_IN);
248}
249
250static int s3fb_ddc_getsda(void *data)
251{
252 struct s3fb_info *par = data;
253
254 return !!(s3fb_ddc_read(par) & DDC_SDA_IN);
255}
256
257static int __devinit s3fb_setup_ddc_bus(struct fb_info *info)
258{
259 struct s3fb_info *par = info->par;
260
261 strlcpy(par->ddc_adapter.name, info->fix.id,
262 sizeof(par->ddc_adapter.name));
263 par->ddc_adapter.owner = THIS_MODULE;
264 par->ddc_adapter.class = I2C_CLASS_DDC;
265 par->ddc_adapter.algo_data = &par->ddc_algo;
266 par->ddc_adapter.dev.parent = info->device;
267 par->ddc_algo.setsda = s3fb_ddc_setsda;
268 par->ddc_algo.setscl = s3fb_ddc_setscl;
269 par->ddc_algo.getsda = s3fb_ddc_getsda;
270 par->ddc_algo.getscl = s3fb_ddc_getscl;
271 par->ddc_algo.udelay = 10;
272 par->ddc_algo.timeout = 20;
273 par->ddc_algo.data = par;
274
275 i2c_set_adapdata(&par->ddc_adapter, par);
276
277 /*
 278 * some Virge cards have an external MUX to switch the chip I2C bus between
 279 * DDC and extension pins - switch it to DDC
280 */
281/* vga_wseq(par->state.vgabase, 0x08, 0x06); - not needed, already unlocked */
282 if (par->chip == CHIP_357_VIRGE_GX2 ||
283 par->chip == CHIP_359_VIRGE_GX2P)
284 svga_wseq_mask(par->state.vgabase, 0x0d, 0x01, 0x03);
285 else
286 svga_wseq_mask(par->state.vgabase, 0x0d, 0x00, 0x03);
 287 /* some Virge cards need this or the DDC is ignored */
288 svga_wcrt_mask(par->state.vgabase, 0x5c, 0x03, 0x03);
289
290 return i2c_bit_add_bus(&par->ddc_adapter);
291}
292#endif /* CONFIG_FB_S3_DDC */
293
294
295/* ------------------------------------------------------------------------- */
296
172/* Set font in S3 fast text mode */ 297/* Set font in S3 fast text mode */
173 298
174static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map) 299static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map)
@@ -994,6 +1119,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
994 struct s3fb_info *par; 1119 struct s3fb_info *par;
995 int rc; 1120 int rc;
996 u8 regval, cr38, cr39; 1121 u8 regval, cr38, cr39;
1122 bool found = false;
997 1123
998 /* Ignore secondary VGA device because there is no VGA arbitration */ 1124 /* Ignore secondary VGA device because there is no VGA arbitration */
999 if (! svga_primary_device(dev)) { 1125 if (! svga_primary_device(dev)) {
@@ -1110,12 +1236,69 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
1110 info->fix.ypanstep = 0; 1236 info->fix.ypanstep = 0;
1111 info->fix.accel = FB_ACCEL_NONE; 1237 info->fix.accel = FB_ACCEL_NONE;
1112 info->pseudo_palette = (void*) (par->pseudo_palette); 1238 info->pseudo_palette = (void*) (par->pseudo_palette);
1239 info->var.bits_per_pixel = 8;
1240
1241#ifdef CONFIG_FB_S3_DDC
1242 /* Enable MMIO if needed */
1243 if (s3fb_ddc_needs_mmio(par->chip)) {
1244 par->mmio = ioremap(info->fix.smem_start + MMIO_OFFSET, MMIO_SIZE);
1245 if (par->mmio)
1246 svga_wcrt_mask(par->state.vgabase, 0x53, 0x08, 0x08); /* enable MMIO */
1247 else
1248 dev_err(info->device, "unable to map MMIO at 0x%lx, disabling DDC",
1249 info->fix.smem_start + MMIO_OFFSET);
1250 }
1251 if (!s3fb_ddc_needs_mmio(par->chip) || par->mmio)
1252 if (s3fb_setup_ddc_bus(info) == 0) {
1253 u8 *edid = fb_ddc_read(&par->ddc_adapter);
1254 par->ddc_registered = true;
1255 if (edid) {
1256 fb_edid_to_monspecs(edid, &info->monspecs);
1257 kfree(edid);
1258 if (!info->monspecs.modedb)
1259 dev_err(info->device, "error getting mode database\n");
1260 else {
1261 const struct fb_videomode *m;
1262
1263 fb_videomode_to_modelist(info->monspecs.modedb,
1264 info->monspecs.modedb_len,
1265 &info->modelist);
1266 m = fb_find_best_display(&info->monspecs, &info->modelist);
1267 if (m) {
1268 fb_videomode_to_var(&info->var, m);
1269 /* fill all other info->var's fields */
1270 if (s3fb_check_var(&info->var, info) == 0)
1271 found = true;
1272 }
1273 }
1274 }
1275 }
1276#endif
1277 if (!mode_option && !found)
1278 mode_option = "640x480-8@60";
1113 1279
1114 /* Prepare startup mode */ 1280 /* Prepare startup mode */
1115 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8); 1281 if (mode_option) {
1116 if (! ((rc == 1) || (rc == 2))) { 1282 rc = fb_find_mode(&info->var, info, mode_option,
1117 rc = -EINVAL; 1283 info->monspecs.modedb, info->monspecs.modedb_len,
1118 dev_err(info->device, "mode %s not found\n", mode_option); 1284 NULL, info->var.bits_per_pixel);
1285 if (!rc || rc == 4) {
1286 rc = -EINVAL;
1287 dev_err(info->device, "mode %s not found\n", mode_option);
1288 fb_destroy_modedb(info->monspecs.modedb);
1289 info->monspecs.modedb = NULL;
1290 goto err_find_mode;
1291 }
1292 }
1293
1294 fb_destroy_modedb(info->monspecs.modedb);
1295 info->monspecs.modedb = NULL;
1296
1297 /* maximize virtual vertical size for fast scrolling */
1298 info->var.yres_virtual = info->fix.smem_len * 8 /
1299 (info->var.bits_per_pixel * info->var.xres_virtual);
1300 if (info->var.yres_virtual < info->var.yres) {
1301 dev_err(info->device, "virtual vertical size smaller than real\n");
1119 goto err_find_mode; 1302 goto err_find_mode;
1120 } 1303 }
1121 1304
@@ -1164,6 +1347,12 @@ err_reg_fb:
1164 fb_dealloc_cmap(&info->cmap); 1347 fb_dealloc_cmap(&info->cmap);
1165err_alloc_cmap: 1348err_alloc_cmap:
1166err_find_mode: 1349err_find_mode:
1350#ifdef CONFIG_FB_S3_DDC
1351 if (par->ddc_registered)
1352 i2c_del_adapter(&par->ddc_adapter);
1353 if (par->mmio)
1354 iounmap(par->mmio);
1355#endif
1167 pci_iounmap(dev, info->screen_base); 1356 pci_iounmap(dev, info->screen_base);
1168err_iomap: 1357err_iomap:
1169 pci_release_regions(dev); 1358 pci_release_regions(dev);
@@ -1180,12 +1369,11 @@ err_enable_device:
1180static void __devexit s3_pci_remove(struct pci_dev *dev) 1369static void __devexit s3_pci_remove(struct pci_dev *dev)
1181{ 1370{
1182 struct fb_info *info = pci_get_drvdata(dev); 1371 struct fb_info *info = pci_get_drvdata(dev);
1372 struct s3fb_info __maybe_unused *par = info->par;
1183 1373
1184 if (info) { 1374 if (info) {
1185 1375
1186#ifdef CONFIG_MTRR 1376#ifdef CONFIG_MTRR
1187 struct s3fb_info *par = info->par;
1188
1189 if (par->mtrr_reg >= 0) { 1377 if (par->mtrr_reg >= 0) {
1190 mtrr_del(par->mtrr_reg, 0, 0); 1378 mtrr_del(par->mtrr_reg, 0, 0);
1191 par->mtrr_reg = -1; 1379 par->mtrr_reg = -1;
@@ -1195,6 +1383,13 @@ static void __devexit s3_pci_remove(struct pci_dev *dev)
1195 unregister_framebuffer(info); 1383 unregister_framebuffer(info);
1196 fb_dealloc_cmap(&info->cmap); 1384 fb_dealloc_cmap(&info->cmap);
1197 1385
1386#ifdef CONFIG_FB_S3_DDC
1387 if (par->ddc_registered)
1388 i2c_del_adapter(&par->ddc_adapter);
1389 if (par->mmio)
1390 iounmap(par->mmio);
1391#endif
1392
1198 pci_iounmap(dev, info->screen_base); 1393 pci_iounmap(dev, info->screen_base);
1199 pci_release_regions(dev); 1394 pci_release_regions(dev);
1200/* pci_disable_device(dev); */ 1395/* pci_disable_device(dev); */
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index bb71fea07284..80fa87e2ae2f 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -171,6 +171,8 @@ void savagefb_create_i2c_busses(struct fb_info *info)
171 171
172 switch (par->chip) { 172 switch (par->chip) {
173 case S3_PROSAVAGE: 173 case S3_PROSAVAGE:
174 case S3_PROSAVAGEDDR:
175 case S3_TWISTER:
174 par->chan.reg = CR_SERIAL2; 176 par->chan.reg = CR_SERIAL2;
175 par->chan.ioaddr = par->mmio.vbase; 177 par->chan.ioaddr = par->mmio.vbase;
176 par->chan.algo.setsda = prosavage_gpio_setsda; 178 par->chan.algo.setsda = prosavage_gpio_setsda;
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 4e9490c19d7d..32549d177b19 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -36,7 +36,6 @@
36#define PCI_CHIP_SAVAGE_IX 0x8c13 36#define PCI_CHIP_SAVAGE_IX 0x8c13
37#define PCI_CHIP_PROSAVAGE_PM 0x8a25 37#define PCI_CHIP_PROSAVAGE_PM 0x8a25
38#define PCI_CHIP_PROSAVAGE_KM 0x8a26 38#define PCI_CHIP_PROSAVAGE_KM 0x8a26
39 /* Twister is a code name; hope I get the real name soon. */
40#define PCI_CHIP_S3TWISTER_P 0x8d01 39#define PCI_CHIP_S3TWISTER_P 0x8d01
41#define PCI_CHIP_S3TWISTER_K 0x8d02 40#define PCI_CHIP_S3TWISTER_K 0x8d02
42#define PCI_CHIP_PROSAVAGE_DDR 0x8d03 41#define PCI_CHIP_PROSAVAGE_DDR 0x8d03
@@ -52,14 +51,15 @@
52#define PCI_CHIP_SUPSAV_IXCDDR 0x8c2f 51#define PCI_CHIP_SUPSAV_IXCDDR 0x8c2f
53 52
54 53
54#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
55 55
56#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 56#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
57 57
58#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) || (chip==S3_PROSAVAGE)) 58#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR))
59 59
60#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) 60#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
61 61
62#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) 62#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) || (chip==S3_PROSAVAGEDDR))
63 63
64/* Chip tags. These are used to group the adapters into 64/* Chip tags. These are used to group the adapters into
65 * related families. 65 * related families.
@@ -71,6 +71,8 @@ typedef enum {
71 S3_SAVAGE_MX, 71 S3_SAVAGE_MX,
72 S3_SAVAGE4, 72 S3_SAVAGE4,
73 S3_PROSAVAGE, 73 S3_PROSAVAGE,
74 S3_TWISTER,
75 S3_PROSAVAGEDDR,
74 S3_SUPERSAVAGE, 76 S3_SUPERSAVAGE,
75 S3_SAVAGE2000, 77 S3_SAVAGE2000,
76 S3_LAST 78 S3_LAST
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index a2dc1a7ec758..3b7f2f5bae71 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -328,7 +328,9 @@ SavageSetup2DEngine(struct savagefb_par *par)
328 savage_out32(0x48C18, savage_in32(0x48C18, par) | 0x0C, par); 328 savage_out32(0x48C18, savage_in32(0x48C18, par) | 0x0C, par);
329 break; 329 break;
330 case S3_SAVAGE4: 330 case S3_SAVAGE4:
331 case S3_TWISTER:
331 case S3_PROSAVAGE: 332 case S3_PROSAVAGE:
333 case S3_PROSAVAGEDDR:
332 case S3_SUPERSAVAGE: 334 case S3_SUPERSAVAGE:
333 /* Disable BCI */ 335 /* Disable BCI */
334 savage_out32(0x48C18, savage_in32(0x48C18, par) & 0x3FF0, par); 336 savage_out32(0x48C18, savage_in32(0x48C18, par) & 0x3FF0, par);
@@ -1886,6 +1888,8 @@ static int savage_init_hw(struct savagefb_par *par)
1886 break; 1888 break;
1887 1889
1888 case S3_PROSAVAGE: 1890 case S3_PROSAVAGE:
1891 case S3_PROSAVAGEDDR:
1892 case S3_TWISTER:
1889 videoRam = RamSavageNB[(config1 & 0xE0) >> 5] * 1024; 1893 videoRam = RamSavageNB[(config1 & 0xE0) >> 5] * 1024;
1890 break; 1894 break;
1891 1895
@@ -1963,7 +1967,8 @@ static int savage_init_hw(struct savagefb_par *par)
1963 } 1967 }
1964 } 1968 }
1965 1969
1966 if (S3_SAVAGE_MOBILE_SERIES(par->chip) && !par->crtonly) 1970 if ((S3_SAVAGE_MOBILE_SERIES(par->chip) ||
1971 S3_MOBILE_TWISTER_SERIES(par->chip)) && !par->crtonly)
1967 par->display_type = DISP_LCD; 1972 par->display_type = DISP_LCD;
1968 else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi)) 1973 else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi))
1969 par->display_type = DISP_DFP; 1974 par->display_type = DISP_DFP;
@@ -2111,19 +2116,19 @@ static int __devinit savage_init_fb_info(struct fb_info *info,
2111 snprintf(info->fix.id, 16, "ProSavageKM"); 2116 snprintf(info->fix.id, 16, "ProSavageKM");
2112 break; 2117 break;
2113 case FB_ACCEL_S3TWISTER_P: 2118 case FB_ACCEL_S3TWISTER_P:
2114 par->chip = S3_PROSAVAGE; 2119 par->chip = S3_TWISTER;
2115 snprintf(info->fix.id, 16, "TwisterP"); 2120 snprintf(info->fix.id, 16, "TwisterP");
2116 break; 2121 break;
2117 case FB_ACCEL_S3TWISTER_K: 2122 case FB_ACCEL_S3TWISTER_K:
2118 par->chip = S3_PROSAVAGE; 2123 par->chip = S3_TWISTER;
2119 snprintf(info->fix.id, 16, "TwisterK"); 2124 snprintf(info->fix.id, 16, "TwisterK");
2120 break; 2125 break;
2121 case FB_ACCEL_PROSAVAGE_DDR: 2126 case FB_ACCEL_PROSAVAGE_DDR:
2122 par->chip = S3_PROSAVAGE; 2127 par->chip = S3_PROSAVAGEDDR;
2123 snprintf(info->fix.id, 16, "ProSavageDDR"); 2128 snprintf(info->fix.id, 16, "ProSavageDDR");
2124 break; 2129 break;
2125 case FB_ACCEL_PROSAVAGE_DDRK: 2130 case FB_ACCEL_PROSAVAGE_DDRK:
2126 par->chip = S3_PROSAVAGE; 2131 par->chip = S3_PROSAVAGEDDR;
2127 snprintf(info->fix.id, 16, "ProSavage8"); 2132 snprintf(info->fix.id, 16, "ProSavage8");
2128 break; 2133 break;
2129 } 2134 }
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 8fe19582c460..45e47d847163 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -551,8 +551,7 @@ out_unmap:
551 free_irq(par->irq, &par->vsync); 551 free_irq(par->irq, &par->vsync);
552 iounmap(par->base); 552 iounmap(par->base);
553out_res: 553out_res:
554 release_resource(par->ioarea); 554 release_mem_region(res->start, resource_size(res));
555 kfree(par->ioarea);
556out_fb: 555out_fb:
557 framebuffer_release(info); 556 framebuffer_release(info);
558 return ret; 557 return ret;
@@ -570,8 +569,7 @@ static int __devexit sh7760fb_remove(struct platform_device *dev)
570 if (par->irq >= 0) 569 if (par->irq >= 0)
571 free_irq(par->irq, par); 570 free_irq(par->irq, par);
572 iounmap(par->base); 571 iounmap(par->base);
573 release_resource(par->ioarea); 572 release_mem_region(par->ioarea->start, resource_size(par->ioarea));
574 kfree(par->ioarea);
575 framebuffer_release(info); 573 framebuffer_release(info);
576 platform_set_drvdata(dev, NULL); 574 platform_set_drvdata(dev, NULL);
577 575
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 2b9e56a6bde4..6ae40b630dc9 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1131,15 +1131,19 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1131 pm_runtime_get_sync(hdmi->dev); 1131 pm_runtime_get_sync(hdmi->dev);
1132 1132
1133 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate); 1133 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
1134 if (ret < 0) 1134 if (ret < 0) {
1135 pm_runtime_put(hdmi->dev);
1135 goto out; 1136 goto out;
1137 }
1136 1138
1137 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE; 1139 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
1138 1140
1139 /* Reconfigure the clock */ 1141 /* Reconfigure the clock */
1140 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate); 1142 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate);
1141 if (ret < 0) 1143 if (ret < 0) {
1144 pm_runtime_put(hdmi->dev);
1142 goto out; 1145 goto out;
1146 }
1143 1147
1144 msleep(10); 1148 msleep(10);
1145 sh_hdmi_configure(hdmi); 1149 sh_hdmi_configure(hdmi);
@@ -1336,6 +1340,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1336ecodec: 1340ecodec:
1337 free_irq(irq, hdmi); 1341 free_irq(irq, hdmi);
1338ereqirq: 1342ereqirq:
1343 pm_runtime_suspend(&pdev->dev);
1339 pm_runtime_disable(&pdev->dev); 1344 pm_runtime_disable(&pdev->dev);
1340 iounmap(hdmi->base); 1345 iounmap(hdmi->base);
1341emap: 1346emap:
@@ -1372,6 +1377,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
1372 free_irq(irq, hdmi); 1377 free_irq(irq, hdmi);
1373 /* Wait for already scheduled work */ 1378 /* Wait for already scheduled work */
1374 cancel_delayed_work_sync(&hdmi->edid_work); 1379 cancel_delayed_work_sync(&hdmi->edid_work);
1380 pm_runtime_suspend(&pdev->dev);
1375 pm_runtime_disable(&pdev->dev); 1381 pm_runtime_disable(&pdev->dev);
1376 clk_disable(hdmi->hdmi_clk); 1382 clk_disable(hdmi->hdmi_clk);
1377 clk_put(hdmi->hdmi_clk); 1383 clk_put(hdmi->hdmi_clk);
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 9bcc61b4ef14..404c03b4b7c7 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -27,6 +27,7 @@
27#include <asm/atomic.h> 27#include <asm/atomic.h>
28 28
29#include "sh_mobile_lcdcfb.h" 29#include "sh_mobile_lcdcfb.h"
30#include "sh_mobile_meram.h"
30 31
31#define SIDE_B_OFFSET 0x1000 32#define SIDE_B_OFFSET 0x1000
32#define MIRROR_OFFSET 0x2000 33#define MIRROR_OFFSET 0x2000
@@ -143,6 +144,7 @@ struct sh_mobile_lcdc_priv {
143 unsigned long saved_shared_regs[NR_SHARED_REGS]; 144 unsigned long saved_shared_regs[NR_SHARED_REGS];
144 int started; 145 int started;
145 int forced_bpp; /* 2 channel LCDC must share bpp setting */ 146 int forced_bpp; /* 2 channel LCDC must share bpp setting */
147 struct sh_mobile_meram_info *meram_dev;
146}; 148};
147 149
148static bool banked(int reg_nr) 150static bool banked(int reg_nr)
@@ -469,7 +471,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
469 int bpp = 0; 471 int bpp = 0;
470 unsigned long ldddsr; 472 unsigned long ldddsr;
471 int k, m; 473 int k, m;
472 int ret = 0;
473 474
474 /* enable clocks before accessing the hardware */ 475 /* enable clocks before accessing the hardware */
475 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -538,11 +539,12 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
538 lcdc_write_chan(ch, LDPMR, 0); 539 lcdc_write_chan(ch, LDPMR, 0);
539 540
540 board_cfg = &ch->cfg.board_cfg; 541 board_cfg = &ch->cfg.board_cfg;
541 if (board_cfg->setup_sys) 542 if (board_cfg->setup_sys) {
542 ret = board_cfg->setup_sys(board_cfg->board_data, ch, 543 int ret = board_cfg->setup_sys(board_cfg->board_data,
543 &sh_mobile_lcdc_sys_bus_ops); 544 ch, &sh_mobile_lcdc_sys_bus_ops);
544 if (ret) 545 if (ret)
545 return ret; 546 return ret;
547 }
546 } 548 }
547 549
548 /* word and long word swap */ 550 /* word and long word swap */
@@ -564,6 +566,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
564 } 566 }
565 567
566 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 568 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
569 unsigned long base_addr_y;
570 unsigned long base_addr_c = 0;
571 int pitch;
567 ch = &priv->ch[k]; 572 ch = &priv->ch[k];
568 573
569 if (!priv->ch[k].enabled) 574 if (!priv->ch[k].enabled)
@@ -598,16 +603,68 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
598 } 603 }
599 lcdc_write_chan(ch, LDDFR, tmp); 604 lcdc_write_chan(ch, LDDFR, tmp);
600 605
606 base_addr_y = ch->info->fix.smem_start;
607 base_addr_c = base_addr_y +
608 ch->info->var.xres *
609 ch->info->var.yres_virtual;
610 pitch = ch->info->fix.line_length;
611
612 /* test if we can enable meram */
613 if (ch->cfg.meram_cfg && priv->meram_dev &&
614 priv->meram_dev->ops) {
615 struct sh_mobile_meram_cfg *cfg;
616 struct sh_mobile_meram_info *mdev;
617 unsigned long icb_addr_y, icb_addr_c;
618 int icb_pitch;
619 int pf;
620
621 cfg = ch->cfg.meram_cfg;
622 mdev = priv->meram_dev;
 623 /* we need to de-init configured ICBs before
624 * we can re-initialize them.
625 */
626 if (ch->meram_enabled)
627 mdev->ops->meram_unregister(mdev, cfg);
628
629 ch->meram_enabled = 0;
630
631 if (ch->info->var.nonstd) {
632 if (ch->info->var.bits_per_pixel == 24)
633 pf = SH_MOBILE_MERAM_PF_NV24;
634 else
635 pf = SH_MOBILE_MERAM_PF_NV;
636 } else {
637 pf = SH_MOBILE_MERAM_PF_RGB;
638 }
639
640 ret = mdev->ops->meram_register(mdev, cfg, pitch,
641 ch->info->var.yres,
642 pf,
643 base_addr_y,
644 base_addr_c,
645 &icb_addr_y,
646 &icb_addr_c,
647 &icb_pitch);
648 if (!ret) {
649 /* set LDSA1R value */
650 base_addr_y = icb_addr_y;
651 pitch = icb_pitch;
652
653 /* set LDSA2R value if required */
654 if (base_addr_c)
655 base_addr_c = icb_addr_c;
656
657 ch->meram_enabled = 1;
658 }
659 }
660
601 /* point out our frame buffer */ 661 /* point out our frame buffer */
602 lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start); 662 lcdc_write_chan(ch, LDSA1R, base_addr_y);
603 if (ch->info->var.nonstd) 663 if (ch->info->var.nonstd)
604 lcdc_write_chan(ch, LDSA2R, 664 lcdc_write_chan(ch, LDSA2R, base_addr_c);
605 ch->info->fix.smem_start +
606 ch->info->var.xres *
607 ch->info->var.yres_virtual);
608 665
609 /* set line size */ 666 /* set line size */
610 lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length); 667 lcdc_write_chan(ch, LDMLSR, pitch);
611 668
612 /* setup deferred io if SYS bus */ 669 /* setup deferred io if SYS bus */
613 tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; 670 tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
@@ -692,6 +749,17 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
692 board_cfg->display_off(board_cfg->board_data); 749 board_cfg->display_off(board_cfg->board_data);
693 module_put(board_cfg->owner); 750 module_put(board_cfg->owner);
694 } 751 }
752
753 /* disable the meram */
754 if (ch->meram_enabled) {
755 struct sh_mobile_meram_cfg *cfg;
756 struct sh_mobile_meram_info *mdev;
757 cfg = ch->cfg.meram_cfg;
758 mdev = priv->meram_dev;
759 mdev->ops->meram_unregister(mdev, cfg);
760 ch->meram_enabled = 0;
761 }
762
695 } 763 }
696 764
697 /* stop the lcdc */ 765 /* stop the lcdc */
@@ -875,9 +943,29 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
875 } else 943 } else
876 base_addr_c = 0; 944 base_addr_c = 0;
877 945
878 lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); 946 if (!ch->meram_enabled) {
879 if (base_addr_c) 947 lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
880 lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); 948 if (base_addr_c)
949 lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
950 } else {
951 struct sh_mobile_meram_cfg *cfg;
952 struct sh_mobile_meram_info *mdev;
953 unsigned long icb_addr_y, icb_addr_c;
954 int ret;
955
956 cfg = ch->cfg.meram_cfg;
957 mdev = priv->meram_dev;
958 ret = mdev->ops->meram_update(mdev, cfg,
959 base_addr_y, base_addr_c,
960 &icb_addr_y, &icb_addr_c);
961 if (ret)
962 return ret;
963
964 lcdc_write_chan_mirror(ch, LDSA1R, icb_addr_y);
965 if (icb_addr_c)
966 lcdc_write_chan_mirror(ch, LDSA2R, icb_addr_c);
967
968 }
881 969
882 if (lcdc_chan_is_sublcd(ch)) 970 if (lcdc_chan_is_sublcd(ch))
883 lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); 971 lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
@@ -1288,7 +1376,6 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
1288 struct fb_info *info = event->info; 1376 struct fb_info *info = event->info;
1289 struct sh_mobile_lcdc_chan *ch = info->par; 1377 struct sh_mobile_lcdc_chan *ch = info->par;
1290 struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg; 1378 struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg;
1291 int ret;
1292 1379
1293 if (&ch->lcdc->notifier != nb) 1380 if (&ch->lcdc->notifier != nb)
1294 return NOTIFY_DONE; 1381 return NOTIFY_DONE;
@@ -1302,7 +1389,6 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
1302 board_cfg->display_off(board_cfg->board_data); 1389 board_cfg->display_off(board_cfg->board_data);
1303 module_put(board_cfg->owner); 1390 module_put(board_cfg->owner);
1304 } 1391 }
1305 pm_runtime_put(info->device);
1306 sh_mobile_lcdc_stop(ch->lcdc); 1392 sh_mobile_lcdc_stop(ch->lcdc);
1307 break; 1393 break;
1308 case FB_EVENT_RESUME: 1394 case FB_EVENT_RESUME:
@@ -1316,9 +1402,7 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
1316 module_put(board_cfg->owner); 1402 module_put(board_cfg->owner);
1317 } 1403 }
1318 1404
1319 ret = sh_mobile_lcdc_start(ch->lcdc); 1405 sh_mobile_lcdc_start(ch->lcdc);
1320 if (!ret)
1321 pm_runtime_get_sync(info->device);
1322 } 1406 }
1323 1407
1324 return NOTIFY_OK; 1408 return NOTIFY_OK;
@@ -1420,6 +1504,8 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1420 goto err1; 1504 goto err1;
1421 } 1505 }
1422 1506
1507 priv->meram_dev = pdata->meram_dev;
1508
1423 for (i = 0; i < j; i++) { 1509 for (i = 0; i < j; i++) {
1424 struct fb_var_screeninfo *var; 1510 struct fb_var_screeninfo *var;
1425 const struct fb_videomode *lcd_cfg, *max_cfg = NULL; 1511 const struct fb_videomode *lcd_cfg, *max_cfg = NULL;
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index f16cb5645a13..aeed6687e6a7 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -39,6 +39,7 @@ struct sh_mobile_lcdc_chan {
39 int use_count; 39 int use_count;
40 int blank_status; 40 int blank_status;
41 struct mutex open_lock; /* protects the use counter */ 41 struct mutex open_lock; /* protects the use counter */
42 int meram_enabled;
42}; 43};
43 44
44#endif 45#endif
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
new file mode 100644
index 000000000000..9170c82b495c
--- /dev/null
+++ b/drivers/video/sh_mobile_meram.c
@@ -0,0 +1,567 @@
1/*
2 * SuperH Mobile MERAM Driver for SuperH Mobile LCDC Driver
3 *
4 * Copyright (c) 2011 Damian Hobson-Garcia <dhobsong@igel.co.jp>
5 * Takanari Hayama <taki@igel.co.jp>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/platform_device.h>
18
19#include "sh_mobile_meram.h"
20
21/* meram registers */
22#define MExxCTL 0x0
23#define MExxBSIZE 0x4
24#define MExxMNCF 0x8
25#define MExxSARA 0x10
26#define MExxSARB 0x14
27#define MExxSBSIZE 0x18
28
29#define MERAM_MExxCTL_VAL(ctl, next_icb, addr) \
30 ((ctl) | (((next_icb) & 0x1f) << 11) | (((addr) & 0x7ff) << 16))
31#define MERAM_MExxBSIZE_VAL(a, b, c) \
32 (((a) << 28) | ((b) << 16) | (c))
33
34#define MEVCR1 0x4
35#define MEACTS 0x10
36#define MEQSEL1 0x40
37#define MEQSEL2 0x44
38
39/* settings */
40#define MERAM_SEC_LINE 15
41#define MERAM_LINE_WIDTH 2048
42
43/*
44 * MERAM/ICB access functions
45 */
46
47#define MERAM_ICB_OFFSET(base, idx, off) \
48 ((base) + (0x400 + ((idx) * 0x20) + (off)))
49
50static inline void meram_write_icb(void __iomem *base, int idx, int off,
51 unsigned long val)
52{
53 iowrite32(val, MERAM_ICB_OFFSET(base, idx, off));
54}
55
56static inline unsigned long meram_read_icb(void __iomem *base, int idx, int off)
57{
58 return ioread32(MERAM_ICB_OFFSET(base, idx, off));
59}
60
61static inline void meram_write_reg(void __iomem *base, int off,
62 unsigned long val)
63{
64 iowrite32(val, base + off);
65}
66
67static inline unsigned long meram_read_reg(void __iomem *base, int off)
68{
69 return ioread32(base + off);
70}
71
72/*
73 * register ICB
74 */
75
76#define MERAM_CACHE_START(p) ((p) >> 16)
77#define MERAM_CACHE_END(p) ((p) & 0xffff)
78#define MERAM_CACHE_SET(o, s) ((((o) & 0xffff) << 16) | \
79 (((o) + (s) - 1) & 0xffff))
80
81/*
 82 * check that there are no overlaps in the MERAM allocation.
83 */
84
85static inline int meram_check_overlap(struct sh_mobile_meram_priv *priv,
86 struct sh_mobile_meram_icb *new)
87{
88 int i;
89 int used_start, used_end, meram_start, meram_end;
90
91 /* valid ICB? */
92 if (new->marker_icb & ~0x1f || new->cache_icb & ~0x1f)
93 return 1;
94
95 if (test_bit(new->marker_icb, &priv->used_icb) ||
96 test_bit(new->cache_icb, &priv->used_icb))
97 return 1;
98
99 for (i = 0; i < priv->used_meram_cache_regions; i++) {
100 used_start = MERAM_CACHE_START(priv->used_meram_cache[i]);
101 used_end = MERAM_CACHE_END(priv->used_meram_cache[i]);
102 meram_start = new->meram_offset;
103 meram_end = new->meram_offset + new->meram_size;
104
105 if ((meram_start >= used_start && meram_start < used_end) ||
106 (meram_end > used_start && meram_end < used_end))
107 return 1;
108 }
109
110 return 0;
111}
112
113/*
114 * mark the specified ICB as used
115 */
116
117static inline void meram_mark(struct sh_mobile_meram_priv *priv,
118 struct sh_mobile_meram_icb *new)
119{
120 int n;
121
122 if (new->marker_icb < 0 || new->cache_icb < 0)
123 return;
124
125 __set_bit(new->marker_icb, &priv->used_icb);
126 __set_bit(new->cache_icb, &priv->used_icb);
127
128 n = priv->used_meram_cache_regions;
129
130 priv->used_meram_cache[n] = MERAM_CACHE_SET(new->meram_offset,
131 new->meram_size);
132
133 priv->used_meram_cache_regions++;
134}
135
136/*
137 * unmark the specified ICB as used
138 */
139
140static inline void meram_unmark(struct sh_mobile_meram_priv *priv,
141 struct sh_mobile_meram_icb *icb)
142{
143 int i;
144 unsigned long pattern;
145
146 if (icb->marker_icb < 0 || icb->cache_icb < 0)
147 return;
148
149 __clear_bit(icb->marker_icb, &priv->used_icb);
150 __clear_bit(icb->cache_icb, &priv->used_icb);
151
152 pattern = MERAM_CACHE_SET(icb->meram_offset, icb->meram_size);
153 for (i = 0; i < priv->used_meram_cache_regions; i++) {
154 if (priv->used_meram_cache[i] == pattern) {
155 while (i < priv->used_meram_cache_regions - 1) {
156 priv->used_meram_cache[i] =
157 priv->used_meram_cache[i + 1] ;
158 i++;
159 }
160 priv->used_meram_cache[i] = 0;
161 priv->used_meram_cache_regions--;
162 break;
163 }
164 }
165}
166
167/*
 168 * is this a YCbCr (NV12, NV16 or NV24) colorspace?
169 */
170static inline int is_nvcolor(int cspace)
171{
172 if (cspace == SH_MOBILE_MERAM_PF_NV ||
173 cspace == SH_MOBILE_MERAM_PF_NV24)
174 return 1;
175 return 0;
176}
177
178/*
179 * set the next address to fetch
180 */
181static inline void meram_set_next_addr(struct sh_mobile_meram_priv *priv,
182 struct sh_mobile_meram_cfg *cfg,
183 unsigned long base_addr_y,
184 unsigned long base_addr_c)
185{
186 unsigned long target;
187
188 target = (cfg->current_reg) ? MExxSARA : MExxSARB;
189 cfg->current_reg ^= 1;
190
191 /* set the next address to fetch */
192 meram_write_icb(priv->base, cfg->icb[0].cache_icb, target,
193 base_addr_y);
194 meram_write_icb(priv->base, cfg->icb[0].marker_icb, target,
195 base_addr_y + cfg->icb[0].cache_unit);
196
197 if (is_nvcolor(cfg->pixelformat)) {
198 meram_write_icb(priv->base, cfg->icb[1].cache_icb, target,
199 base_addr_c);
200 meram_write_icb(priv->base, cfg->icb[1].marker_icb, target,
201 base_addr_c + cfg->icb[1].cache_unit);
202 }
203}
204
205/*
206 * get the next ICB address
207 */
208static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
209 struct sh_mobile_meram_cfg *cfg,
210 unsigned long *icb_addr_y,
211 unsigned long *icb_addr_c)
212{
213 unsigned long icb_offset;
214
215 if (pdata->addr_mode == SH_MOBILE_MERAM_MODE0)
216 icb_offset = 0x80000000 | (cfg->current_reg << 29);
217 else
218 icb_offset = 0xc0000000 | (cfg->current_reg << 23);
219
220 *icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
221 if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat))
222 *icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
223}
224
225#define MERAM_CALC_BYTECOUNT(x, y) \
226 (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1))
227
228/*
229 * initialize MERAM
230 */
231
232static int meram_init(struct sh_mobile_meram_priv *priv,
233 struct sh_mobile_meram_icb *icb,
234 int xres, int yres, int *out_pitch)
235{
236 unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres);
237 unsigned long bnm;
238 int lcdc_pitch, xpitch, line_cnt;
239 int save_lines;
240
241 /* adjust pitch to 1024, 2048, 4096 or 8192 */
242 lcdc_pitch = (xres - 1) | 1023;
243 lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 1);
244 lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 2);
245 lcdc_pitch += 1;
246
247 /* derive settings */
248 if (lcdc_pitch == 8192 && yres >= 1024) {
249 lcdc_pitch = xpitch = MERAM_LINE_WIDTH;
250 line_cnt = total_byte_count >> 11;
251 *out_pitch = xres;
252 save_lines = (icb->meram_size / 16 / MERAM_SEC_LINE);
253 save_lines *= MERAM_SEC_LINE;
254 } else {
255 xpitch = xres;
256 line_cnt = yres;
257 *out_pitch = lcdc_pitch;
258 save_lines = icb->meram_size / (lcdc_pitch >> 10) / 2;
259 save_lines &= 0xff;
260 }
261 bnm = (save_lines - 1) << 16;
262
 263 /* TODO: we had better check whether we have enough MERAM buffer size */
264
265 /* set up ICB */
266 meram_write_icb(priv->base, icb->cache_icb, MExxBSIZE,
267 MERAM_MExxBSIZE_VAL(0x0, line_cnt - 1, xpitch - 1));
268 meram_write_icb(priv->base, icb->marker_icb, MExxBSIZE,
269 MERAM_MExxBSIZE_VAL(0xf, line_cnt - 1, xpitch - 1));
270
271 meram_write_icb(priv->base, icb->cache_icb, MExxMNCF, bnm);
272 meram_write_icb(priv->base, icb->marker_icb, MExxMNCF, bnm);
273
274 meram_write_icb(priv->base, icb->cache_icb, MExxSBSIZE, xpitch);
275 meram_write_icb(priv->base, icb->marker_icb, MExxSBSIZE, xpitch);
276
277 /* save a cache unit size */
278 icb->cache_unit = xres * save_lines;
279
280 /*
281 * Set MERAM for framebuffer
282 *
283 * 0x70f: WD = 0x3, WS=0x1, CM=0x1, MD=FB mode
 284 * We also chain the cache_icb and the marker_icb, and
 285 * split the allocated MERAM buffer between the two ICBs.
286 */
287 meram_write_icb(priv->base, icb->cache_icb, MExxCTL,
288 MERAM_MExxCTL_VAL(0x70f, icb->marker_icb,
289 icb->meram_offset));
290 meram_write_icb(priv->base, icb->marker_icb, MExxCTL,
291 MERAM_MExxCTL_VAL(0x70f, icb->cache_icb,
292 icb->meram_offset +
293 icb->meram_size / 2));
294
295 return 0;
296}
297
298static void meram_deinit(struct sh_mobile_meram_priv *priv,
299 struct sh_mobile_meram_icb *icb)
300{
301 /* disable ICB */
302 meram_write_icb(priv->base, icb->cache_icb, MExxCTL, 0);
303 meram_write_icb(priv->base, icb->marker_icb, MExxCTL, 0);
304 icb->cache_unit = 0;
305}
306
307/*
308 * register the ICB
309 */
310
311static int sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
312 struct sh_mobile_meram_cfg *cfg,
313 int xres, int yres, int pixelformat,
314 unsigned long base_addr_y,
315 unsigned long base_addr_c,
316 unsigned long *icb_addr_y,
317 unsigned long *icb_addr_c,
318 int *pitch)
319{
320 struct platform_device *pdev;
321 struct sh_mobile_meram_priv *priv;
322 int n, out_pitch;
323 int error = 0;
324
325 if (!pdata || !pdata->priv || !pdata->pdev || !cfg)
326 return -EINVAL;
327
328 if (pixelformat != SH_MOBILE_MERAM_PF_NV &&
329 pixelformat != SH_MOBILE_MERAM_PF_NV24 &&
330 pixelformat != SH_MOBILE_MERAM_PF_RGB)
331 return -EINVAL;
332
333 priv = pdata->priv;
334 pdev = pdata->pdev;
335
336 dev_dbg(&pdev->dev, "registering %dx%d (%s) (y=%08lx, c=%08lx)",
337 xres, yres, (!pixelformat) ? "yuv" : "rgb",
338 base_addr_y, base_addr_c);
339
340 mutex_lock(&priv->lock);
341
342 /* we can't handle wider than 8192px */
343 if (xres > 8192) {
344 dev_err(&pdev->dev, "width exceeding the limit (> 8192).");
345 error = -EINVAL;
346 goto err;
347 }
348
349 if (priv->used_meram_cache_regions + 2 > SH_MOBILE_MERAM_ICB_NUM) {
350 dev_err(&pdev->dev, "no more ICB available.");
351 error = -EINVAL;
352 goto err;
353 }
354
355 /* do we have at least one ICB config? */
356 if (cfg->icb[0].marker_icb < 0 || cfg->icb[0].cache_icb < 0) {
357 dev_err(&pdev->dev, "at least one ICB is required.");
358 error = -EINVAL;
359 goto err;
360 }
361
362 /* make sure that there's no overlaps */
363 if (meram_check_overlap(priv, &cfg->icb[0])) {
364 dev_err(&pdev->dev, "conflicting config detected.");
365 error = -EINVAL;
366 goto err;
367 }
368 n = 1;
369
370 /* do the same if we have the second ICB set */
371 if (cfg->icb[1].marker_icb >= 0 && cfg->icb[1].cache_icb >= 0) {
372 if (meram_check_overlap(priv, &cfg->icb[1])) {
373 dev_err(&pdev->dev, "conflicting config detected.");
374 error = -EINVAL;
375 goto err;
376 }
377 n = 2;
378 }
379
380 if (is_nvcolor(pixelformat) && n != 2) {
381 dev_err(&pdev->dev, "requires two ICB sets for planar Y/C.");
382 error = -EINVAL;
383 goto err;
384 }
385
386 /* we now register the ICB */
387 cfg->pixelformat = pixelformat;
388 meram_mark(priv, &cfg->icb[0]);
389 if (is_nvcolor(pixelformat))
390 meram_mark(priv, &cfg->icb[1]);
391
392 /* initialize MERAM */
393 meram_init(priv, &cfg->icb[0], xres, yres, &out_pitch);
394 *pitch = out_pitch;
395 if (pixelformat == SH_MOBILE_MERAM_PF_NV)
396 meram_init(priv, &cfg->icb[1], xres, (yres + 1) / 2,
397 &out_pitch);
398 else if (pixelformat == SH_MOBILE_MERAM_PF_NV24)
399 meram_init(priv, &cfg->icb[1], 2 * xres, (yres + 1) / 2,
400 &out_pitch);
401
402 cfg->current_reg = 1;
403 meram_set_next_addr(priv, cfg, base_addr_y, base_addr_c);
404 meram_get_next_icb_addr(pdata, cfg, icb_addr_y, icb_addr_c);
405
406 dev_dbg(&pdev->dev, "registered - can access via y=%08lx, c=%08lx",
407 *icb_addr_y, *icb_addr_c);
408
409err:
410 mutex_unlock(&priv->lock);
411 return error;
412}
413
414static int sh_mobile_meram_unregister(struct sh_mobile_meram_info *pdata,
415 struct sh_mobile_meram_cfg *cfg)
416{
417 struct sh_mobile_meram_priv *priv;
418
419 if (!pdata || !pdata->priv || !cfg)
420 return -EINVAL;
421
422 priv = pdata->priv;
423
424 mutex_lock(&priv->lock);
425
426 /* deinit & unmark */
427 if (is_nvcolor(cfg->pixelformat)) {
428 meram_deinit(priv, &cfg->icb[1]);
429 meram_unmark(priv, &cfg->icb[1]);
430 }
431 meram_deinit(priv, &cfg->icb[0]);
432 meram_unmark(priv, &cfg->icb[0]);
433
434 mutex_unlock(&priv->lock);
435
436 return 0;
437}
438
439static int sh_mobile_meram_update(struct sh_mobile_meram_info *pdata,
440 struct sh_mobile_meram_cfg *cfg,
441 unsigned long base_addr_y,
442 unsigned long base_addr_c,
443 unsigned long *icb_addr_y,
444 unsigned long *icb_addr_c)
445{
446 struct sh_mobile_meram_priv *priv;
447
448 if (!pdata || !pdata->priv || !cfg)
449 return -EINVAL;
450
451 priv = pdata->priv;
452
453 mutex_lock(&priv->lock);
454
455 meram_set_next_addr(priv, cfg, base_addr_y, base_addr_c);
456 meram_get_next_icb_addr(pdata, cfg, icb_addr_y, icb_addr_c);
457
458 mutex_unlock(&priv->lock);
459
460 return 0;
461}
462
463static struct sh_mobile_meram_ops sh_mobile_meram_ops = {
464 .module = THIS_MODULE,
465 .meram_register = sh_mobile_meram_register,
466 .meram_unregister = sh_mobile_meram_unregister,
467 .meram_update = sh_mobile_meram_update,
468};
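/*
 * Illustrative sketch only (not part of this patch): a display controller
 * driver that received this sh_mobile_meram_info as platform data could
 * drive the ops above roughly as follows.  The function name, the 1280x720
 * geometry and the base addresses are placeholders; cfg is assumed to
 * describe two ICB sets, as the NV formats require.
 */
static int example_use_meram(struct sh_mobile_meram_info *mi,
			     struct sh_mobile_meram_cfg *cfg,
			     unsigned long base_y, unsigned long base_c)
{
	unsigned long icb_y, icb_c;
	int pitch;
	int ret;

	ret = mi->ops->meram_register(mi, cfg, 1280, 720,
				      SH_MOBILE_MERAM_PF_NV,
				      base_y, base_c,
				      &icb_y, &icb_c, &pitch);
	if (ret)
		return ret;

	/* scan out from icb_y/icb_c; on a page flip, retarget the cache */
	mi->ops->meram_update(mi, cfg, base_y, base_c, &icb_y, &icb_c);

	/* when the plane is torn down */
	mi->ops->meram_unregister(mi, cfg);
	return 0;
}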
469
470/*
471 * initialize MERAM
472 */
473
474static int sh_mobile_meram_remove(struct platform_device *pdev);
475
476static int __devinit sh_mobile_meram_probe(struct platform_device *pdev)
477{
478 struct sh_mobile_meram_priv *priv;
479 struct sh_mobile_meram_info *pdata = pdev->dev.platform_data;
480 struct resource *res;
481 int error;
482
483 if (!pdata) {
484 dev_err(&pdev->dev, "no platform data defined\n");
485 return -EINVAL;
486 }
487
488 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
489 if (!res) {
490 dev_err(&pdev->dev, "cannot get platform resources\n");
491 return -ENOENT;
492 }
493
494 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
495 if (!priv) {
496 dev_err(&pdev->dev, "cannot allocate device data\n");
497 return -ENOMEM;
498 }
499
500 platform_set_drvdata(pdev, priv);
501
502 /* initialize private data */
503 mutex_init(&priv->lock);
504 priv->base = ioremap_nocache(res->start, resource_size(res));
505 if (!priv->base) {
506 dev_err(&pdev->dev, "ioremap failed\n");
507 error = -EFAULT;
508 goto err;
509 }
510 pdata->ops = &sh_mobile_meram_ops;
511 pdata->priv = priv;
512 pdata->pdev = pdev;
513
514 /* initialize ICB addressing mode */
515 if (pdata->addr_mode == SH_MOBILE_MERAM_MODE1)
516 meram_write_reg(priv->base, MEVCR1, 1 << 29);
517
518 dev_info(&pdev->dev, "sh_mobile_meram initialized.");
519
520 return 0;
521
522err:
523 sh_mobile_meram_remove(pdev);
524
525 return error;
526}
527
528
529static int sh_mobile_meram_remove(struct platform_device *pdev)
530{
531 struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
532
533 if (priv->base)
534 iounmap(priv->base);
535
536 mutex_destroy(&priv->lock);
537
538 kfree(priv);
539
540 return 0;
541}
542
543static struct platform_driver sh_mobile_meram_driver = {
544 .driver = {
545 .name = "sh_mobile_meram",
546 .owner = THIS_MODULE,
547 },
548 .probe = sh_mobile_meram_probe,
549 .remove = sh_mobile_meram_remove,
550};
551
552static int __init sh_mobile_meram_init(void)
553{
554 return platform_driver_register(&sh_mobile_meram_driver);
555}
556
557static void __exit sh_mobile_meram_exit(void)
558{
559 platform_driver_unregister(&sh_mobile_meram_driver);
560}
561
562module_init(sh_mobile_meram_init);
563module_exit(sh_mobile_meram_exit);
564
565MODULE_DESCRIPTION("SuperH Mobile MERAM driver");
566MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama");
567MODULE_LICENSE("GPL v2");
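For reference, a board file would instantiate this driver as a platform device whose platform data is the struct sh_mobile_meram_info consumed above. The sketch below is illustrative only; the variable names, register window and size are placeholders, not values taken from this patch.

static struct sh_mobile_meram_info example_meram_info = {
	.addr_mode	= SH_MOBILE_MERAM_MODE1,
};

static struct resource example_meram_resources[] = {
	[0] = {
		.name	= "MERAM",
		.start	= 0xe8000000,			/* placeholder */
		.end	= 0xe8000000 + 0x20000 - 1,	/* placeholder */
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device example_meram_device = {
	.name		= "sh_mobile_meram",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_meram_resources),
	.resource	= example_meram_resources,
	.dev		= {
		.platform_data = &example_meram_info,
	},
};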
diff --git a/drivers/video/sh_mobile_meram.h b/drivers/video/sh_mobile_meram.h
new file mode 100644
index 000000000000..82c54fbce8bd
--- /dev/null
+++ b/drivers/video/sh_mobile_meram.h
@@ -0,0 +1,41 @@
1#ifndef __sh_mobile_meram_h__
2#define __sh_mobile_meram_h__
3
4#include <linux/mutex.h>
5#include <video/sh_mobile_meram.h>
6
7/*
8 * MERAM private
9 */
10
11#define MERAM_ICB_Y 0x1
12#define MERAM_ICB_C 0x2
13
14/* MERAM cache size */
15#define SH_MOBILE_MERAM_ICB_NUM 32
16
17#define SH_MOBILE_MERAM_CACHE_OFFSET(p) ((p) >> 16)
18#define SH_MOBILE_MERAM_CACHE_SIZE(p) ((p) & 0xffff)
19
20struct sh_mobile_meram_priv {
21 void __iomem *base;
22 struct mutex lock;
23 unsigned long used_icb;
24 int used_meram_cache_regions;
25 unsigned long used_meram_cache[SH_MOBILE_MERAM_ICB_NUM];
26};
27
28int sh_mobile_meram_alloc_icb(const struct sh_mobile_meram_cfg *cfg,
29 int xres,
30 int yres,
31 unsigned int base_addr,
32 int yuv_mode,
33 int *marker_icb,
34 int *out_pitch);
35
36void sh_mobile_meram_free_icb(int marker_icb);
37
38#define SH_MOBILE_MERAM_START(ind, ab) \
39 (0xC0000000 | ((ab & 0x1) << 23) | ((ind & 0x1F) << 24))
40
41#endif /* !__sh_mobile_meram_h__ */
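As a quick standalone illustration of the packed encodings above (not part of the patch; the example values are arbitrary, and reading the ab argument as a selector for one of the two ICB address blocks is an assumption):

#include <stdio.h>

#define SH_MOBILE_MERAM_CACHE_OFFSET(p)	((p) >> 16)
#define SH_MOBILE_MERAM_CACHE_SIZE(p)	((p) & 0xffff)
#define SH_MOBILE_MERAM_START(ind, ab) \
	(0xC0000000 | ((ab & 0x1) << 23) | ((ind & 0x1F) << 24))

int main(void)
{
	unsigned long packed = (0x1a << 16) | 0x0040;	/* offset 0x1a, size 0x40 */

	printf("offset=0x%lx size=0x%lx\n",
	       SH_MOBILE_MERAM_CACHE_OFFSET(packed),
	       SH_MOBILE_MERAM_CACHE_SIZE(packed));
	/* ICB 5, second address block -> CPU-visible window base 0xc5800000 */
	printf("start=0x%lx\n", (unsigned long)SH_MOBILE_MERAM_START(5, 1));
	return 0;
}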
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 56ef6b3a9851..87f0be1e78b5 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1625,22 +1625,22 @@ static int sm501fb_start(struct sm501fb_info *info,
1625 return 0; /* everything is setup */ 1625 return 0; /* everything is setup */
1626 1626
1627 err_mem_res: 1627 err_mem_res:
1628 release_resource(info->fbmem_res); 1628 release_mem_region(info->fbmem_res->start,
1629 kfree(info->fbmem_res); 1629 resource_size(info->fbmem_res));
1630 1630
1631 err_regs2d_map: 1631 err_regs2d_map:
1632 iounmap(info->regs2d); 1632 iounmap(info->regs2d);
1633 1633
1634 err_regs2d_res: 1634 err_regs2d_res:
1635 release_resource(info->regs2d_res); 1635 release_mem_region(info->regs2d_res->start,
1636 kfree(info->regs2d_res); 1636 resource_size(info->regs2d_res));
1637 1637
1638 err_regs_map: 1638 err_regs_map:
1639 iounmap(info->regs); 1639 iounmap(info->regs);
1640 1640
1641 err_regs_res: 1641 err_regs_res:
1642 release_resource(info->regs_res); 1642 release_mem_region(info->regs_res->start,
1643 kfree(info->regs_res); 1643 resource_size(info->regs_res));
1644 1644
1645 err_release: 1645 err_release:
1646 return ret; 1646 return ret;
@@ -1652,16 +1652,16 @@ static void sm501fb_stop(struct sm501fb_info *info)
1652 sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0); 1652 sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0);
1653 1653
1654 iounmap(info->fbmem); 1654 iounmap(info->fbmem);
1655 release_resource(info->fbmem_res); 1655 release_mem_region(info->fbmem_res->start,
1656 kfree(info->fbmem_res); 1656 resource_size(info->fbmem_res));
1657 1657
1658 iounmap(info->regs2d); 1658 iounmap(info->regs2d);
1659 release_resource(info->regs2d_res); 1659 release_mem_region(info->regs2d_res->start,
1660 kfree(info->regs2d_res); 1660 resource_size(info->regs2d_res));
1661 1661
1662 iounmap(info->regs); 1662 iounmap(info->regs);
1663 release_resource(info->regs_res); 1663 release_mem_region(info->regs_res->start,
1664 kfree(info->regs_res); 1664 resource_size(info->regs_res));
1665} 1665}
1666 1666
1667static int sm501fb_init_fb(struct fb_info *fb, 1667static int sm501fb_init_fb(struct fb_info *fb,
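The sm501fb hunks above replace the open-coded release_resource()/kfree() pairs with release_mem_region(), which is the proper inverse of request_mem_region(). A minimal sketch of the pairing (illustrative only, not sm501fb code; the function names are placeholders):

static void __iomem *example_map(struct platform_device *pdev,
				 struct resource **resp)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base;

	if (!res || !request_mem_region(res->start, resource_size(res),
					pdev->name))
		return NULL;

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		release_mem_region(res->start, resource_size(res));
		return NULL;
	}
	*resp = res;
	return base;
}

static void example_unmap(void __iomem *base, struct resource *res)
{
	iounmap(base);
	release_mem_region(res->start, resource_size(res));
}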
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index 0c341d739604..cd1c4dcef8fd 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -250,7 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
250 */ 250 */
251static int tmiofb_hw_stop(struct platform_device *dev) 251static int tmiofb_hw_stop(struct platform_device *dev)
252{ 252{
253 struct tmio_fb_data *data = mfd_get_data(dev); 253 struct tmio_fb_data *data = dev->dev.platform_data;
254 struct fb_info *info = platform_get_drvdata(dev); 254 struct fb_info *info = platform_get_drvdata(dev);
255 struct tmiofb_par *par = info->par; 255 struct tmiofb_par *par = info->par;
256 256
@@ -311,7 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev)
311 */ 311 */
312static void tmiofb_hw_mode(struct platform_device *dev) 312static void tmiofb_hw_mode(struct platform_device *dev)
313{ 313{
314 struct tmio_fb_data *data = mfd_get_data(dev); 314 struct tmio_fb_data *data = dev->dev.platform_data;
315 struct fb_info *info = platform_get_drvdata(dev); 315 struct fb_info *info = platform_get_drvdata(dev);
316 struct fb_videomode *mode = info->mode; 316 struct fb_videomode *mode = info->mode;
317 struct tmiofb_par *par = info->par; 317 struct tmiofb_par *par = info->par;
@@ -557,8 +557,7 @@ static int tmiofb_ioctl(struct fb_info *fbi,
557static struct fb_videomode * 557static struct fb_videomode *
558tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var) 558tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
559{ 559{
560 struct tmio_fb_data *data = 560 struct tmio_fb_data *data = info->device->platform_data;
561 mfd_get_data(to_platform_device(info->device));
562 struct fb_videomode *best = NULL; 561 struct fb_videomode *best = NULL;
563 int i; 562 int i;
564 563
@@ -578,8 +577,7 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
578{ 577{
579 578
580 struct fb_videomode *mode; 579 struct fb_videomode *mode;
581 struct tmio_fb_data *data = 580 struct tmio_fb_data *data = info->device->platform_data;
582 mfd_get_data(to_platform_device(info->device));
583 581
584 mode = tmiofb_find_mode(info, var); 582 mode = tmiofb_find_mode(info, var);
585 if (!mode || var->bits_per_pixel > 16) 583 if (!mode || var->bits_per_pixel > 16)
@@ -680,7 +678,7 @@ static struct fb_ops tmiofb_ops = {
680static int __devinit tmiofb_probe(struct platform_device *dev) 678static int __devinit tmiofb_probe(struct platform_device *dev)
681{ 679{
682 const struct mfd_cell *cell = mfd_get_cell(dev); 680 const struct mfd_cell *cell = mfd_get_cell(dev);
683 struct tmio_fb_data *data = mfd_get_data(dev); 681 struct tmio_fb_data *data = dev->dev.platform_data;
684 struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1); 682 struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
685 struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0); 683 struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
686 struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2); 684 struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 695066b5b2e6..52b0f3e8ccac 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/prefetch.h> 30#include <linux/prefetch.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/prefetch.h>
32#include <video/udlfb.h> 33#include <video/udlfb.h>
33#include "edid.h" 34#include "edid.h"
34 35
@@ -1587,10 +1588,19 @@ static int dlfb_usb_probe(struct usb_interface *interface,
1587 goto error; 1588 goto error;
1588 } 1589 }
1589 1590
1590 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) 1591 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) {
1591 device_create_file(info->dev, &fb_device_attrs[i]); 1592 retval = device_create_file(info->dev, &fb_device_attrs[i]);
1593 if (retval) {
1594 pr_err("device_create_file failed %d\n", retval);
1595 goto err_del_attrs;
1596 }
1597 }
1592 1598
1593 device_create_bin_file(info->dev, &edid_attr); 1599 retval = device_create_bin_file(info->dev, &edid_attr);
1600 if (retval) {
1601 pr_err("device_create_bin_file failed %d\n", retval);
1602 goto err_del_attrs;
1603 }
1594 1604
1595 pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution." 1605 pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
1596 " Using %dK framebuffer memory\n", info->node, 1606 " Using %dK framebuffer memory\n", info->node,
@@ -1599,6 +1609,10 @@ static int dlfb_usb_probe(struct usb_interface *interface,
1599 info->fix.smem_len * 2 : info->fix.smem_len) >> 10); 1609 info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
1600 return 0; 1610 return 0;
1601 1611
1612err_del_attrs:
1613 for (i -= 1; i >= 0; i--)
1614 device_remove_file(info->dev, &fb_device_attrs[i]);
1615
1602error: 1616error:
1603 if (dev) { 1617 if (dev) {
1604 1618
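The udlfb hunk above now checks the return value of device_create_file() (it is __must_check) and unwinds the attributes already created on failure. The same idiom in isolation (illustrative sketch, not udlfb code; the function name is a placeholder):

static int example_create_files(struct device *dev,
				struct device_attribute *attrs, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = device_create_file(dev, &attrs[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* remove only the files that were actually created */
	while (--i >= 0)
		device_remove_file(dev, &attrs[i]);
	return err;
}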
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c
index c2a0a1cfd3b3..ab5341814c74 100644
--- a/drivers/video/via/via-gpio.c
+++ b/drivers/video/via/via-gpio.c
@@ -145,7 +145,7 @@ static int via_gpio_get(struct gpio_chip *chip, unsigned int nr)
145} 145}
146 146
147 147
148static struct viafb_gpio_cfg gpio_config = { 148static struct viafb_gpio_cfg viafb_gpio_config = {
149 .gpio_chip = { 149 .gpio_chip = {
150 .label = "VIAFB onboard GPIO", 150 .label = "VIAFB onboard GPIO",
151 .owner = THIS_MODULE, 151 .owner = THIS_MODULE,
@@ -183,8 +183,8 @@ static int viafb_gpio_resume(void *private)
183{ 183{
184 int i; 184 int i;
185 185
186 for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2) 186 for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
187 viafb_gpio_enable(gpio_config.active_gpios[i]); 187 viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
188 return 0; 188 return 0;
189} 189}
190 190
@@ -201,9 +201,9 @@ int viafb_gpio_lookup(const char *name)
201{ 201{
202 int i; 202 int i;
203 203
204 for (i = 0; i < gpio_config.gpio_chip.ngpio; i++) 204 for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i++)
205 if (!strcmp(name, gpio_config.active_gpios[i]->vg_name)) 205 if (!strcmp(name, viafb_gpio_config.active_gpios[i]->vg_name))
206 return gpio_config.gpio_chip.base + i; 206 return viafb_gpio_config.gpio_chip.base + i;
207 return -1; 207 return -1;
208} 208}
209EXPORT_SYMBOL_GPL(viafb_gpio_lookup); 209EXPORT_SYMBOL_GPL(viafb_gpio_lookup);
@@ -229,14 +229,15 @@ static __devinit int viafb_gpio_probe(struct platform_device *platdev)
229 for (gpio = viafb_all_gpios; 229 for (gpio = viafb_all_gpios;
230 gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++) 230 gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++)
231 if (gpio->vg_port_index == port_cfg[i].ioport_index) { 231 if (gpio->vg_port_index == port_cfg[i].ioport_index) {
232 gpio_config.active_gpios[ngpio] = gpio; 232 viafb_gpio_config.active_gpios[ngpio] = gpio;
233 gpio_config.gpio_names[ngpio] = gpio->vg_name; 233 viafb_gpio_config.gpio_names[ngpio] =
234 gpio->vg_name;
234 ngpio++; 235 ngpio++;
235 } 236 }
236 } 237 }
237 gpio_config.gpio_chip.ngpio = ngpio; 238 viafb_gpio_config.gpio_chip.ngpio = ngpio;
238 gpio_config.gpio_chip.names = gpio_config.gpio_names; 239 viafb_gpio_config.gpio_chip.names = viafb_gpio_config.gpio_names;
239 gpio_config.vdev = vdev; 240 viafb_gpio_config.vdev = vdev;
240 if (ngpio == 0) { 241 if (ngpio == 0) {
241 printk(KERN_INFO "viafb: no GPIOs configured\n"); 242 printk(KERN_INFO "viafb: no GPIOs configured\n");
242 return 0; 243 return 0;
@@ -245,18 +246,18 @@ static __devinit int viafb_gpio_probe(struct platform_device *platdev)
245 * Enable the ports. They come in pairs, with a single 246 * Enable the ports. They come in pairs, with a single
246 * enable bit for both. 247 * enable bit for both.
247 */ 248 */
248 spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags); 249 spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
249 for (i = 0; i < ngpio; i += 2) 250 for (i = 0; i < ngpio; i += 2)
250 viafb_gpio_enable(gpio_config.active_gpios[i]); 251 viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
251 spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags); 252 spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
252 /* 253 /*
253 * Get registered. 254 * Get registered.
254 */ 255 */
255 gpio_config.gpio_chip.base = -1; /* Dynamic */ 256 viafb_gpio_config.gpio_chip.base = -1; /* Dynamic */
256 ret = gpiochip_add(&gpio_config.gpio_chip); 257 ret = gpiochip_add(&viafb_gpio_config.gpio_chip);
257 if (ret) { 258 if (ret) {
258 printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret); 259 printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret);
259 gpio_config.gpio_chip.ngpio = 0; 260 viafb_gpio_config.gpio_chip.ngpio = 0;
260 } 261 }
261#ifdef CONFIG_PM 262#ifdef CONFIG_PM
262 viafb_pm_register(&viafb_gpio_pm_hooks); 263 viafb_pm_register(&viafb_gpio_pm_hooks);
@@ -277,8 +278,8 @@ static int viafb_gpio_remove(struct platform_device *platdev)
277 /* 278 /*
278 * Get unregistered. 279 * Get unregistered.
279 */ 280 */
280 if (gpio_config.gpio_chip.ngpio > 0) { 281 if (viafb_gpio_config.gpio_chip.ngpio > 0) {
281 ret = gpiochip_remove(&gpio_config.gpio_chip); 282 ret = gpiochip_remove(&viafb_gpio_config.gpio_chip);
282 if (ret) { /* Somebody still using it? */ 283 if (ret) { /* Somebody still using it? */
283 printk(KERN_ERR "Viafb: GPIO remove failed\n"); 284 printk(KERN_ERR "Viafb: GPIO remove failed\n");
284 return ret; 285 return ret;
@@ -287,11 +288,11 @@ static int viafb_gpio_remove(struct platform_device *platdev)
287 /* 288 /*
288 * Disable the ports. 289 * Disable the ports.
289 */ 290 */
290 spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags); 291 spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
291 for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2) 292 for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
292 viafb_gpio_disable(gpio_config.active_gpios[i]); 293 viafb_gpio_disable(viafb_gpio_config.active_gpios[i]);
293 gpio_config.gpio_chip.ngpio = 0; 294 viafb_gpio_config.gpio_chip.ngpio = 0;
294 spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags); 295 spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
295 return ret; 296 return ret;
296} 297}
297 298
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 7c608c5ccf84..00d615d7aa21 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
42 42
43config W1_MASTER_DS1WM 43config W1_MASTER_DS1WM
44 tristate "Maxim DS1WM 1-wire busmaster" 44 tristate "Maxim DS1WM 1-wire busmaster"
45 depends on W1 && ARM && HAVE_CLK 45 depends on W1
46 help 46 help
47 Say Y here to enable the DS1WM 1-wire driver, such as that 47 Say Y here to enable the DS1WM 1-wire driver, such as that
48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like 48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 2f4fa02744a5..ad57593d224a 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -33,6 +33,7 @@
33#define DS1WM_INT 0x02 /* R/W interrupt status */ 33#define DS1WM_INT 0x02 /* R/W interrupt status */
34#define DS1WM_INT_EN 0x03 /* R/W interrupt enable */ 34#define DS1WM_INT_EN 0x03 /* R/W interrupt enable */
35#define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */ 35#define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */
36#define DS1WM_CNTRL 0x05 /* R/W master control register (not used yet) */
36 37
37#define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */ 38#define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */
38#define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */ 39#define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */
@@ -56,6 +57,7 @@
56#define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */ 57#define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */
57#define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */ 58#define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */
58 59
60#define DS1WM_INTEN_NOT_IAS (~DS1WM_INTEN_IAS) /* all but INTR active state */
59 61
60#define DS1WM_TIMEOUT (HZ * 5) 62#define DS1WM_TIMEOUT (HZ * 5)
61 63
@@ -63,41 +65,50 @@ static struct {
63 unsigned long freq; 65 unsigned long freq;
64 unsigned long divisor; 66 unsigned long divisor;
65} freq[] = { 67} freq[] = {
66 { 4000000, 0x8 }, 68 { 1000000, 0x80 },
67 { 5000000, 0x2 }, 69 { 2000000, 0x84 },
68 { 6000000, 0x5 }, 70 { 3000000, 0x81 },
69 { 7000000, 0x3 }, 71 { 4000000, 0x88 },
70 { 8000000, 0xc }, 72 { 5000000, 0x82 },
71 { 10000000, 0x6 }, 73 { 6000000, 0x85 },
72 { 12000000, 0x9 }, 74 { 7000000, 0x83 },
73 { 14000000, 0x7 }, 75 { 8000000, 0x8c },
74 { 16000000, 0x10 }, 76 { 10000000, 0x86 },
75 { 20000000, 0xa }, 77 { 12000000, 0x89 },
76 { 24000000, 0xd }, 78 { 14000000, 0x87 },
77 { 28000000, 0xb }, 79 { 16000000, 0x90 },
78 { 32000000, 0x14 }, 80 { 20000000, 0x8a },
79 { 40000000, 0xe }, 81 { 24000000, 0x8d },
80 { 48000000, 0x11 }, 82 { 28000000, 0x8b },
81 { 56000000, 0xf }, 83 { 32000000, 0x94 },
82 { 64000000, 0x18 }, 84 { 40000000, 0x8e },
83 { 80000000, 0x12 }, 85 { 48000000, 0x91 },
84 { 96000000, 0x15 }, 86 { 56000000, 0x8f },
85 { 112000000, 0x13 }, 87 { 64000000, 0x98 },
86 { 128000000, 0x1c }, 88 { 80000000, 0x92 },
89 { 96000000, 0x95 },
90 { 112000000, 0x93 },
91 { 128000000, 0x9c },
92/* you can continue this table, consult the OPERATION - CLOCK DIVISOR
93 section of the ds1wm spec sheet. */
87}; 94};
88 95
89struct ds1wm_data { 96struct ds1wm_data {
90 void __iomem *map; 97 void __iomem *map;
91 int bus_shift; /* # of shifts to calc register offsets */ 98 int bus_shift; /* # of shifts to calc register offsets */
92 struct platform_device *pdev; 99 struct platform_device *pdev;
93 const struct mfd_cell *cell; 100 const struct mfd_cell *cell;
94 int irq; 101 int irq;
95 int active_high; 102 int slave_present;
96 int slave_present; 103 void *reset_complete;
97 void *reset_complete; 104 void *read_complete;
98 void *read_complete; 105 void *write_complete;
99 void *write_complete; 106 int read_error;
100 u8 read_byte; /* last byte received */ 107 /* last byte received */
108 u8 read_byte;
 109	/* value written to INT_EN that disables all interrupt sources, */
 110	/* keeping only the IAS (interrupt active state) bit as configured (optimization) */
111 u8 int_en_reg_none;
101}; 112};
102 113
103static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, 114static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -115,23 +126,39 @@ static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg)
115static irqreturn_t ds1wm_isr(int isr, void *data) 126static irqreturn_t ds1wm_isr(int isr, void *data)
116{ 127{
117 struct ds1wm_data *ds1wm_data = data; 128 struct ds1wm_data *ds1wm_data = data;
118 u8 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT); 129 u8 intr;
130 u8 inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN);
131 /* if no bits are set in int enable register (except the IAS)
 132	   then go no further; reading the regs below has side effects */
133 if (!(inten & DS1WM_INTEN_NOT_IAS))
134 return IRQ_NONE;
119 135
120 ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1; 136 ds1wm_write_register(ds1wm_data,
137 DS1WM_INT_EN, ds1wm_data->int_en_reg_none);
121 138
122 if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) 139 /* this read action clears the INTR and certain flags in ds1wm */
123 complete(ds1wm_data->reset_complete); 140 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT);
124 141
125 if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) 142 ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1;
126 complete(ds1wm_data->write_complete);
127 143
144 if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) {
145 inten &= ~DS1WM_INTEN_ETMT;
146 complete(ds1wm_data->write_complete);
147 }
128 if (intr & DS1WM_INT_RBF) { 148 if (intr & DS1WM_INT_RBF) {
149 /* this read clears the RBF flag */
129 ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data, 150 ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data,
130 DS1WM_DATA); 151 DS1WM_DATA);
152 inten &= ~DS1WM_INTEN_ERBF;
131 if (ds1wm_data->read_complete) 153 if (ds1wm_data->read_complete)
132 complete(ds1wm_data->read_complete); 154 complete(ds1wm_data->read_complete);
133 } 155 }
156 if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) {
157 inten &= ~DS1WM_INTEN_EPD;
158 complete(ds1wm_data->reset_complete);
159 }
134 160
161 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, inten);
135 return IRQ_HANDLED; 162 return IRQ_HANDLED;
136} 163}
137 164
@@ -142,33 +169,19 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
142 169
143 ds1wm_data->reset_complete = &reset_done; 170 ds1wm_data->reset_complete = &reset_done;
144 171
172 /* enable Presence detect only */
145 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD | 173 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD |
146 (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); 174 ds1wm_data->int_en_reg_none);
147 175
148 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET); 176 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET);
149 177
150 timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); 178 timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT);
151 ds1wm_data->reset_complete = NULL; 179 ds1wm_data->reset_complete = NULL;
152 if (!timeleft) { 180 if (!timeleft) {
153 dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); 181 dev_err(&ds1wm_data->pdev->dev, "reset failed, timed out\n");
154 return 1; 182 return 1;
155 } 183 }
156 184
157 /* Wait for the end of the reset. According to the specs, the time
158 * from when the interrupt is asserted to the end of the reset is:
159 * tRSTH - tPDH - tPDL - tPDI
160 * 625 us - 60 us - 240 us - 100 ns = 324.9 us
161 *
162 * We'll wait a bit longer just to be sure.
163 * Was udelay(500), but if it is going to busywait the cpu that long,
164 * might as well come back later.
165 */
166 msleep(1);
167
168 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
169 DS1WM_INTEN_ERBF | DS1WM_INTEN_ETMT | DS1WM_INTEN_EPD |
170 (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0));
171
172 if (!ds1wm_data->slave_present) { 185 if (!ds1wm_data->slave_present) {
173 dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); 186 dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n");
174 return 1; 187 return 1;
@@ -179,26 +192,47 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
179 192
180static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) 193static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data)
181{ 194{
195 unsigned long timeleft;
182 DECLARE_COMPLETION_ONSTACK(write_done); 196 DECLARE_COMPLETION_ONSTACK(write_done);
183 ds1wm_data->write_complete = &write_done; 197 ds1wm_data->write_complete = &write_done;
184 198
199 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
200 ds1wm_data->int_en_reg_none | DS1WM_INTEN_ETMT);
201
185 ds1wm_write_register(ds1wm_data, DS1WM_DATA, data); 202 ds1wm_write_register(ds1wm_data, DS1WM_DATA, data);
186 203
187 wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT); 204 timeleft = wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT);
205
188 ds1wm_data->write_complete = NULL; 206 ds1wm_data->write_complete = NULL;
207 if (!timeleft) {
208 dev_err(&ds1wm_data->pdev->dev, "write failed, timed out\n");
209 return -ETIMEDOUT;
210 }
189 211
190 return 0; 212 return 0;
191} 213}
192 214
193static int ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data) 215static u8 ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data)
194{ 216{
217 unsigned long timeleft;
218 u8 intEnable = DS1WM_INTEN_ERBF | ds1wm_data->int_en_reg_none;
195 DECLARE_COMPLETION_ONSTACK(read_done); 219 DECLARE_COMPLETION_ONSTACK(read_done);
220
221 ds1wm_read_register(ds1wm_data, DS1WM_DATA);
222
196 ds1wm_data->read_complete = &read_done; 223 ds1wm_data->read_complete = &read_done;
224 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, intEnable);
197 225
198 ds1wm_write(ds1wm_data, write_data); 226 ds1wm_write_register(ds1wm_data, DS1WM_DATA, write_data);
199 wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT); 227 timeleft = wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT);
200 ds1wm_data->read_complete = NULL;
201 228
229 ds1wm_data->read_complete = NULL;
230 if (!timeleft) {
231 dev_err(&ds1wm_data->pdev->dev, "read failed, timed out\n");
232 ds1wm_data->read_error = -ETIMEDOUT;
233 return 0xFF;
234 }
235 ds1wm_data->read_error = 0;
202 return ds1wm_data->read_byte; 236 return ds1wm_data->read_byte;
203} 237}
204 238
@@ -206,8 +240,8 @@ static int ds1wm_find_divisor(int gclk)
206{ 240{
207 int i; 241 int i;
208 242
209 for (i = 0; i < ARRAY_SIZE(freq); i++) 243 for (i = ARRAY_SIZE(freq)-1; i >= 0; --i)
210 if (gclk <= freq[i].freq) 244 if (gclk >= freq[i].freq)
211 return freq[i].divisor; 245 return freq[i].divisor;
212 246
213 return 0; 247 return 0;
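With the rewritten loop, ds1wm_find_divisor() walks the table from the top and returns the divisor of the largest listed frequency that does not exceed the bus clock, whereas the old code picked the smallest entry greater than or equal to gclk. A standalone illustration using a few of the table rows (not driver code):

#include <stdio.h>

static const struct { unsigned long freq, divisor; } tbl[] = {
	{  1000000, 0x80 }, {  2000000, 0x84 }, {  4000000, 0x88 },
	{  8000000, 0x8c }, { 12000000, 0x89 }, { 16000000, 0x90 },
};

static unsigned long find_divisor(unsigned long gclk)
{
	int i;

	/* pick the largest table frequency that does not exceed gclk */
	for (i = (int)(sizeof(tbl) / sizeof(tbl[0])) - 1; i >= 0; --i)
		if (gclk >= tbl[i].freq)
			return tbl[i].divisor;
	return 0;	/* clock too slow for the table */
}

int main(void)
{
	/* 13.5 MHz clock: 12 MHz is the largest entry <= gclk -> 0x89 */
	printf("0x%lx\n", find_divisor(13500000));
	return 0;
}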
@@ -216,12 +250,14 @@ static int ds1wm_find_divisor(int gclk)
216static void ds1wm_up(struct ds1wm_data *ds1wm_data) 250static void ds1wm_up(struct ds1wm_data *ds1wm_data)
217{ 251{
218 int divisor; 252 int divisor;
219 struct ds1wm_driver_data *plat = mfd_get_data(ds1wm_data->pdev); 253 struct ds1wm_driver_data *plat = ds1wm_data->pdev->dev.platform_data;
220 254
221 if (ds1wm_data->cell->enable) 255 if (ds1wm_data->cell->enable)
222 ds1wm_data->cell->enable(ds1wm_data->pdev); 256 ds1wm_data->cell->enable(ds1wm_data->pdev);
223 257
224 divisor = ds1wm_find_divisor(plat->clock_rate); 258 divisor = ds1wm_find_divisor(plat->clock_rate);
259 dev_dbg(&ds1wm_data->pdev->dev,
260 "found divisor 0x%x for clock %d\n", divisor, plat->clock_rate);
225 if (divisor == 0) { 261 if (divisor == 0) {
226 dev_err(&ds1wm_data->pdev->dev, 262 dev_err(&ds1wm_data->pdev->dev,
227 "no suitable divisor for %dHz clock\n", 263 "no suitable divisor for %dHz clock\n",
@@ -242,7 +278,7 @@ static void ds1wm_down(struct ds1wm_data *ds1wm_data)
242 278
243 /* Disable interrupts. */ 279 /* Disable interrupts. */
244 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, 280 ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
245 ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0); 281 ds1wm_data->int_en_reg_none);
246 282
247 if (ds1wm_data->cell->disable) 283 if (ds1wm_data->cell->disable)
248 ds1wm_data->cell->disable(ds1wm_data->pdev); 284 ds1wm_data->cell->disable(ds1wm_data->pdev);
@@ -279,41 +315,121 @@ static void ds1wm_search(void *data, struct w1_master *master_dev,
279{ 315{
280 struct ds1wm_data *ds1wm_data = data; 316 struct ds1wm_data *ds1wm_data = data;
281 int i; 317 int i;
282 unsigned long long rom_id; 318 int ms_discrep_bit = -1;
283 319 u64 r = 0; /* holds the progress of the search */
284 /* XXX We need to iterate for multiple devices per the DS1WM docs. 320 u64 r_prime, d;
285 * See http://www.maxim-ic.com/appnotes.cfm/appnote_number/120. */ 321 unsigned slaves_found = 0;
286 if (ds1wm_reset(ds1wm_data)) 322 unsigned int pass = 0;
287 return; 323
288 324 dev_dbg(&ds1wm_data->pdev->dev, "search begin\n");
289 ds1wm_write(ds1wm_data, search_type); 325 while (true) {
290 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); 326 ++pass;
291 327 if (pass > 100) {
292 for (rom_id = 0, i = 0; i < 16; i++) { 328 dev_dbg(&ds1wm_data->pdev->dev,
293 329 "too many attempts (100), search aborted\n");
294 unsigned char resp, r, d; 330 return;
295 331 }
296 resp = ds1wm_read(ds1wm_data, 0x00); 332
297 333 if (ds1wm_reset(ds1wm_data)) {
298 r = ((resp & 0x02) >> 1) | 334 dev_dbg(&ds1wm_data->pdev->dev,
299 ((resp & 0x08) >> 2) | 335 "pass: %d reset error (or no slaves)\n", pass);
300 ((resp & 0x20) >> 3) | 336 break;
301 ((resp & 0x80) >> 4); 337 }
302 338
303 d = ((resp & 0x01) >> 0) | 339 dev_dbg(&ds1wm_data->pdev->dev,
304 ((resp & 0x04) >> 1) | 340 "pass: %d r : %0#18llx writing SEARCH_ROM\n", pass, r);
305 ((resp & 0x10) >> 2) | 341 ds1wm_write(ds1wm_data, search_type);
306 ((resp & 0x40) >> 3); 342 dev_dbg(&ds1wm_data->pdev->dev,
307 343 "pass: %d entering ASM\n", pass);
308 rom_id |= (unsigned long long) r << (i * 4); 344 ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA);
309 345 dev_dbg(&ds1wm_data->pdev->dev,
310 } 346 "pass: %d begining nibble loop\n", pass);
311 dev_dbg(&ds1wm_data->pdev->dev, "found 0x%08llX\n", rom_id); 347
312 348 r_prime = 0;
313 ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA); 349 d = 0;
314 ds1wm_reset(ds1wm_data); 350 /* we work one nibble at a time */
315 351 /* each nibble is interleaved to form a byte */
316 slave_found(master_dev, rom_id); 352 for (i = 0; i < 16; i++) {
353
354 unsigned char resp, _r, _r_prime, _d;
355
356 _r = (r >> (4*i)) & 0xf;
357 _r = ((_r & 0x1) << 1) |
358 ((_r & 0x2) << 2) |
359 ((_r & 0x4) << 3) |
360 ((_r & 0x8) << 4);
361
362 /* writes _r, then reads back: */
363 resp = ds1wm_read(ds1wm_data, _r);
364
365 if (ds1wm_data->read_error) {
366 dev_err(&ds1wm_data->pdev->dev,
367 "pass: %d nibble: %d read error\n", pass, i);
368 break;
369 }
370
371 _r_prime = ((resp & 0x02) >> 1) |
372 ((resp & 0x08) >> 2) |
373 ((resp & 0x20) >> 3) |
374 ((resp & 0x80) >> 4);
375
376 _d = ((resp & 0x01) >> 0) |
377 ((resp & 0x04) >> 1) |
378 ((resp & 0x10) >> 2) |
379 ((resp & 0x40) >> 3);
380
381 r_prime |= (unsigned long long) _r_prime << (i * 4);
382 d |= (unsigned long long) _d << (i * 4);
383
384 }
385 if (ds1wm_data->read_error) {
386 dev_err(&ds1wm_data->pdev->dev,
387 "pass: %d read error, retrying\n", pass);
388 break;
389 }
390 dev_dbg(&ds1wm_data->pdev->dev,
391 "pass: %d r\': %0#18llx d:%0#18llx\n",
392 pass, r_prime, d);
393 dev_dbg(&ds1wm_data->pdev->dev,
394 "pass: %d nibble loop complete, exiting ASM\n", pass);
395 ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA);
396 dev_dbg(&ds1wm_data->pdev->dev,
397 "pass: %d resetting bus\n", pass);
398 ds1wm_reset(ds1wm_data);
399 if ((r_prime & ((u64)1 << 63)) && (d & ((u64)1 << 63))) {
400 dev_err(&ds1wm_data->pdev->dev,
401 "pass: %d bus error, retrying\n", pass);
402 continue; /* start over */
403 }
404
405
406 dev_dbg(&ds1wm_data->pdev->dev,
407 "pass: %d found %0#18llx\n", pass, r_prime);
408 slave_found(master_dev, r_prime);
409 ++slaves_found;
410 dev_dbg(&ds1wm_data->pdev->dev,
411 "pass: %d complete, preparing next pass\n", pass);
412
 413		/* any discrepancy for which we already chose the
 414		   '1' branch is now irrelevant; reveal the
 415		   next branch with this: */
416 d &= ~r;
417 /* find last bit set, i.e. the most signif. bit set */
418 ms_discrep_bit = fls64(d) - 1;
419 dev_dbg(&ds1wm_data->pdev->dev,
420 "pass: %d new d:%0#18llx MS discrep bit:%d\n",
421 pass, d, ms_discrep_bit);
422
 423		/* prepare for the next ROM search: */
 424
425 if (ms_discrep_bit == -1)
426 break;
427
 428		r = (r & ~(~0ull << ms_discrep_bit)) | ((u64)1 << ms_discrep_bit);
429 } /* end while true */
430 dev_dbg(&ds1wm_data->pdev->dev,
431 "pass: %d total: %d search done ms d bit pos: %d\n", pass,
432 slaves_found, ms_discrep_bit);
317} 433}
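/*
 * Standalone illustration of the bit gymnastics used in the nibble loop
 * above (not driver code): the four ROM bits to transmit are spread onto
 * the odd bit positions of the byte written to the DS1WM, and the byte
 * read back carries r' on the odd positions and the discrepancy flags d
 * on the even positions.  Example values are arbitrary.
 */
#include <stdio.h>

static unsigned char spread_to_odd(unsigned char nibble)
{
	return ((nibble & 0x1) << 1) | ((nibble & 0x2) << 2) |
	       ((nibble & 0x4) << 3) | ((nibble & 0x8) << 4);
}

static void split_response(unsigned char resp,
			   unsigned char *r_prime, unsigned char *d)
{
	*r_prime = ((resp & 0x02) >> 1) | ((resp & 0x08) >> 2) |
		   ((resp & 0x20) >> 3) | ((resp & 0x80) >> 4);
	*d       = ((resp & 0x01) >> 0) | ((resp & 0x04) >> 1) |
		   ((resp & 0x10) >> 2) | ((resp & 0x40) >> 3);
}

int main(void)
{
	unsigned char rp, d;

	printf("spread(0xa) = 0x%02x\n", spread_to_odd(0xa));	/* -> 0x88 */
	split_response(0xa2, &rp, &d);
	printf("resp 0xa2 -> r'=0x%x d=0x%x\n", rp, d);
	return 0;
}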
318 434
319/* --------------------------------------------------------------------- */ 435/* --------------------------------------------------------------------- */
@@ -351,13 +467,21 @@ static int ds1wm_probe(struct platform_device *pdev)
351 ret = -ENOMEM; 467 ret = -ENOMEM;
352 goto err0; 468 goto err0;
353 } 469 }
354 plat = mfd_get_data(pdev);
355 470
356 /* calculate bus shift from mem resource */ 471 /* calculate bus shift from mem resource */
357 ds1wm_data->bus_shift = resource_size(res) >> 3; 472 ds1wm_data->bus_shift = resource_size(res) >> 3;
358 473
359 ds1wm_data->pdev = pdev; 474 ds1wm_data->pdev = pdev;
360 ds1wm_data->cell = mfd_get_cell(pdev); 475 ds1wm_data->cell = mfd_get_cell(pdev);
476 if (!ds1wm_data->cell) {
477 ret = -ENODEV;
478 goto err1;
479 }
480 plat = pdev->dev.platform_data;
481 if (!plat) {
482 ret = -ENODEV;
483 goto err1;
484 }
361 485
362 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 486 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
363 if (!res) { 487 if (!res) {
@@ -365,15 +489,15 @@ static int ds1wm_probe(struct platform_device *pdev)
365 goto err1; 489 goto err1;
366 } 490 }
367 ds1wm_data->irq = res->start; 491 ds1wm_data->irq = res->start;
368 ds1wm_data->active_high = plat->active_high; 492 ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
369 493
370 if (res->flags & IORESOURCE_IRQ_HIGHEDGE) 494 if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
371 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); 495 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
372 if (res->flags & IORESOURCE_IRQ_LOWEDGE) 496 if (res->flags & IORESOURCE_IRQ_LOWEDGE)
373 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); 497 irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
374 498
375 ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, 499 ret = request_irq(ds1wm_data->irq, ds1wm_isr,
376 "ds1wm", ds1wm_data); 500 IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
377 if (ret) 501 if (ret)
378 goto err1; 502 goto err1;
379 503
@@ -460,5 +584,6 @@ module_exit(ds1wm_exit);
460 584
461MODULE_LICENSE("GPL"); 585MODULE_LICENSE("GPL");
462MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, " 586MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, "
463 "Matt Reimer <mreimer@vpop.net>"); 587 "Matt Reimer <mreimer@vpop.net>,"
588 "Jean-Francois Dagenais <dagenaisj@sonatest.com>");
464MODULE_DESCRIPTION("DS1WM w1 busmaster driver"); 589MODULE_DESCRIPTION("DS1WM w1 busmaster driver");
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index f0c909625bd1..d0cb01b42012 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,13 @@ config W1_SLAVE_SMEM
16 Say Y here if you want to connect 1-wire 16 Say Y here if you want to connect 1-wire
17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. 17 simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
18 18
19config W1_SLAVE_DS2408
20 tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
21 help
 22	  Say Y here if you want support for the 1-wire
 23	  DS2408 8-Channel Addressable Switch (I/O expander).
 24
25
19config W1_SLAVE_DS2423 26config W1_SLAVE_DS2423
20 tristate "Counter 1-wire device (DS2423)" 27 tristate "Counter 1-wire device (DS2423)"
21 select CRC16 28 select CRC16
@@ -61,6 +68,19 @@ config W1_SLAVE_DS2760
61 68
62 If you are unsure, say N. 69 If you are unsure, say N.
63 70
71config W1_SLAVE_DS2780
72 tristate "Dallas 2780 battery monitor chip"
73 depends on W1
74 help
 75	  If you enable this you will have support for the DS2780
 76	  battery monitor chip.
 77
 78	  The battery monitor chip is used in many batteries/devices
 79	  as the part responsible for charging, discharging, and
 80	  monitoring Li+ batteries.
81
82 If you are unsure, say N.
83
64config W1_SLAVE_BQ27000 84config W1_SLAVE_BQ27000
65 tristate "BQ27000 slave support" 85 tristate "BQ27000 slave support"
66 depends on W1 86 depends on W1
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 3c76350a24f7..1f31e9fb0b25 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,8 +4,10 @@
4 4
5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o 5obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o
6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o 6obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o
7obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
7obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o 8obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
8obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o 9obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
9obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o 10obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
10obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o 11obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
12obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
11obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o 13obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
new file mode 100644
index 000000000000..c37781899d90
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -0,0 +1,402 @@
1/*
2 * w1_ds2408.c - w1 family 29 (DS2408) driver
3 *
4 * Copyright (c) 2010 Jean-Francois Dagenais <dagenaisj@sonatest.com>
5 *
6 * This source code is licensed under the GNU General Public License,
7 * Version 2. See the file COPYING for more details.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/device.h>
14#include <linux/types.h>
15#include <linux/delay.h>
16#include <linux/slab.h>
17
18#include "../w1.h"
19#include "../w1_int.h"
20#include "../w1_family.h"
21
22MODULE_LICENSE("GPL");
23MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
24MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
25
26
27#define W1_F29_RETRIES 3
28
29#define W1_F29_REG_LOGIG_STATE 0x88 /* R */
30#define W1_F29_REG_OUTPUT_LATCH_STATE 0x89 /* R */
31#define W1_F29_REG_ACTIVITY_LATCH_STATE 0x8A /* R */
32#define W1_F29_REG_COND_SEARCH_SELECT_MASK 0x8B /* RW */
33#define W1_F29_REG_COND_SEARCH_POL_SELECT 0x8C /* RW */
34#define W1_F29_REG_CONTROL_AND_STATUS 0x8D /* RW */
35
36#define W1_F29_FUNC_READ_PIO_REGS 0xF0
37#define W1_F29_FUNC_CHANN_ACCESS_READ 0xF5
38#define W1_F29_FUNC_CHANN_ACCESS_WRITE 0x5A
39/* also used to write the control/status reg (0x8D): */
40#define W1_F29_FUNC_WRITE_COND_SEARCH_REG 0xCC
41#define W1_F29_FUNC_RESET_ACTIVITY_LATCHES 0xC3
42
43#define W1_F29_SUCCESS_CONFIRM_BYTE 0xAA
44
45static int _read_reg(struct w1_slave *sl, u8 address, unsigned char* buf)
46{
47 u8 wrbuf[3];
48 dev_dbg(&sl->dev,
49 "Reading with slave: %p, reg addr: %0#4x, buff addr: %p",
50 sl, (unsigned int)address, buf);
51
52 if (!buf)
53 return -EINVAL;
54
55 mutex_lock(&sl->master->mutex);
56 dev_dbg(&sl->dev, "mutex locked");
57
58 if (w1_reset_select_slave(sl)) {
59 mutex_unlock(&sl->master->mutex);
60 return -EIO;
61 }
62
63 wrbuf[0] = W1_F29_FUNC_READ_PIO_REGS;
64 wrbuf[1] = address;
65 wrbuf[2] = 0;
66 w1_write_block(sl->master, wrbuf, 3);
67 *buf = w1_read_8(sl->master);
68
69 mutex_unlock(&sl->master->mutex);
70 dev_dbg(&sl->dev, "mutex unlocked");
71 return 1;
72}
73
74static ssize_t w1_f29_read_state(
75 struct file *filp, struct kobject *kobj,
76 struct bin_attribute *bin_attr,
77 char *buf, loff_t off, size_t count)
78{
79 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
80 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
81 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
82 if (count != 1 || off != 0)
83 return -EFAULT;
84 return _read_reg(kobj_to_w1_slave(kobj), W1_F29_REG_LOGIG_STATE, buf);
85}
86
87static ssize_t w1_f29_read_output(
88 struct file *filp, struct kobject *kobj,
89 struct bin_attribute *bin_attr,
90 char *buf, loff_t off, size_t count)
91{
92 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
93 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
94 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
95 if (count != 1 || off != 0)
96 return -EFAULT;
97 return _read_reg(kobj_to_w1_slave(kobj),
98 W1_F29_REG_OUTPUT_LATCH_STATE, buf);
99}
100
101static ssize_t w1_f29_read_activity(
102 struct file *filp, struct kobject *kobj,
103 struct bin_attribute *bin_attr,
104 char *buf, loff_t off, size_t count)
105{
106 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
107 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
108 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
109 if (count != 1 || off != 0)
110 return -EFAULT;
111 return _read_reg(kobj_to_w1_slave(kobj),
112 W1_F29_REG_ACTIVITY_LATCH_STATE, buf);
113}
114
115static ssize_t w1_f29_read_cond_search_mask(
116 struct file *filp, struct kobject *kobj,
117 struct bin_attribute *bin_attr,
118 char *buf, loff_t off, size_t count)
119{
120 dev_dbg(&kobj_to_w1_slave(kobj)->dev,
121 "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
122 bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
123 if (count != 1 || off != 0)
124 return -EFAULT;
125 return _read_reg(kobj_to_w1_slave(kobj),
126 W1_F29_REG_COND_SEARCH_SELECT_MASK, buf);
127}
128
129static ssize_t w1_f29_read_cond_search_polarity(
130 struct file *filp, struct kobject *kobj,
131 struct bin_attribute *bin_attr,
132 char *buf, loff_t off, size_t count)
133{
134 if (count != 1 || off != 0)
135 return -EFAULT;
136 return _read_reg(kobj_to_w1_slave(kobj),
137 W1_F29_REG_COND_SEARCH_POL_SELECT, buf);
138}
139
140static ssize_t w1_f29_read_status_control(
141 struct file *filp, struct kobject *kobj,
142 struct bin_attribute *bin_attr,
143 char *buf, loff_t off, size_t count)
144{
145 if (count != 1 || off != 0)
146 return -EFAULT;
147 return _read_reg(kobj_to_w1_slave(kobj),
148 W1_F29_REG_CONTROL_AND_STATUS, buf);
149}
150
151
152
153
154static ssize_t w1_f29_write_output(
155 struct file *filp, struct kobject *kobj,
156 struct bin_attribute *bin_attr,
157 char *buf, loff_t off, size_t count)
158{
159 struct w1_slave *sl = kobj_to_w1_slave(kobj);
160 u8 w1_buf[3];
161 u8 readBack;
162 unsigned int retries = W1_F29_RETRIES;
163
164 if (count != 1 || off != 0)
165 return -EFAULT;
166
167 dev_dbg(&sl->dev, "locking mutex for write_output");
168 mutex_lock(&sl->master->mutex);
169 dev_dbg(&sl->dev, "mutex locked");
170
171 if (w1_reset_select_slave(sl))
172 goto error;
173
174 while (retries--) {
175 w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE;
176 w1_buf[1] = *buf;
177 w1_buf[2] = ~(*buf);
178 w1_write_block(sl->master, w1_buf, 3);
179
180 readBack = w1_read_8(sl->master);
181 /* here the master could read another byte which
182 would be the PIO reg (the actual pin logic state)
183 since in this driver we don't know which pins are
 184		ins and outs, there's no value in reading the state and
 185		comparing it with (*buf), so end this command abruptly: */
186 if (w1_reset_resume_command(sl->master))
187 goto error;
188
 189		if (readBack != W1_F29_SUCCESS_CONFIRM_BYTE) {
190 /* try again, the slave is ready for a command */
191 continue;
192 }
193
194 /* go read back the output latches */
195 /* (the direct effect of the write above) */
196 w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
197 w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
198 w1_buf[2] = 0;
199 w1_write_block(sl->master, w1_buf, 3);
200 /* read the result of the READ_PIO_REGS command */
201 if (w1_read_8(sl->master) == *buf) {
202 /* success! */
203 mutex_unlock(&sl->master->mutex);
204 dev_dbg(&sl->dev,
205 "mutex unlocked, retries:%d", retries);
206 return 1;
207 }
208 }
209error:
210 mutex_unlock(&sl->master->mutex);
211 dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
212
213 return -EIO;
214}
215
216
217/**
218 * Writing to the activity file resets the activity latches.
219 */
220static ssize_t w1_f29_write_activity(
221 struct file *filp, struct kobject *kobj,
222 struct bin_attribute *bin_attr,
223 char *buf, loff_t off, size_t count)
224{
225 struct w1_slave *sl = kobj_to_w1_slave(kobj);
226 unsigned int retries = W1_F29_RETRIES;
227
228 if (count != 1 || off != 0)
229 return -EFAULT;
230
231 mutex_lock(&sl->master->mutex);
232
233 if (w1_reset_select_slave(sl))
234 goto error;
235
236 while (retries--) {
237 w1_write_8(sl->master, W1_F29_FUNC_RESET_ACTIVITY_LATCHES);
238 if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE) {
239 mutex_unlock(&sl->master->mutex);
240 return 1;
241 }
242 if (w1_reset_resume_command(sl->master))
243 goto error;
244 }
245
246error:
247 mutex_unlock(&sl->master->mutex);
248 return -EIO;
249}
250
251static ssize_t w1_f29_write_status_control(
252 struct file *filp,
253 struct kobject *kobj,
254 struct bin_attribute *bin_attr,
255 char *buf,
256 loff_t off,
257 size_t count)
258{
259 struct w1_slave *sl = kobj_to_w1_slave(kobj);
260 u8 w1_buf[4];
261 unsigned int retries = W1_F29_RETRIES;
262
263 if (count != 1 || off != 0)
264 return -EFAULT;
265
266 mutex_lock(&sl->master->mutex);
267
268 if (w1_reset_select_slave(sl))
269 goto error;
270
271 while (retries--) {
272 w1_buf[0] = W1_F29_FUNC_WRITE_COND_SEARCH_REG;
273 w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
274 w1_buf[2] = 0;
275 w1_buf[3] = *buf;
276
277 w1_write_block(sl->master, w1_buf, 4);
278 if (w1_reset_resume_command(sl->master))
279 goto error;
280
281 w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
282 w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
283 w1_buf[2] = 0;
284
285 w1_write_block(sl->master, w1_buf, 3);
286 if (w1_read_8(sl->master) == *buf) {
287 /* success! */
288 mutex_unlock(&sl->master->mutex);
289 return 1;
290 }
291 }
292error:
293 mutex_unlock(&sl->master->mutex);
294
295 return -EIO;
296}
297
298
299
300#define NB_SYSFS_BIN_FILES 6
301static struct bin_attribute w1_f29_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
302 {
303 .attr = {
304 .name = "state",
305 .mode = S_IRUGO,
306 },
307 .size = 1,
308 .read = w1_f29_read_state,
309 },
310 {
311 .attr = {
312 .name = "output",
313 .mode = S_IRUGO | S_IWUSR | S_IWGRP,
314 },
315 .size = 1,
316 .read = w1_f29_read_output,
317 .write = w1_f29_write_output,
318 },
319 {
320 .attr = {
321 .name = "activity",
322 .mode = S_IRUGO,
323 },
324 .size = 1,
325 .read = w1_f29_read_activity,
326 .write = w1_f29_write_activity,
327 },
328 {
329 .attr = {
330 .name = "cond_search_mask",
331 .mode = S_IRUGO,
332 },
333 .size = 1,
334 .read = w1_f29_read_cond_search_mask,
335 .write = 0,
336 },
337 {
338 .attr = {
339 .name = "cond_search_polarity",
340 .mode = S_IRUGO,
341 },
342 .size = 1,
343 .read = w1_f29_read_cond_search_polarity,
344 .write = 0,
345 },
346 {
347 .attr = {
348 .name = "status_control",
349 .mode = S_IRUGO | S_IWUSR | S_IWGRP,
350 },
351 .size = 1,
352 .read = w1_f29_read_status_control,
353 .write = w1_f29_write_status_control,
354 }
355};
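/*
 * Userspace sketch (not part of the patch): driving the DS2408 through the
 * sysfs files declared above.  The 1-wire slave id in the path is a
 * placeholder; real devices show up under /sys/bus/w1/devices/29-<id>/.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/w1/devices/29-00000012345a/output";
	unsigned char val = 0xfe;	/* pull P0 low, release P1..P7 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, &val, 1) != 1)
		perror("write");
	close(fd);
	return 0;
}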
356
357static int w1_f29_add_slave(struct w1_slave *sl)
358{
359 int err = 0;
360 int i;
361
362 for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
363 err = sysfs_create_bin_file(
364 &sl->dev.kobj,
365 &(w1_f29_sysfs_bin_files[i]));
366 if (err)
367 while (--i >= 0)
368 sysfs_remove_bin_file(&sl->dev.kobj,
369 &(w1_f29_sysfs_bin_files[i]));
370 return err;
371}
372
373static void w1_f29_remove_slave(struct w1_slave *sl)
374{
375 int i;
 376	for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
377 sysfs_remove_bin_file(&sl->dev.kobj,
378 &(w1_f29_sysfs_bin_files[i]));
379}
380
381static struct w1_family_ops w1_f29_fops = {
382 .add_slave = w1_f29_add_slave,
383 .remove_slave = w1_f29_remove_slave,
384};
385
386static struct w1_family w1_family_29 = {
387 .fid = W1_FAMILY_DS2408,
388 .fops = &w1_f29_fops,
389};
390
391static int __init w1_f29_init(void)
392{
393 return w1_register_family(&w1_family_29);
394}
395
396static void __exit w1_f29_exit(void)
397{
398 w1_unregister_family(&w1_family_29);
399}
400
401module_init(w1_f29_init);
402module_exit(w1_f29_exit);
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
new file mode 100644
index 000000000000..274c8f38303f
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -0,0 +1,217 @@
1/*
2 * 1-Wire implementation for the ds2780 chip
3 *
4 * Copyright (C) 2010 Indesign, LLC
5 *
6 * Author: Clifton Barnes <cabarnes@indesign-llc.com>
7 *
8 * Based on w1-ds2760 driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/types.h>
20#include <linux/platform_device.h>
21#include <linux/mutex.h>
22#include <linux/idr.h>
23
24#include "../w1.h"
25#include "../w1_int.h"
26#include "../w1_family.h"
27#include "w1_ds2780.h"
28
29int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
30 int io)
31{
32 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
33
34 if (!dev)
35 return -ENODEV;
36
37 mutex_lock(&sl->master->mutex);
38
 39	if (addr >= DS2780_DATA_SIZE || addr < 0) {
40 count = 0;
41 goto out;
42 }
43 count = min_t(int, count, DS2780_DATA_SIZE - addr);
44
45 if (w1_reset_select_slave(sl) == 0) {
46 if (io) {
47 w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
48 w1_write_8(sl->master, addr);
49 w1_write_block(sl->master, buf, count);
50 /* XXX w1_write_block returns void, not n_written */
51 } else {
52 w1_write_8(sl->master, W1_DS2780_READ_DATA);
53 w1_write_8(sl->master, addr);
54 count = w1_read_block(sl->master, buf, count);
55 }
56 }
57
58out:
59 mutex_unlock(&sl->master->mutex);
60
61 return count;
62}
63EXPORT_SYMBOL(w1_ds2780_io);
64
65int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
66{
67 struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
68
69 if (!dev)
70 return -EINVAL;
71
72 mutex_lock(&sl->master->mutex);
73
74 if (w1_reset_select_slave(sl) == 0) {
75 w1_write_8(sl->master, cmd);
76 w1_write_8(sl->master, addr);
77 }
78
79 mutex_unlock(&sl->master->mutex);
80 return 0;
81}
82EXPORT_SYMBOL(w1_ds2780_eeprom_cmd);
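/*
 * Sketch (assumption, not part of this patch): a platform driver bound to
 * the "ds2780-battery" device created in w1_ds2780_add_slave() below can
 * reach the w1 slave through its parent device and use the helpers exported
 * above, e.g. to read the current register pair (MSB first per the register
 * map in w1_ds2780.h; the sequential read is an assumption based on the
 * READ DATA function command).
 */
static int example_read_current_raw(struct platform_device *pdev, s16 *raw)
{
	struct device *w1_dev = pdev->dev.parent;
	char regs[2];
	int ret;

	ret = w1_ds2780_io(w1_dev, regs, DS2780_CURRENT_MSB_REG,
			   sizeof(regs), 0);
	if (ret != 2)
		return ret < 0 ? ret : -EIO;

	*raw = ((u8)regs[0] << 8) | (u8)regs[1];
	return 0;
}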
83
84static ssize_t w1_ds2780_read_bin(struct file *filp,
85 struct kobject *kobj,
86 struct bin_attribute *bin_attr,
87 char *buf, loff_t off, size_t count)
88{
89 struct device *dev = container_of(kobj, struct device, kobj);
90 return w1_ds2780_io(dev, buf, off, count, 0);
91}
92
93static struct bin_attribute w1_ds2780_bin_attr = {
94 .attr = {
95 .name = "w1_slave",
96 .mode = S_IRUGO,
97 },
98 .size = DS2780_DATA_SIZE,
99 .read = w1_ds2780_read_bin,
100};
101
102static DEFINE_IDR(bat_idr);
103static DEFINE_MUTEX(bat_idr_lock);
104
105static int new_bat_id(void)
106{
107 int ret;
108
109 while (1) {
110 int id;
111
112 ret = idr_pre_get(&bat_idr, GFP_KERNEL);
113 if (ret == 0)
114 return -ENOMEM;
115
116 mutex_lock(&bat_idr_lock);
117 ret = idr_get_new(&bat_idr, NULL, &id);
118 mutex_unlock(&bat_idr_lock);
119
120 if (ret == 0) {
121 ret = id & MAX_ID_MASK;
122 break;
123 } else if (ret == -EAGAIN) {
124 continue;
125 } else {
126 break;
127 }
128 }
129
130 return ret;
131}
132
133static void release_bat_id(int id)
134{
135 mutex_lock(&bat_idr_lock);
136 idr_remove(&bat_idr, id);
137 mutex_unlock(&bat_idr_lock);
138}
139
140static int w1_ds2780_add_slave(struct w1_slave *sl)
141{
142 int ret;
143 int id;
144 struct platform_device *pdev;
145
146 id = new_bat_id();
147 if (id < 0) {
148 ret = id;
149 goto noid;
150 }
151
152 pdev = platform_device_alloc("ds2780-battery", id);
153 if (!pdev) {
154 ret = -ENOMEM;
155 goto pdev_alloc_failed;
156 }
157 pdev->dev.parent = &sl->dev;
158
159 ret = platform_device_add(pdev);
160 if (ret)
161 goto pdev_add_failed;
162
163 ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
164 if (ret)
165 goto bin_attr_failed;
166
167 dev_set_drvdata(&sl->dev, pdev);
168
169 return 0;
170
171bin_attr_failed:
172pdev_add_failed:
173 platform_device_unregister(pdev);
174pdev_alloc_failed:
175 release_bat_id(id);
176noid:
177 return ret;
178}
179
180static void w1_ds2780_remove_slave(struct w1_slave *sl)
181{
182 struct platform_device *pdev = dev_get_drvdata(&sl->dev);
183 int id = pdev->id;
184
185 platform_device_unregister(pdev);
186 release_bat_id(id);
187 sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
188}
189
190static struct w1_family_ops w1_ds2780_fops = {
191 .add_slave = w1_ds2780_add_slave,
192 .remove_slave = w1_ds2780_remove_slave,
193};
194
195static struct w1_family w1_ds2780_family = {
196 .fid = W1_FAMILY_DS2780,
197 .fops = &w1_ds2780_fops,
198};
199
200static int __init w1_ds2780_init(void)
201{
202 idr_init(&bat_idr);
203 return w1_register_family(&w1_ds2780_family);
204}
205
206static void __exit w1_ds2780_exit(void)
207{
208 w1_unregister_family(&w1_ds2780_family);
209 idr_destroy(&bat_idr);
210}
211
212module_init(w1_ds2780_init);
213module_exit(w1_ds2780_exit);
214
215MODULE_LICENSE("GPL");
216MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
217MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC");
diff --git a/drivers/w1/slaves/w1_ds2780.h b/drivers/w1/slaves/w1_ds2780.h
new file mode 100644
index 000000000000..a1fba79eb1b5
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2780.h
@@ -0,0 +1,129 @@
1/*
2 * 1-Wire implementation for the ds2780 chip
3 *
4 * Copyright (C) 2010 Indesign, LLC
5 *
6 * Author: Clifton Barnes <cabarnes@indesign-llc.com>
7 *
8 * Based on w1-ds2760 driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#ifndef _W1_DS2780_H
17#define _W1_DS2780_H
18
19/* Function commands */
20#define W1_DS2780_READ_DATA 0x69
21#define W1_DS2780_WRITE_DATA 0x6C
22#define W1_DS2780_COPY_DATA 0x48
23#define W1_DS2780_RECALL_DATA 0xB8
24#define W1_DS2780_LOCK 0x6A
25
26/* Register map */
27/* Register 0x00 Reserved */
28#define DS2780_STATUS_REG 0x01
29#define DS2780_RAAC_MSB_REG 0x02
30#define DS2780_RAAC_LSB_REG 0x03
31#define DS2780_RSAC_MSB_REG 0x04
32#define DS2780_RSAC_LSB_REG 0x05
33#define DS2780_RARC_REG 0x06
34#define DS2780_RSRC_REG 0x07
35#define DS2780_IAVG_MSB_REG 0x08
36#define DS2780_IAVG_LSB_REG 0x09
37#define DS2780_TEMP_MSB_REG 0x0A
38#define DS2780_TEMP_LSB_REG 0x0B
39#define DS2780_VOLT_MSB_REG 0x0C
40#define DS2780_VOLT_LSB_REG 0x0D
41#define DS2780_CURRENT_MSB_REG 0x0E
42#define DS2780_CURRENT_LSB_REG 0x0F
43#define DS2780_ACR_MSB_REG 0x10
44#define DS2780_ACR_LSB_REG 0x11
45#define DS2780_ACRL_MSB_REG 0x12
46#define DS2780_ACRL_LSB_REG 0x13
47#define DS2780_AS_REG 0x14
48#define DS2780_SFR_REG 0x15
49#define DS2780_FULL_MSB_REG 0x16
50#define DS2780_FULL_LSB_REG 0x17
51#define DS2780_AE_MSB_REG 0x18
52#define DS2780_AE_LSB_REG 0x19
53#define DS2780_SE_MSB_REG 0x1A
54#define DS2780_SE_LSB_REG 0x1B
55/* Register 0x1C - 0x1E Reserved */
56#define DS2780_EEPROM_REG 0x1F
57#define DS2780_EEPROM_BLOCK0_START 0x20
58/* Register 0x20 - 0x2F User EEPROM */
59#define DS2780_EEPROM_BLOCK0_END 0x2F
60/* Register 0x30 - 0x5F Reserved */
61#define DS2780_EEPROM_BLOCK1_START 0x60
62#define DS2780_CONTROL_REG 0x60
63#define DS2780_AB_REG 0x61
64#define DS2780_AC_MSB_REG 0x62
65#define DS2780_AC_LSB_REG 0x63
66#define DS2780_VCHG_REG 0x64
67#define DS2780_IMIN_REG 0x65
68#define DS2780_VAE_REG 0x66
69#define DS2780_IAE_REG 0x67
70#define DS2780_AE_40_REG 0x68
71#define DS2780_RSNSP_REG 0x69
72#define DS2780_FULL_40_MSB_REG 0x6A
73#define DS2780_FULL_40_LSB_REG 0x6B
74#define DS2780_FULL_3040_SLOPE_REG 0x6C
75#define DS2780_FULL_2030_SLOPE_REG 0x6D
76#define DS2780_FULL_1020_SLOPE_REG 0x6E
77#define DS2780_FULL_0010_SLOPE_REG 0x6F
78#define DS2780_AE_3040_SLOPE_REG 0x70
79#define DS2780_AE_2030_SLOPE_REG 0x71
80#define DS2780_AE_1020_SLOPE_REG 0x72
81#define DS2780_AE_0010_SLOPE_REG 0x73
82#define DS2780_SE_3040_SLOPE_REG 0x74
83#define DS2780_SE_2030_SLOPE_REG 0x75
84#define DS2780_SE_1020_SLOPE_REG 0x76
85#define DS2780_SE_0010_SLOPE_REG 0x77
86#define DS2780_RSGAIN_MSB_REG 0x78
87#define DS2780_RSGAIN_LSB_REG 0x79
88#define DS2780_RSTC_REG 0x7A
89#define DS2780_FRSGAIN_MSB_REG 0x7B
90#define DS2780_FRSGAIN_LSB_REG 0x7C
91#define DS2780_EEPROM_BLOCK1_END 0x7C
92/* Register 0x7D - 0xFF Reserved */
93
94/* Number of valid register addresses */
95#define DS2780_DATA_SIZE 0x80
96
97/* Status register bits */
98#define DS2780_STATUS_REG_CHGTF (1 << 7)
99#define DS2780_STATUS_REG_AEF (1 << 6)
100#define DS2780_STATUS_REG_SEF (1 << 5)
101#define DS2780_STATUS_REG_LEARNF (1 << 4)
102/* Bit 3 Reserved */
103#define DS2780_STATUS_REG_UVF (1 << 2)
104#define DS2780_STATUS_REG_PORF (1 << 1)
105/* Bit 0 Reserved */
106
107/* Control register bits */
108/* Bit 7 Reserved */
109#define DS2780_CONTROL_REG_UVEN (1 << 6)
110#define DS2780_CONTROL_REG_PMOD (1 << 5)
111#define DS2780_CONTROL_REG_RNAOP (1 << 4)
112/* Bit 0 - 3 Reserved */
113
114/* Special feature register bits */
115/* Bit 1 - 7 Reserved */
116#define DS2780_SFR_REG_PIOSC (1 << 0)
117
118/* EEPROM register bits */
119#define DS2780_EEPROM_REG_EEC (1 << 7)
120#define DS2780_EEPROM_REG_LOCK (1 << 6)
121/* Bit 2 - 6 Reserved */
122#define DS2780_EEPROM_REG_BL1 (1 << 1)
123#define DS2780_EEPROM_REG_BL0 (1 << 0)
124
125extern int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
126 int io);
127extern int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd);
128
129#endif /* !_W1_DS2780_H */
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index b7b5014ff714..10606c822756 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -827,7 +827,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
827 mutex_unlock(&w1_mlock); 827 mutex_unlock(&w1_mlock);
828} 828}
829 829
830static void w1_slave_found(struct w1_master *dev, u64 rn) 830void w1_slave_found(struct w1_master *dev, u64 rn)
831{ 831{
832 struct w1_slave *sl; 832 struct w1_slave *sl;
833 struct w1_reg_num *tmp; 833 struct w1_reg_num *tmp;
@@ -933,14 +933,15 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
933 } 933 }
934} 934}
935 935
936void w1_search_process(struct w1_master *dev, u8 search_type) 936void w1_search_process_cb(struct w1_master *dev, u8 search_type,
937 w1_slave_found_callback cb)
937{ 938{
938 struct w1_slave *sl, *sln; 939 struct w1_slave *sl, *sln;
939 940
940 list_for_each_entry(sl, &dev->slist, w1_slave_entry) 941 list_for_each_entry(sl, &dev->slist, w1_slave_entry)
941 clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); 942 clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
942 943
943 w1_search_devices(dev, search_type, w1_slave_found); 944 w1_search_devices(dev, search_type, cb);
944 945
945 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { 946 list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
946 if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl) 947 if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl)
@@ -953,6 +954,11 @@ void w1_search_process(struct w1_master *dev, u8 search_type)
953 dev->search_count--; 954 dev->search_count--;
954} 955}
955 956
957static void w1_search_process(struct w1_master *dev, u8 search_type)
958{
959 w1_search_process_cb(dev, search_type, w1_slave_found);
960}
961
956int w1_process(void *data) 962int w1_process(void *data)
957{ 963{
958 struct w1_master *dev = (struct w1_master *) data; 964 struct w1_master *dev = (struct w1_master *) data;
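
A minimal sketch of the new callback-based search, assuming the usual w1 types from w1.h; the callback name example_log_slave is hypothetical, and it mirrors what w1_netlink.c does further below by also calling the now-exported w1_slave_found() so the kernel's slave list stays current:

	/* assumes "w1.h" is included; existing callers hold the master mutex */
	static void example_log_slave(struct w1_master *dev, u64 rn)
	{
		struct w1_reg_num *id = (struct w1_reg_num *)&rn;

		dev_info(&dev->dev, "found slave: family %02x id %012llx crc %02x\n",
			 (unsigned int)id->family, (unsigned long long)id->id,
			 (unsigned int)id->crc);

		/* keep the kernel's own slave list in sync as well */
		w1_slave_found(dev, rn);
	}

	/* a search pass could then be driven with:
	 *	w1_search_process_cb(dev, W1_SEARCH, example_log_slave);
	 */
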
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index d8a9709f3449..1ce23fc6186c 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -55,6 +55,7 @@ struct w1_reg_num
55#define W1_READ_ROM 0x33 55#define W1_READ_ROM 0x33
56#define W1_READ_PSUPPLY 0xB4 56#define W1_READ_PSUPPLY 0xB4
57#define W1_MATCH_ROM 0x55 57#define W1_MATCH_ROM 0x55
58#define W1_RESUME_CMD 0xA5
58 59
59#define W1_SLAVE_ACTIVE 0 60#define W1_SLAVE_ACTIVE 0
60 61
@@ -193,7 +194,9 @@ void w1_destroy_master_attributes(struct w1_master *master);
193void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 194void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
194void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); 195void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
195struct w1_slave *w1_search_slave(struct w1_reg_num *id); 196struct w1_slave *w1_search_slave(struct w1_reg_num *id);
196void w1_search_process(struct w1_master *dev, u8 search_type); 197void w1_slave_found(struct w1_master *dev, u64 rn);
198void w1_search_process_cb(struct w1_master *dev, u8 search_type,
199 w1_slave_found_callback cb);
197struct w1_master *w1_search_master_id(u32 id); 200struct w1_master *w1_search_master_id(u32 id);
198 201
199/* Disconnect and reconnect devices in the given family. Used for finding 202/* Disconnect and reconnect devices in the given family. Used for finding
@@ -213,6 +216,7 @@ void w1_write_block(struct w1_master *, const u8 *, int);
213void w1_touch_block(struct w1_master *, u8 *, int); 216void w1_touch_block(struct w1_master *, u8 *, int);
214u8 w1_read_block(struct w1_master *, u8 *, int); 217u8 w1_read_block(struct w1_master *, u8 *, int);
215int w1_reset_select_slave(struct w1_slave *sl); 218int w1_reset_select_slave(struct w1_slave *sl);
219int w1_reset_resume_command(struct w1_master *);
216void w1_next_pullup(struct w1_master *, int); 220void w1_next_pullup(struct w1_master *, int);
217 221
218static inline struct w1_slave* dev_to_w1_slave(struct device *dev) 222static inline struct w1_slave* dev_to_w1_slave(struct device *dev)
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index f3b636d7cafe..97479ae70b9c 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -34,8 +34,10 @@
34#define W1_THERM_DS1822 0x22 34#define W1_THERM_DS1822 0x22
35#define W1_EEPROM_DS2433 0x23 35#define W1_EEPROM_DS2433 0x23
36#define W1_THERM_DS18B20 0x28 36#define W1_THERM_DS18B20 0x28
37#define W1_FAMILY_DS2408 0x29
37#define W1_EEPROM_DS2431 0x2D 38#define W1_EEPROM_DS2431 0x2D
38#define W1_FAMILY_DS2760 0x30 39#define W1_FAMILY_DS2760 0x30
40#define W1_FAMILY_DS2780 0x32
39 41
40#define MAXNAMELEN 32 42#define MAXNAMELEN 32
41 43
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 3ebe9726a9e5..8e8b64cfafb6 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -390,6 +390,32 @@ int w1_reset_select_slave(struct w1_slave *sl)
390EXPORT_SYMBOL_GPL(w1_reset_select_slave); 390EXPORT_SYMBOL_GPL(w1_reset_select_slave);
391 391
392/** 392/**
393 * When the workflow with a slave amongst many requires several
394 * successive commands with a reset between each, this function is similar
395 * to doing a reset then a match ROM for the last matched ROM. The
396 * advantage is that the match ROM step is skipped in favor of the
397 * resume command. The slave must support the command, of course.
398 *
399 * If the bus has only one slave, traditionally the match ROM is skipped
400 * and a "SKIP ROM" is done for efficiency. On multi-slave buses, this
401 * doesn't work, of course, but the resume command is the next best thing.
402 *
403 * The w1 master lock must be held.
404 *
405 * @dev: the master device
406 */
407int w1_reset_resume_command(struct w1_master *dev)
408{
409 if (w1_reset_bus(dev))
410 return -1;
411
412 /* This will make only the last matched slave perform a skip ROM. */
413 w1_write_8(dev, W1_RESUME_CMD);
414 return 0;
415}
416EXPORT_SYMBOL_GPL(w1_reset_resume_command);
417
418/**
393 * Put out a strong pull-up of the specified duration after the next write 419 * Put out a strong pull-up of the specified duration after the next write
394 * operation. Not all hardware supports strong pullups. Hardware that 420 * operation. Not all hardware supports strong pullups. Hardware that
395 * doesn't support strong pullups will sleep for the given time after the 421 * doesn't support strong pullups will sleep for the given time after the
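
A minimal sketch of how a slave driver might chain two reads to the same slave on a multi-slave bus using the new helper, assuming the w1 core headers and the w1_ds2780.h constants from this patch are included; the function name and the choice of registers are illustrative only:

	static int example_read_two_regs(struct w1_slave *sl, u8 *a, u8 *b)
	{
		int ret = -EIO;

		/* the w1 master lock must be held, as required above */
		mutex_lock(&sl->master->mutex);

		if (w1_reset_select_slave(sl))	/* reset + match ROM once */
			goto out;
		w1_write_8(sl->master, W1_DS2780_READ_DATA);
		w1_write_8(sl->master, DS2780_STATUS_REG);
		*a = w1_read_8(sl->master);

		/* reset + resume: the match ROM step is skipped for the same slave */
		if (w1_reset_resume_command(sl->master))
			goto out;
		w1_write_8(sl->master, W1_DS2780_READ_DATA);
		w1_write_8(sl->master, DS2780_RARC_REG);
		*b = w1_read_8(sl->master);

		ret = 0;
	out:
		mutex_unlock(&sl->master->mutex);
		return ret;
	}
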
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 7e667bc77ef2..55aabd927c60 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -55,6 +55,9 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
55 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); 55 struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
56 int avail; 56 int avail;
57 57
58 /* update kernel slave list */
59 w1_slave_found(dev, rn);
60
58 avail = dev->priv_size - cmd->len; 61 avail = dev->priv_size - cmd->len;
59 62
60 if (avail > 8) { 63 if (avail > 8) {
@@ -85,7 +88,7 @@ static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
85 dev->priv = msg; 88 dev->priv = msg;
86 dev->priv_size = avail; 89 dev->priv_size = avail;
87 90
88 w1_search_devices(dev, search_type, w1_send_slave); 91 w1_search_process_cb(dev, search_type, w1_send_slave);
89 92
90 msg->ack = 0; 93 msg->ack = 0;
91 cn_netlink_send(msg, 0, GFP_KERNEL); 94 cn_netlink_send(msg, 0, GFP_KERNEL);
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index d8e725082fdc..428f8a1583e8 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -37,7 +37,6 @@
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39#include <linux/mfd/rdc321x.h> 39#include <linux/mfd/rdc321x.h>
40#include <linux/mfd/core.h>
41 40
42#define RDC_WDT_MASK 0x80000000 /* Mask */ 41#define RDC_WDT_MASK 0x80000000 /* Mask */
43#define RDC_WDT_EN 0x00800000 /* Enable bit */ 42#define RDC_WDT_EN 0x00800000 /* Enable bit */
@@ -232,7 +231,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
232 struct resource *r; 231 struct resource *r;
233 struct rdc321x_wdt_pdata *pdata; 232 struct rdc321x_wdt_pdata *pdata;
234 233
235 pdata = mfd_get_data(pdev); 234 pdata = pdev->dev.platform_data;
236 if (!pdata) { 235 if (!pdata) {
237 dev_err(&pdev->dev, "no platform data supplied\n"); 236 dev_err(&pdev->dev, "no platform data supplied\n");
238 return -ENODEV; 237 return -ENODEV;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 4781f806701d..bbc18258ecc5 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,5 +1,6 @@
1obj-y += grant-table.o features.o events.o manage.o balloon.o 1obj-y += grant-table.o features.o events.o manage.o balloon.o
2obj-y += xenbus/ 2obj-y += xenbus/
3obj-y += tmem.o
3 4
4nostackp := $(call cc-option, -fno-stack-protector) 5nostackp := $(call cc-option, -fno-stack-protector)
5CFLAGS_features.o := $(nostackp) 6CFLAGS_features.o := $(nostackp)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
new file mode 100644
index 000000000000..816a44959ef0
--- /dev/null
+++ b/drivers/xen/tmem.c
@@ -0,0 +1,264 @@
1/*
2 * Xen implementation for transcendent memory (tmem)
3 *
4 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
5 * Author: Dan Magenheimer
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/init.h>
11#include <linux/pagemap.h>
12#include <linux/cleancache.h>
13
14#include <xen/xen.h>
15#include <xen/interface/xen.h>
16#include <asm/xen/hypercall.h>
17#include <asm/xen/page.h>
18#include <asm/xen/hypervisor.h>
19
20#define TMEM_CONTROL 0
21#define TMEM_NEW_POOL 1
22#define TMEM_DESTROY_POOL 2
23#define TMEM_NEW_PAGE 3
24#define TMEM_PUT_PAGE 4
25#define TMEM_GET_PAGE 5
26#define TMEM_FLUSH_PAGE 6
27#define TMEM_FLUSH_OBJECT 7
28#define TMEM_READ 8
29#define TMEM_WRITE 9
30#define TMEM_XCHG 10
31
32/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
33#define TMEM_POOL_PERSIST 1
34#define TMEM_POOL_SHARED 2
35#define TMEM_POOL_PAGESIZE_SHIFT 4
36#define TMEM_VERSION_SHIFT 24
37
38
39struct tmem_pool_uuid {
40 u64 uuid_lo;
41 u64 uuid_hi;
42};
43
44struct tmem_oid {
45 u64 oid[3];
46};
47
48#define TMEM_POOL_PRIVATE_UUID { 0, 0 }
49
50/* flags for tmem_ops.new_pool */
51#define TMEM_POOL_PERSIST 1
52#define TMEM_POOL_SHARED 2
53
54/* xen tmem foundation ops/hypercalls */
55
56static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
57 u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
58{
59 struct tmem_op op;
60 int rc = 0;
61
62 op.cmd = tmem_cmd;
63 op.pool_id = tmem_pool;
64 op.u.gen.oid[0] = oid.oid[0];
65 op.u.gen.oid[1] = oid.oid[1];
66 op.u.gen.oid[2] = oid.oid[2];
67 op.u.gen.index = index;
68 op.u.gen.tmem_offset = tmem_offset;
69 op.u.gen.pfn_offset = pfn_offset;
70 op.u.gen.len = len;
71 set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
72 rc = HYPERVISOR_tmem_op(&op);
73 return rc;
74}
75
76static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
77 u32 flags, unsigned long pagesize)
78{
79 struct tmem_op op;
80 int rc = 0, pageshift;
81
82 for (pageshift = 0; pagesize != 1; pageshift++)
83 pagesize >>= 1;
84 flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
85 flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
86 op.cmd = TMEM_NEW_POOL;
87 op.u.new.uuid[0] = uuid.uuid_lo;
88 op.u.new.uuid[1] = uuid.uuid_hi;
89 op.u.new.flags = flags;
90 rc = HYPERVISOR_tmem_op(&op);
91 return rc;
92}
93
94/* xen generic tmem ops */
95
96static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
97 u32 index, unsigned long pfn)
98{
99 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
100
101 return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
102 gmfn, 0, 0, 0);
103}
104
105static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
106 u32 index, unsigned long pfn)
107{
108 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
109
110 return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
111 gmfn, 0, 0, 0);
112}
113
114static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
115{
116 return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
117 0, 0, 0, 0);
118}
119
120static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
121{
122 return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
123}
124
125static int xen_tmem_destroy_pool(u32 pool_id)
126{
127 struct tmem_oid oid = { { 0 } };
128
129 return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
130}
131
132int tmem_enabled;
133
134static int __init enable_tmem(char *s)
135{
136 tmem_enabled = 1;
137 return 1;
138}
139
140__setup("tmem", enable_tmem);
141
142/* cleancache ops */
143
144static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
145 pgoff_t index, struct page *page)
146{
147 u32 ind = (u32) index;
148 struct tmem_oid oid = *(struct tmem_oid *)&key;
149 unsigned long pfn = page_to_pfn(page);
150
151 if (pool < 0)
152 return;
153 if (ind != index)
154 return;
155 mb(); /* ensure page is quiescent; tmem may address it with an alias */
156 (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
157}
158
159static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
160 pgoff_t index, struct page *page)
161{
162 u32 ind = (u32) index;
163 struct tmem_oid oid = *(struct tmem_oid *)&key;
164 unsigned long pfn = page_to_pfn(page);
165 int ret;
166
167 /* translate return values to linux semantics */
168 if (pool < 0)
169 return -1;
170 if (ind != index)
171 return -1;
172 ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
173 if (ret == 1)
174 return 0;
175 else
176 return -1;
177}
178
179static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
180 pgoff_t index)
181{
182 u32 ind = (u32) index;
183 struct tmem_oid oid = *(struct tmem_oid *)&key;
184
185 if (pool < 0)
186 return;
187 if (ind != index)
188 return;
189 (void)xen_tmem_flush_page((u32)pool, oid, ind);
190}
191
192static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
193{
194 struct tmem_oid oid = *(struct tmem_oid *)&key;
195
196 if (pool < 0)
197 return;
198 (void)xen_tmem_flush_object((u32)pool, oid);
199}
200
201static void tmem_cleancache_flush_fs(int pool)
202{
203 if (pool < 0)
204 return;
205 (void)xen_tmem_destroy_pool((u32)pool);
206}
207
208static int tmem_cleancache_init_fs(size_t pagesize)
209{
210 struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;
211
212 return xen_tmem_new_pool(uuid_private, 0, pagesize);
213}
214
215static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
216{
217 struct tmem_pool_uuid shared_uuid;
218
219 shared_uuid.uuid_lo = *(u64 *)uuid;
220 shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
221 return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
222}
223
224static int use_cleancache = 1;
225
226static int __init no_cleancache(char *s)
227{
228 use_cleancache = 0;
229 return 1;
230}
231
232__setup("nocleancache", no_cleancache);
233
234static struct cleancache_ops tmem_cleancache_ops = {
235 .put_page = tmem_cleancache_put_page,
236 .get_page = tmem_cleancache_get_page,
237 .flush_page = tmem_cleancache_flush_page,
238 .flush_inode = tmem_cleancache_flush_inode,
239 .flush_fs = tmem_cleancache_flush_fs,
240 .init_shared_fs = tmem_cleancache_init_shared_fs,
241 .init_fs = tmem_cleancache_init_fs
242};
243
244static int __init xen_tmem_init(void)
245{
246 struct cleancache_ops old_ops;
247
248 if (!xen_domain())
249 return 0;
250#ifdef CONFIG_CLEANCACHE
251 BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
252 if (tmem_enabled && use_cleancache) {
253 char *s = "";
254 old_ops = cleancache_register_ops(&tmem_cleancache_ops);
255 if (old_ops.init_fs != NULL)
256 s = " (WARNING: cleancache_ops overridden)";
257 printk(KERN_INFO "cleancache enabled, RAM provided by "
258 "Xen Transcendent Memory%s\n", s);
259 }
260#endif
261 return 0;
262}
263
264module_init(xen_tmem_init)
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 814ac4e213a8..0a93dc1cb4ac 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -1,6 +1,6 @@
1config 9P_FS 1config 9P_FS
2 tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)" 2 tristate "Plan 9 Resource Sharing Support (9P2000)"
3 depends on INET && NET_9P && EXPERIMENTAL 3 depends on INET && NET_9P
4 help 4 help
5 If you say Y here, you will get experimental support for 5 If you say Y here, you will get experimental support for
6 Plan 9 resource sharing via the 9P2000 protocol. 6 Plan 9 resource sharing via the 9P2000 protocol.
@@ -10,7 +10,6 @@ config 9P_FS
10 If unsure, say N. 10 If unsure, say N.
11 11
12if 9P_FS 12if 9P_FS
13
14config 9P_FSCACHE 13config 9P_FSCACHE
15 bool "Enable 9P client caching support (EXPERIMENTAL)" 14 bool "Enable 9P client caching support (EXPERIMENTAL)"
16 depends on EXPERIMENTAL 15 depends on EXPERIMENTAL
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 7f6c67703195..8d7f3e69ae29 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -814,6 +814,7 @@ int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
814 814
815int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) 815int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
816{ 816{
817 dentry_unhash(d);
817 return v9fs_remove(i, d, 1); 818 return v9fs_remove(i, d, 1);
818} 819}
819 820
@@ -839,6 +840,9 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
839 struct p9_fid *newdirfid; 840 struct p9_fid *newdirfid;
840 struct p9_wstat wstat; 841 struct p9_wstat wstat;
841 842
843 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
844 dentry_unhash(new_dentry);
845
842 P9_DPRINTK(P9_DEBUG_VFS, "\n"); 846 P9_DPRINTK(P9_DEBUG_VFS, "\n");
843 retval = 0; 847 retval = 0;
844 old_inode = old_dentry->d_inode; 848 old_inode = old_dentry->d_inode;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 82a7c38ddad0..691c78f58bef 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -259,7 +259,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
259 if (IS_ERR(inode_fid)) { 259 if (IS_ERR(inode_fid)) {
260 err = PTR_ERR(inode_fid); 260 err = PTR_ERR(inode_fid);
261 mutex_unlock(&v9inode->v_mutex); 261 mutex_unlock(&v9inode->v_mutex);
262 goto error; 262 goto err_clunk_old_fid;
263 } 263 }
264 v9inode->writeback_fid = (void *) inode_fid; 264 v9inode->writeback_fid = (void *) inode_fid;
265 } 265 }
@@ -267,8 +267,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
267 /* Since we are opening a file, assign the open fid to the file */ 267 /* Since we are opening a file, assign the open fid to the file */
268 filp = lookup_instantiate_filp(nd, dentry, generic_file_open); 268 filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
269 if (IS_ERR(filp)) { 269 if (IS_ERR(filp)) {
270 p9_client_clunk(ofid); 270 err = PTR_ERR(filp);
271 return PTR_ERR(filp); 271 goto err_clunk_old_fid;
272 } 272 }
273 filp->private_data = ofid; 273 filp->private_data = ofid;
274#ifdef CONFIG_9P_FSCACHE 274#ifdef CONFIG_9P_FSCACHE
@@ -278,10 +278,11 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
278 return 0; 278 return 0;
279 279
280error: 280error:
281 if (ofid)
282 p9_client_clunk(ofid);
283 if (fid) 281 if (fid)
284 p9_client_clunk(fid); 282 p9_client_clunk(fid);
283err_clunk_old_fid:
284 if (ofid)
285 p9_client_clunk(ofid);
285 return err; 286 return err;
286} 287}
287 288
diff --git a/fs/Kconfig b/fs/Kconfig
index f3aa9b08b228..19891aab9c6e 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -47,7 +47,7 @@ config FS_POSIX_ACL
47 def_bool n 47 def_bool n
48 48
49config EXPORTFS 49config EXPORTFS
50 bool 50 tristate
51 51
52config FILE_LOCKING 52config FILE_LOCKING
53 bool "Enable POSIX file locking API" if EXPERT 53 bool "Enable POSIX file locking API" if EXPERT
@@ -124,6 +124,7 @@ config TMPFS
124config TMPFS_POSIX_ACL 124config TMPFS_POSIX_ACL
125 bool "Tmpfs POSIX Access Control Lists" 125 bool "Tmpfs POSIX Access Control Lists"
126 depends on TMPFS 126 depends on TMPFS
127 select TMPFS_XATTR
127 select GENERIC_ACL 128 select GENERIC_ACL
128 help 129 help
129 POSIX Access Control Lists (ACLs) support permissions for users and 130 POSIX Access Control Lists (ACLs) support permissions for users and
@@ -134,6 +135,22 @@ config TMPFS_POSIX_ACL
134 135
135 If you don't know what Access Control Lists are, say N. 136 If you don't know what Access Control Lists are, say N.
136 137
138config TMPFS_XATTR
139 bool "Tmpfs extended attributes"
140 depends on TMPFS
141 default n
142 help
143 Extended attributes are name:value pairs associated with inodes by
144 the kernel or by users (see the attr(5) manual page, or visit
145 <http://acl.bestbits.at/> for details).
146
147 Currently this enables support for the trusted.* and
148 security.* namespaces.
149
150 You need this for POSIX ACL support on tmpfs.
151
152 If unsure, say N.
153
137config HUGETLBFS 154config HUGETLBFS
138 bool "HugeTLB file system support" 155 bool "HugeTLB file system support"
139 depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \ 156 depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index e3e9efc1fdd8..03330e2e390c 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -320,6 +320,8 @@ affs_rmdir(struct inode *dir, struct dentry *dentry)
320 dentry->d_inode->i_ino, 320 dentry->d_inode->i_ino,
321 (int)dentry->d_name.len, dentry->d_name.name); 321 (int)dentry->d_name.len, dentry->d_name.name);
322 322
323 dentry_unhash(dentry);
324
323 return affs_remove_header(dentry); 325 return affs_remove_header(dentry);
324} 326}
325 327
@@ -417,6 +419,9 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry,
417 struct buffer_head *bh = NULL; 419 struct buffer_head *bh = NULL;
418 int retval; 420 int retval;
419 421
422 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
423 dentry_unhash(new_dentry);
424
420 pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n", 425 pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n",
421 (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name, 426 (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
422 (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name); 427 (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 20c106f24927..2c4e05160042 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -845,6 +845,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
845 _enter("{%x:%u},{%s}", 845 _enter("{%x:%u},{%s}",
846 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); 846 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
847 847
848 dentry_unhash(dentry);
849
848 ret = -ENAMETOOLONG; 850 ret = -ENAMETOOLONG;
849 if (dentry->d_name.len >= AFSNAMEMAX) 851 if (dentry->d_name.len >= AFSNAMEMAX)
850 goto error; 852 goto error;
@@ -1146,6 +1148,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1146 struct key *key; 1148 struct key *key;
1147 int ret; 1149 int ret;
1148 1150
1151 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
1152 dentry_unhash(new_dentry);
1153
1149 vnode = AFS_FS_I(old_dentry->d_inode); 1154 vnode = AFS_FS_I(old_dentry->d_inode);
1150 orig_dvnode = AFS_FS_I(old_dir); 1155 orig_dvnode = AFS_FS_I(old_dir);
1151 new_dvnode = AFS_FS_I(new_dir); 1156 new_dvnode = AFS_FS_I(new_dir);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index f55ae23b137e..87d95a8cddbc 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -583,6 +583,8 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
583 if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) 583 if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
584 return -EACCES; 584 return -EACCES;
585 585
586 dentry_unhash(dentry);
587
586 if (atomic_dec_and_test(&ino->count)) { 588 if (atomic_dec_and_test(&ino->count)) {
587 p_ino = autofs4_dentry_ino(dentry->d_parent); 589 p_ino = autofs4_dentry_ino(dentry->d_parent);
588 if (p_ino && dentry->d_parent != dentry) 590 if (p_ino && dentry->d_parent != dentry)
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index b14cebfd9047..c7d1d06b0483 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -224,6 +224,9 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
224 struct bfs_sb_info *info; 224 struct bfs_sb_info *info;
225 int error = -ENOENT; 225 int error = -ENOENT;
226 226
227 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
228 dentry_unhash(new_dentry);
229
227 old_bh = new_bh = NULL; 230 old_bh = new_bh = NULL;
228 old_inode = old_dentry->d_inode; 231 old_inode = old_dentry->d_inode;
229 if (S_ISDIR(old_inode->i_mode)) 232 if (S_ISDIR(old_inode->i_mode))
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bf9c7a720371..1f2b19978333 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1238,6 +1238,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1238 res = __blkdev_get(bdev, mode, 0); 1238 res = __blkdev_get(bdev, mode, 0);
1239 1239
1240 if (whole) { 1240 if (whole) {
1241 struct gendisk *disk = whole->bd_disk;
1242
1241 /* finish claiming */ 1243 /* finish claiming */
1242 mutex_lock(&bdev->bd_mutex); 1244 mutex_lock(&bdev->bd_mutex);
1243 spin_lock(&bdev_lock); 1245 spin_lock(&bdev_lock);
@@ -1264,15 +1266,16 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1264 spin_unlock(&bdev_lock); 1266 spin_unlock(&bdev_lock);
1265 1267
1266 /* 1268 /*
1267 * Block event polling for write claims. Any write 1269 * Block event polling for write claims if requested. Any
1268 * holder makes the write_holder state stick until all 1270 * write holder makes the write_holder state stick until
1269 * are released. This is good enough and tracking 1271 * all are released. This is good enough and tracking
1270 * individual writeable reference is too fragile given 1272 * individual writeable reference is too fragile given the
1271 * the way @mode is used in blkdev_get/put(). 1273 * way @mode is used in blkdev_get/put().
1272 */ 1274 */
1273 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { 1275 if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
1276 !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
1274 bdev->bd_write_holder = true; 1277 bdev->bd_write_holder = true;
1275 disk_block_events(bdev->bd_disk); 1278 disk_block_events(disk);
1276 } 1279 }
1277 1280
1278 mutex_unlock(&bdev->bd_mutex); 1281 mutex_unlock(&bdev->bd_mutex);
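
A rough sketch of the driver side of the flag checked above, assuming a hypothetical removable-media driver; struct example_dev and its setup path are placeholders, only the flag and event bits come from <linux/genhd.h>:

	/* hypothetical driver state */
	struct example_dev {
		struct gendisk *disk;
	};

	static int example_setup_disk(struct example_dev *ed)
	{
		struct gendisk *disk = alloc_disk(1);

		if (!disk)
			return -ENOMEM;

		/* opt in to the event-blocking behaviour gated in blkdev_get() above */
		disk->flags |= GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
		disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;

		ed->disk = disk;
		return 0;
	}
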
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96fcfa522dab..4f9893243dae 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -11,6 +11,7 @@
11#include <linux/writeback.h> 11#include <linux/writeback.h>
12#include <linux/pagevec.h> 12#include <linux/pagevec.h>
13#include <linux/prefetch.h> 13#include <linux/prefetch.h>
14#include <linux/cleancache.h>
14#include "extent_io.h" 15#include "extent_io.h"
15#include "extent_map.h" 16#include "extent_map.h"
16#include "compat.h" 17#include "compat.h"
@@ -2016,6 +2017,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2016 2017
2017 set_page_extent_mapped(page); 2018 set_page_extent_mapped(page);
2018 2019
2020 if (!PageUptodate(page)) {
2021 if (cleancache_get_page(page) == 0) {
2022 BUG_ON(blocksize != PAGE_SIZE);
2023 goto out;
2024 }
2025 }
2026
2019 end = page_end; 2027 end = page_end;
2020 while (1) { 2028 while (1) {
2021 lock_extent(tree, start, end, GFP_NOFS); 2029 lock_extent(tree, start, end, GFP_NOFS);
@@ -2149,6 +2157,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2149 cur = cur + iosize; 2157 cur = cur + iosize;
2150 page_offset += iosize; 2158 page_offset += iosize;
2151 } 2159 }
2160out:
2152 if (!nr) { 2161 if (!nr) {
2153 if (!PageError(page)) 2162 if (!PageError(page))
2154 SetPageUptodate(page); 2163 SetPageUptodate(page);
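
For orientation, the generic shape of a cleancache "get" hook in a readpage path, matching what the btrfs hunk above does; example_readpage() and example_fs_do_readpage() are placeholders rather than real btrfs functions, and the usual fs/mm headers plus <linux/cleancache.h> are assumed:

	/* hypothetical low-level readpage, stands in for the fs-specific path */
	static int example_fs_do_readpage(struct file *file, struct page *page);

	static int example_readpage(struct file *file, struct page *page)
	{
		/* a clean page evicted earlier may still be held by cleancache */
		if (!PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			return 0;
		}

		/* otherwise fall back to reading the page from disk */
		return example_fs_do_readpage(file, page);
	}
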
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 0ac712efcdf2..be4ffa12f3ef 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -39,6 +39,7 @@
39#include <linux/miscdevice.h> 39#include <linux/miscdevice.h>
40#include <linux/magic.h> 40#include <linux/magic.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/cleancache.h>
42#include "compat.h" 43#include "compat.h"
43#include "ctree.h" 44#include "ctree.h"
44#include "disk-io.h" 45#include "disk-io.h"
@@ -624,6 +625,7 @@ static int btrfs_fill_super(struct super_block *sb,
624 sb->s_root = root_dentry; 625 sb->s_root = root_dentry;
625 626
626 save_mount_options(sb, data); 627 save_mount_options(sb, data);
628 cleancache_init_fs(sb);
627 return 0; 629 return 0;
628 630
629fail_close: 631fail_close:
diff --git a/fs/buffer.c b/fs/buffer.c
index a08bb8e61c6f..698c6b2cc462 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -41,6 +41,7 @@
41#include <linux/bitops.h> 41#include <linux/bitops.h>
42#include <linux/mpage.h> 42#include <linux/mpage.h>
43#include <linux/bit_spinlock.h> 43#include <linux/bit_spinlock.h>
44#include <linux/cleancache.h>
44 45
45static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); 46static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 47
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev)
269 invalidate_bh_lrus(); 270 invalidate_bh_lrus();
270 lru_add_drain_all(); /* make sure all lru add caches are flushed */ 271 lru_add_drain_all(); /* make sure all lru add caches are flushed */
271 invalidate_mapping_pages(mapping, 0, -1); 272 invalidate_mapping_pages(mapping, 0, -1);
273 /* 99% of the time, we don't need to flush the cleancache on the bdev.
274 * But, for the strange corners, let's be cautious
275 */
276 cleancache_flush_inode(mapping);
272} 277}
273EXPORT_SYMBOL(invalidate_bdev); 278EXPORT_SYMBOL(invalidate_bdev);
274 279
@@ -2331,24 +2336,26 @@ EXPORT_SYMBOL(block_commit_write);
2331 * page lock we can determine safely if the page is beyond EOF. If it is not 2336 * page lock we can determine safely if the page is beyond EOF. If it is not
2332 * beyond EOF, then the page is guaranteed safe against truncation until we 2337 * beyond EOF, then the page is guaranteed safe against truncation until we
2333 * unlock the page. 2338 * unlock the page.
2339 *
2340 * Direct callers of this function should call vfs_check_frozen() so that page
2341 * fault does not busyloop until the fs is thawed.
2334 */ 2342 */
2335int 2343int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2336block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 2344 get_block_t get_block)
2337 get_block_t get_block)
2338{ 2345{
2339 struct page *page = vmf->page; 2346 struct page *page = vmf->page;
2340 struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 2347 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2341 unsigned long end; 2348 unsigned long end;
2342 loff_t size; 2349 loff_t size;
2343 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 2350 int ret;
2344 2351
2345 lock_page(page); 2352 lock_page(page);
2346 size = i_size_read(inode); 2353 size = i_size_read(inode);
2347 if ((page->mapping != inode->i_mapping) || 2354 if ((page->mapping != inode->i_mapping) ||
2348 (page_offset(page) > size)) { 2355 (page_offset(page) > size)) {
2349 /* page got truncated out from underneath us */ 2356 /* We overload EFAULT to mean page got truncated */
2350 unlock_page(page); 2357 ret = -EFAULT;
2351 goto out; 2358 goto out_unlock;
2352 } 2359 }
2353 2360
2354 /* page is wholly or partially inside EOF */ 2361 /* page is wholly or partially inside EOF */
@@ -2361,18 +2368,41 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2361 if (!ret) 2368 if (!ret)
2362 ret = block_commit_write(page, 0, end); 2369 ret = block_commit_write(page, 0, end);
2363 2370
2364 if (unlikely(ret)) { 2371 if (unlikely(ret < 0))
2365 unlock_page(page); 2372 goto out_unlock;
2366 if (ret == -ENOMEM) 2373 /*
2367 ret = VM_FAULT_OOM; 2374 * Freezing in progress? We check after the page is marked dirty and
2368 else /* -ENOSPC, -EIO, etc */ 2375 * with page lock held so if the test here fails, we are sure freezing
2369 ret = VM_FAULT_SIGBUS; 2376 * code will wait during syncing until the page fault is done - at that
2370 } else 2377 * point page will be dirty and unlocked so freezing code will write it
2371 ret = VM_FAULT_LOCKED; 2378 * and writeprotect it again.
2372 2379 */
2373out: 2380 set_page_dirty(page);
2381 if (inode->i_sb->s_frozen != SB_UNFROZEN) {
2382 ret = -EAGAIN;
2383 goto out_unlock;
2384 }
2385 return 0;
2386out_unlock:
2387 unlock_page(page);
2374 return ret; 2388 return ret;
2375} 2389}
2390EXPORT_SYMBOL(__block_page_mkwrite);
2391
2392int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2393 get_block_t get_block)
2394{
2395 int ret;
2396 struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
2397
2398 /*
2399 * This check is racy but catches the common case. The check in
2400 * __block_page_mkwrite() is reliable.
2401 */
2402 vfs_check_frozen(sb, SB_FREEZE_WRITE);
2403 ret = __block_page_mkwrite(vma, vmf, get_block);
2404 return block_page_mkwrite_return(ret);
2405}
2376EXPORT_SYMBOL(block_page_mkwrite); 2406EXPORT_SYMBOL(block_page_mkwrite);
2377 2407
2378/* 2408/*
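
A rough sketch of a filesystem ->page_mkwrite calling the split-out helper directly, per the vfs_check_frozen() note above; example_get_block and the transaction hooks are placeholders for fs-specific work:

	/* hypothetical get_block_t for the filesystem */
	static int example_get_block(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create);

	static int example_page_mkwrite(struct vm_area_struct *vma,
					struct vm_fault *vmf)
	{
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		int ret;

		/* avoid busylooping the page fault while the fs is frozen */
		vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

		/* ... fs-specific setup, e.g. start a journal transaction ... */
		ret = __block_page_mkwrite(vma, vmf, example_get_block);
		/* ... fs-specific teardown ... */

		/* translate 0/-EAGAIN/-errno into VM_FAULT_* codes */
		return block_page_mkwrite_return(ret);
	}
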
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 38b8ab554924..33da49dc3cc6 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -848,7 +848,8 @@ get_more_pages:
848 op->payload_len = cpu_to_le32(len); 848 op->payload_len = cpu_to_le32(len);
849 req->r_request->hdr.data_len = cpu_to_le32(len); 849 req->r_request->hdr.data_len = cpu_to_le32(len);
850 850
851 ceph_osdc_start_request(&fsc->client->osdc, req, true); 851 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
852 BUG_ON(rc);
852 req = NULL; 853 req = NULL;
853 854
854 /* continue? */ 855 /* continue? */
@@ -880,8 +881,6 @@ release_pvec_pages:
880out: 881out:
881 if (req) 882 if (req)
882 ceph_osdc_put_request(req); 883 ceph_osdc_put_request(req);
883 if (rc > 0)
884 rc = 0; /* vfs expects us to return 0 */
885 ceph_put_snap_context(snapc); 884 ceph_put_snap_context(snapc);
886 dout("writepages done, rc = %d\n", rc); 885 dout("writepages done, rc = %d\n", rc);
887 return rc; 886 return rc;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 2a5404c1c42f..1f72b00447c4 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -569,7 +569,8 @@ retry:
569 list_add_tail(&cap->session_caps, &session->s_caps); 569 list_add_tail(&cap->session_caps, &session->s_caps);
570 session->s_nr_caps++; 570 session->s_nr_caps++;
571 spin_unlock(&session->s_cap_lock); 571 spin_unlock(&session->s_cap_lock);
572 } 572 } else if (new_cap)
573 ceph_put_cap(mdsc, new_cap);
573 574
574 if (!ci->i_snap_realm) { 575 if (!ci->i_snap_realm) {
575 /* 576 /*
@@ -2634,6 +2635,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2634 struct ceph_mds_session *session, 2635 struct ceph_mds_session *session,
2635 int *open_target_sessions) 2636 int *open_target_sessions)
2636{ 2637{
2638 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2637 struct ceph_inode_info *ci = ceph_inode(inode); 2639 struct ceph_inode_info *ci = ceph_inode(inode);
2638 int mds = session->s_mds; 2640 int mds = session->s_mds;
2639 unsigned mseq = le32_to_cpu(ex->migrate_seq); 2641 unsigned mseq = le32_to_cpu(ex->migrate_seq);
@@ -2670,6 +2672,19 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2670 * export targets, so that we get the matching IMPORT 2672 * export targets, so that we get the matching IMPORT
2671 */ 2673 */
2672 *open_target_sessions = 1; 2674 *open_target_sessions = 1;
2675
2676 /*
2677 * we can't flush dirty caps that we've seen the
2678 * EXPORT but no IMPORT for
2679 */
2680 spin_lock(&mdsc->cap_dirty_lock);
2681 if (!list_empty(&ci->i_dirty_item)) {
2682 dout(" moving %p to cap_dirty_migrating\n",
2683 inode);
2684 list_move(&ci->i_dirty_item,
2685 &mdsc->cap_dirty_migrating);
2686 }
2687 spin_unlock(&mdsc->cap_dirty_lock);
2673 } 2688 }
2674 __ceph_remove_cap(cap); 2689 __ceph_remove_cap(cap);
2675 } 2690 }
@@ -2707,6 +2722,13 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
2707 ci->i_cap_exporting_issued = 0; 2722 ci->i_cap_exporting_issued = 0;
2708 ci->i_cap_exporting_mseq = 0; 2723 ci->i_cap_exporting_mseq = 0;
2709 ci->i_cap_exporting_mds = -1; 2724 ci->i_cap_exporting_mds = -1;
2725
2726 spin_lock(&mdsc->cap_dirty_lock);
2727 if (!list_empty(&ci->i_dirty_item)) {
2728 dout(" moving %p back to cap_dirty\n", inode);
2729 list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
2730 }
2731 spin_unlock(&mdsc->cap_dirty_lock);
2710 } else { 2732 } else {
2711 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n", 2733 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2712 inode, ci, mds, mseq); 2734 inode, ci, mds, mseq);
@@ -2910,38 +2932,16 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
2910 */ 2932 */
2911void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) 2933void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2912{ 2934{
2913 struct ceph_inode_info *ci, *nci = NULL; 2935 struct ceph_inode_info *ci;
2914 struct inode *inode, *ninode = NULL; 2936 struct inode *inode;
2915 struct list_head *p, *n;
2916 2937
2917 dout("flush_dirty_caps\n"); 2938 dout("flush_dirty_caps\n");
2918 spin_lock(&mdsc->cap_dirty_lock); 2939 spin_lock(&mdsc->cap_dirty_lock);
2919 list_for_each_safe(p, n, &mdsc->cap_dirty) { 2940 while (!list_empty(&mdsc->cap_dirty)) {
2920 if (nci) { 2941 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
2921 ci = nci; 2942 i_dirty_item);
2922 inode = ninode; 2943 inode = igrab(&ci->vfs_inode);
2923 ci->i_ceph_flags &= ~CEPH_I_NOFLUSH; 2944 dout("flush_dirty_caps %p\n", inode);
2924 dout("flush_dirty_caps inode %p (was next inode)\n",
2925 inode);
2926 } else {
2927 ci = list_entry(p, struct ceph_inode_info,
2928 i_dirty_item);
2929 inode = igrab(&ci->vfs_inode);
2930 BUG_ON(!inode);
2931 dout("flush_dirty_caps inode %p\n", inode);
2932 }
2933 if (n != &mdsc->cap_dirty) {
2934 nci = list_entry(n, struct ceph_inode_info,
2935 i_dirty_item);
2936 ninode = igrab(&nci->vfs_inode);
2937 BUG_ON(!ninode);
2938 nci->i_ceph_flags |= CEPH_I_NOFLUSH;
2939 dout("flush_dirty_caps next inode %p, noflush\n",
2940 ninode);
2941 } else {
2942 nci = NULL;
2943 ninode = NULL;
2944 }
2945 spin_unlock(&mdsc->cap_dirty_lock); 2945 spin_unlock(&mdsc->cap_dirty_lock);
2946 if (inode) { 2946 if (inode) {
2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, 2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
@@ -2951,6 +2951,7 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2951 spin_lock(&mdsc->cap_dirty_lock); 2951 spin_lock(&mdsc->cap_dirty_lock);
2952 } 2952 }
2953 spin_unlock(&mdsc->cap_dirty_lock); 2953 spin_unlock(&mdsc->cap_dirty_lock);
2954 dout("flush_dirty_caps done\n");
2954} 2955}
2955 2956
2956/* 2957/*
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 1a867a3601ae..33729e822bb9 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -360,7 +360,7 @@ more:
360 rinfo = &fi->last_readdir->r_reply_info; 360 rinfo = &fi->last_readdir->r_reply_info;
361 dout("readdir frag %x num %d off %d chunkoff %d\n", frag, 361 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
362 rinfo->dir_nr, off, fi->offset); 362 rinfo->dir_nr, off, fi->offset);
363 while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) { 363 while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
364 u64 pos = ceph_make_fpos(frag, off); 364 u64 pos = ceph_make_fpos(frag, off);
365 struct ceph_mds_reply_inode *in = 365 struct ceph_mds_reply_inode *in =
366 rinfo->dir_in[off - fi->offset].in; 366 rinfo->dir_in[off - fi->offset].in;
@@ -1066,16 +1066,17 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1066 struct inode *inode = file->f_dentry->d_inode; 1066 struct inode *inode = file->f_dentry->d_inode;
1067 struct ceph_inode_info *ci = ceph_inode(inode); 1067 struct ceph_inode_info *ci = ceph_inode(inode);
1068 int left; 1068 int left;
1069 const int bufsize = 1024;
1069 1070
1070 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) 1071 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1071 return -EISDIR; 1072 return -EISDIR;
1072 1073
1073 if (!cf->dir_info) { 1074 if (!cf->dir_info) {
1074 cf->dir_info = kmalloc(1024, GFP_NOFS); 1075 cf->dir_info = kmalloc(bufsize, GFP_NOFS);
1075 if (!cf->dir_info) 1076 if (!cf->dir_info)
1076 return -ENOMEM; 1077 return -ENOMEM;
1077 cf->dir_info_len = 1078 cf->dir_info_len =
1078 sprintf(cf->dir_info, 1079 snprintf(cf->dir_info, bufsize,
1079 "entries: %20lld\n" 1080 "entries: %20lld\n"
1080 " files: %20lld\n" 1081 " files: %20lld\n"
1081 " subdirs: %20lld\n" 1082 " subdirs: %20lld\n"
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e41056174bf8..a610d3d67488 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -86,6 +86,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
86static struct dentry *__fh_to_dentry(struct super_block *sb, 86static struct dentry *__fh_to_dentry(struct super_block *sb,
87 struct ceph_nfs_fh *fh) 87 struct ceph_nfs_fh *fh)
88{ 88{
89 struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
89 struct inode *inode; 90 struct inode *inode;
90 struct dentry *dentry; 91 struct dentry *dentry;
91 struct ceph_vino vino; 92 struct ceph_vino vino;
@@ -95,8 +96,24 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
95 vino.ino = fh->ino; 96 vino.ino = fh->ino;
96 vino.snap = CEPH_NOSNAP; 97 vino.snap = CEPH_NOSNAP;
97 inode = ceph_find_inode(sb, vino); 98 inode = ceph_find_inode(sb, vino);
98 if (!inode) 99 if (!inode) {
99 return ERR_PTR(-ESTALE); 100 struct ceph_mds_request *req;
101
102 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
103 USE_ANY_MDS);
104 if (IS_ERR(req))
105 return ERR_CAST(req);
106
107 req->r_ino1 = vino;
108 req->r_num_caps = 1;
109 err = ceph_mdsc_do_request(mdsc, NULL, req);
110 inode = req->r_target_inode;
111 if (inode)
112 igrab(inode);
113 ceph_mdsc_put_request(req);
114 if (!inode)
115 return ERR_PTR(-ESTALE);
116 }
100 117
101 dentry = d_obtain_alias(inode); 118 dentry = d_obtain_alias(inode);
102 if (IS_ERR(dentry)) { 119 if (IS_ERR(dentry)) {
@@ -148,8 +165,10 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
148 snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash); 165 snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash);
149 req->r_num_caps = 1; 166 req->r_num_caps = 1;
150 err = ceph_mdsc_do_request(mdsc, NULL, req); 167 err = ceph_mdsc_do_request(mdsc, NULL, req);
168 inode = req->r_target_inode;
169 if (inode)
170 igrab(inode);
151 ceph_mdsc_put_request(req); 171 ceph_mdsc_put_request(req);
152 inode = ceph_find_inode(sb, vino);
153 if (!inode) 172 if (!inode)
154 return ERR_PTR(err ? err : -ESTALE); 173 return ERR_PTR(err ? err : -ESTALE);
155 } 174 }
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d0fae4ce9ba5..79743d146be6 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -578,6 +578,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
578 if (dir) { 578 if (dir) {
579 struct ceph_inode_info *ci = ceph_inode(dir); 579 struct ceph_inode_info *ci = ceph_inode(dir);
580 580
581 ihold(dir);
581 spin_lock(&ci->i_unsafe_lock); 582 spin_lock(&ci->i_unsafe_lock);
582 req->r_unsafe_dir = dir; 583 req->r_unsafe_dir = dir;
583 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); 584 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
@@ -598,6 +599,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
598 spin_lock(&ci->i_unsafe_lock); 599 spin_lock(&ci->i_unsafe_lock);
599 list_del_init(&req->r_unsafe_dir_item); 600 list_del_init(&req->r_unsafe_dir_item);
600 spin_unlock(&ci->i_unsafe_lock); 601 spin_unlock(&ci->i_unsafe_lock);
602
603 iput(req->r_unsafe_dir);
604 req->r_unsafe_dir = NULL;
601 } 605 }
602 606
603 ceph_mdsc_put_request(req); 607 ceph_mdsc_put_request(req);
@@ -2691,7 +2695,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
2691{ 2695{
2692 struct super_block *sb = mdsc->fsc->sb; 2696 struct super_block *sb = mdsc->fsc->sb;
2693 struct inode *inode; 2697 struct inode *inode;
2694 struct ceph_inode_info *ci;
2695 struct dentry *parent, *dentry; 2698 struct dentry *parent, *dentry;
2696 struct ceph_dentry_info *di; 2699 struct ceph_dentry_info *di;
2697 int mds = session->s_mds; 2700 int mds = session->s_mds;
@@ -2728,7 +2731,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
2728 dout("handle_lease no inode %llx\n", vino.ino); 2731 dout("handle_lease no inode %llx\n", vino.ino);
2729 goto release; 2732 goto release;
2730 } 2733 }
2731 ci = ceph_inode(inode);
2732 2734
2733 /* dentry */ 2735 /* dentry */
2734 parent = d_find_alias(inode); 2736 parent = d_find_alias(inode);
@@ -3002,6 +3004,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
3002 spin_lock_init(&mdsc->snap_flush_lock); 3004 spin_lock_init(&mdsc->snap_flush_lock);
3003 mdsc->cap_flush_seq = 0; 3005 mdsc->cap_flush_seq = 0;
3004 INIT_LIST_HEAD(&mdsc->cap_dirty); 3006 INIT_LIST_HEAD(&mdsc->cap_dirty);
3007 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3005 mdsc->num_cap_flushing = 0; 3008 mdsc->num_cap_flushing = 0;
3006 spin_lock_init(&mdsc->cap_dirty_lock); 3009 spin_lock_init(&mdsc->cap_dirty_lock);
3007 init_waitqueue_head(&mdsc->cap_flushing_wq); 3010 init_waitqueue_head(&mdsc->cap_flushing_wq);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 4e3a9cc0bba6..7d8a0d662d56 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -278,6 +278,7 @@ struct ceph_mds_client {
278 278
279 u64 cap_flush_seq; 279 u64 cap_flush_seq;
280 struct list_head cap_dirty; /* inodes with dirty caps */ 280 struct list_head cap_dirty; /* inodes with dirty caps */
281 struct list_head cap_dirty_migrating; /* ...that are migrating */
281 int num_cap_flushing; /* # caps we are flushing */ 282 int num_cap_flushing; /* # caps we are flushing */
282 spinlock_t cap_dirty_lock; /* protects above items */ 283 spinlock_t cap_dirty_lock; /* protects above items */
283 wait_queue_head_t cap_flushing_wq; 284 wait_queue_head_t cap_flushing_wq;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 2b8dae4d121e..a46126fd5735 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -336,6 +336,8 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
336 int len = de->d_name.len; 336 int len = de->d_name.len;
337 int error; 337 int error;
338 338
339 dentry_unhash(de);
340
339 error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len); 341 error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
340 if (!error) { 342 if (!error) {
341 /* VFS may delete the child */ 343 /* VFS may delete the child */
@@ -359,6 +361,9 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
359 int new_length = new_dentry->d_name.len; 361 int new_length = new_dentry->d_name.len;
360 int error; 362 int error;
361 363
364 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
365 dentry_unhash(new_dentry);
366
362 error = venus_rename(old_dir->i_sb, coda_i2f(old_dir), 367 error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
363 coda_i2f(new_dir), old_length, new_length, 368 coda_i2f(new_dir), old_length, new_length,
364 (const char *) old_name, (const char *)new_name); 369 (const char *) old_name, (const char *)new_name);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 9a37a9b6de3a..9d17d350abc5 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1359,6 +1359,8 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
1359 struct module *subsys_owner = NULL, *dead_item_owner = NULL; 1359 struct module *subsys_owner = NULL, *dead_item_owner = NULL;
1360 int ret; 1360 int ret;
1361 1361
1362 dentry_unhash(dentry);
1363
1362 if (dentry->d_parent == configfs_sb->s_root) 1364 if (dentry->d_parent == configfs_sb->s_root)
1363 return -EPERM; 1365 return -EPERM;
1364 1366
diff --git a/fs/dcache.c b/fs/dcache.c
index 18b2a1f10ed8..37f72ee5bf7c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1220,7 +1220,7 @@ void shrink_dcache_parent(struct dentry * parent)
1220EXPORT_SYMBOL(shrink_dcache_parent); 1220EXPORT_SYMBOL(shrink_dcache_parent);
1221 1221
1222/* 1222/*
1223 * Scan `nr' dentries and return the number which remain. 1223 * Scan `sc->nr_to_scan' dentries and return the number which remain.
1224 * 1224 *
1225 * We need to avoid reentering the filesystem if the caller is performing a 1225 * We need to avoid reentering the filesystem if the caller is performing a
1226 * GFP_NOFS allocation attempt. One example deadlock is: 1226 * GFP_NOFS allocation attempt. One example deadlock is:
@@ -1231,8 +1231,12 @@ EXPORT_SYMBOL(shrink_dcache_parent);
1231 * 1231 *
1232 * In this case we return -1 to tell the caller that we baled. 1232 * In this case we return -1 to tell the caller that we baled.
1233 */ 1233 */
1234static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1234static int shrink_dcache_memory(struct shrinker *shrink,
1235 struct shrink_control *sc)
1235{ 1236{
1237 int nr = sc->nr_to_scan;
1238 gfp_t gfp_mask = sc->gfp_mask;
1239
1236 if (nr) { 1240 if (nr) {
1237 if (!(gfp_mask & __GFP_FS)) 1241 if (!(gfp_mask & __GFP_FS))
1238 return -1; 1242 return -1;
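
For orientation, a sketch of what the reworked shrinker callback looks like for an arbitrary cache; example_cache_prune(), example_cache_count() and the cache itself are hypothetical, only the callback signature and the shrink_control fields (nr_to_scan, gfp_mask) correspond to the interface used above:

	/* hypothetical cache hooks, not a real API */
	static void example_cache_prune(unsigned long nr_to_prune);
	static int example_cache_count(void);

	static int example_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc)
	{
		if (sc->nr_to_scan) {
			/* same rule as shrink_dcache_memory(): don't recurse into the fs */
			if (!(sc->gfp_mask & __GFP_FS))
				return -1;
			example_cache_prune(sc->nr_to_scan);
		}
		/* report how many objects remain so the VM can gauge pressure */
		return example_cache_count();
	}

	static struct shrinker example_shrinker = {
		.shrink	= example_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};
	/* registered once at init time with register_shrinker(&example_shrinker) */
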
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index b80e0aa3cfa5..5a59efa0bb46 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -50,7 +50,7 @@ static int __init init_dlm(void)
50 if (error) 50 if (error)
51 goto out_netlink; 51 goto out_netlink;
52 52
53 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); 53 printk("DLM installed\n");
54 54
55 return 0; 55 return 0;
56 56
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 98b77c89494c..c00e055b6282 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -40,9 +40,12 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
40static void drop_slab(void) 40static void drop_slab(void)
41{ 41{
42 int nr_objects; 42 int nr_objects;
43 struct shrink_control shrink = {
44 .gfp_mask = GFP_KERNEL,
45 };
43 46
44 do { 47 do {
45 nr_objects = shrink_slab(1000, GFP_KERNEL, 1000); 48 nr_objects = shrink_slab(&shrink, 1000, 1000);
46 } while (nr_objects > 10); 49 } while (nr_objects > 10);
47} 50}
48 51
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 4d4cc6a90cd5..227b409b8406 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -521,6 +521,8 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
521 struct dentry *lower_dir_dentry; 521 struct dentry *lower_dir_dentry;
522 int rc; 522 int rc;
523 523
524 dentry_unhash(dentry);
525
524 lower_dentry = ecryptfs_dentry_to_lower(dentry); 526 lower_dentry = ecryptfs_dentry_to_lower(dentry);
525 dget(dentry); 527 dget(dentry);
526 lower_dir_dentry = lock_parent(lower_dentry); 528 lower_dir_dentry = lock_parent(lower_dentry);
@@ -571,6 +573,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
571 struct dentry *lower_new_dir_dentry; 573 struct dentry *lower_new_dir_dentry;
572 struct dentry *trap = NULL; 574 struct dentry *trap = NULL;
573 575
576 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
577 dentry_unhash(new_dentry);
578
574 lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); 579 lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
575 lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); 580 lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
576 dget(lower_old_dentry); 581 dget(lower_old_dentry);
diff --git a/fs/exec.c b/fs/exec.c
index c1cf372f17a7..ea5f748906a8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -42,7 +42,6 @@
42#include <linux/pid_namespace.h> 42#include <linux/pid_namespace.h>
43#include <linux/module.h> 43#include <linux/module.h>
44#include <linux/namei.h> 44#include <linux/namei.h>
45#include <linux/proc_fs.h>
46#include <linux/mount.h> 45#include <linux/mount.h>
47#include <linux/security.h> 46#include <linux/security.h>
48#include <linux/syscalls.h> 47#include <linux/syscalls.h>
@@ -200,7 +199,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
200 199
201#ifdef CONFIG_STACK_GROWSUP 200#ifdef CONFIG_STACK_GROWSUP
202 if (write) { 201 if (write) {
203 ret = expand_stack_downwards(bprm->vma, pos); 202 ret = expand_downwards(bprm->vma, pos);
204 if (ret < 0) 203 if (ret < 0)
205 return NULL; 204 return NULL;
206 } 205 }
@@ -600,7 +599,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
600 unsigned long length = old_end - old_start; 599 unsigned long length = old_end - old_start;
601 unsigned long new_start = old_start - shift; 600 unsigned long new_start = old_start - shift;
602 unsigned long new_end = old_end - shift; 601 unsigned long new_end = old_end - shift;
603 struct mmu_gather *tlb; 602 struct mmu_gather tlb;
604 603
605 BUG_ON(new_start > new_end); 604 BUG_ON(new_start > new_end);
606 605
@@ -626,12 +625,12 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
626 return -ENOMEM; 625 return -ENOMEM;
627 626
628 lru_add_drain(); 627 lru_add_drain();
629 tlb = tlb_gather_mmu(mm, 0); 628 tlb_gather_mmu(&tlb, mm, 0);
630 if (new_end > old_start) { 629 if (new_end > old_start) {
631 /* 630 /*
632 * when the old and new regions overlap clear from new_end. 631 * when the old and new regions overlap clear from new_end.
633 */ 632 */
634 free_pgd_range(tlb, new_end, old_end, new_end, 633 free_pgd_range(&tlb, new_end, old_end, new_end,
635 vma->vm_next ? vma->vm_next->vm_start : 0); 634 vma->vm_next ? vma->vm_next->vm_start : 0);
636 } else { 635 } else {
637 /* 636 /*
@@ -640,10 +639,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
640 * have constraints on va-space that make this illegal (IA64) - 639 * have constraints on va-space that make this illegal (IA64) -
641 * for the others its just a little faster. 640 * for the others its just a little faster.
642 */ 641 */
643 free_pgd_range(tlb, old_start, old_end, new_end, 642 free_pgd_range(&tlb, old_start, old_end, new_end,
644 vma->vm_next ? vma->vm_next->vm_start : 0); 643 vma->vm_next ? vma->vm_next->vm_start : 0);
645 } 644 }
646 tlb_finish_mmu(tlb, new_end, old_end); 645 tlb_finish_mmu(&tlb, new_end, old_end);
647 646
648 /* 647 /*
649 * Shrink the vma to just the new range. Always succeeds. 648 * Shrink the vma to just the new range. Always succeeds.
@@ -1624,6 +1623,41 @@ expand_fail:
1624 return ret; 1623 return ret;
1625} 1624}
1626 1625
1626static int cn_print_exe_file(struct core_name *cn)
1627{
1628 struct file *exe_file;
1629 char *pathbuf, *path, *p;
1630 int ret;
1631
1632 exe_file = get_mm_exe_file(current->mm);
1633 if (!exe_file)
1634 return cn_printf(cn, "(unknown)");
1635
1636 pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
1637 if (!pathbuf) {
1638 ret = -ENOMEM;
1639 goto put_exe_file;
1640 }
1641
1642 path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
1643 if (IS_ERR(path)) {
1644 ret = PTR_ERR(path);
1645 goto free_buf;
1646 }
1647
1648 for (p = path; *p; p++)
1649 if (*p == '/')
1650 *p = '!';
1651
1652 ret = cn_printf(cn, "%s", path);
1653
1654free_buf:
1655 kfree(pathbuf);
1656put_exe_file:
1657 fput(exe_file);
1658 return ret;
1659}
1660
1627/* format_corename will inspect the pattern parameter, and output a 1661/* format_corename will inspect the pattern parameter, and output a
1628 * name into corename, which must have space for at least 1662 * name into corename, which must have space for at least
1629 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. 1663 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
@@ -1695,6 +1729,9 @@ static int format_corename(struct core_name *cn, long signr)
1695 case 'e': 1729 case 'e':
1696 err = cn_printf(cn, "%s", current->comm); 1730 err = cn_printf(cn, "%s", current->comm);
1697 break; 1731 break;
1732 case 'E':
1733 err = cn_print_exe_file(cn);
1734 break;
1698 /* core limit size */ 1735 /* core limit size */
1699 case 'c': 1736 case 'c':
1700 err = cn_printf(cn, "%lu", 1737 err = cn_printf(cn, "%lu",
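A minimal userspace sketch of the '/'-to-'!' mangling that cn_print_exe_file() applies when core_pattern contains the new %E specifier. The helper name, the sample path and the main() driver are illustrative assumptions, not part of this patch.

#include <stdio.h>

/* Same transformation as the for-loop in cn_print_exe_file() above. */
static void mangle_exe_path(char *path)
{
	char *p;

	for (p = path; *p; p++)
		if (*p == '/')
			*p = '!';
}

int main(void)
{
	char path[] = "/usr/bin/python";	/* invented example path */

	mangle_exe_path(path);
	printf("%s\n", path);			/* prints "!usr!bin!python" */
	return 0;
}

With a core_pattern such as core.%E.%p (chosen here only for illustration), a crash of /usr/bin/python would then produce a core file named core.!usr!bin!python.<pid>.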
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3c6a9e0eadc1..aad153ef6b78 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -36,6 +36,7 @@
36#include <linux/quotaops.h> 36#include <linux/quotaops.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/log2.h> 38#include <linux/log2.h>
39#include <linux/cleancache.h>
39 40
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
41 42
@@ -1367,6 +1368,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
1367 } else { 1368 } else {
1368 ext3_msg(sb, KERN_INFO, "using internal journal"); 1369 ext3_msg(sb, KERN_INFO, "using internal journal");
1369 } 1370 }
1371 cleancache_init_fs(sb);
1370 return res; 1372 return res;
1371} 1373}
1372 1374
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index c947e36eda6c..04109460ba9e 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
6 6
7ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \ 7ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
8 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ 8 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
9 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o 9 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
10 mmp.o
10 11
11ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o 12ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
12ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o 13ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 1c67139ad4b4..264f6949511e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -362,130 +362,6 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
362} 362}
363 363
364/** 364/**
365 * ext4_add_groupblocks() -- Add given blocks to an existing group
366 * @handle: handle to this transaction
367 * @sb: super block
368 * @block: start physical block to add to the block group
369 * @count: number of blocks to free
370 *
371 * This marks the blocks as free in the bitmap. We ask the
372 * mballoc to reload the buddy after this by setting group
373 * EXT4_GROUP_INFO_NEED_INIT_BIT flag
374 */
375void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
376 ext4_fsblk_t block, unsigned long count)
377{
378 struct buffer_head *bitmap_bh = NULL;
379 struct buffer_head *gd_bh;
380 ext4_group_t block_group;
381 ext4_grpblk_t bit;
382 unsigned int i;
383 struct ext4_group_desc *desc;
384 struct ext4_sb_info *sbi = EXT4_SB(sb);
385 int err = 0, ret, blk_free_count;
386 ext4_grpblk_t blocks_freed;
387 struct ext4_group_info *grp;
388
389 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
390
391 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
392 grp = ext4_get_group_info(sb, block_group);
393 /*
394 * Check to see if we are freeing blocks across a group
395 * boundary.
396 */
397 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
398 goto error_return;
399 }
400 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
401 if (!bitmap_bh)
402 goto error_return;
403 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
404 if (!desc)
405 goto error_return;
406
407 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
408 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
409 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
410 in_range(block + count - 1, ext4_inode_table(sb, desc),
411 sbi->s_itb_per_group)) {
412 ext4_error(sb, "Adding blocks in system zones - "
413 "Block = %llu, count = %lu",
414 block, count);
415 goto error_return;
416 }
417
418 /*
419 * We are about to add blocks to the bitmap,
420 * so we need undo access.
421 */
422 BUFFER_TRACE(bitmap_bh, "getting undo access");
423 err = ext4_journal_get_undo_access(handle, bitmap_bh);
424 if (err)
425 goto error_return;
426
427 /*
428 * We are about to modify some metadata. Call the journal APIs
429 * to unshare ->b_data if a currently-committing transaction is
430 * using it
431 */
432 BUFFER_TRACE(gd_bh, "get_write_access");
433 err = ext4_journal_get_write_access(handle, gd_bh);
434 if (err)
435 goto error_return;
436 /*
437 * make sure we don't allow a parallel init on other groups in the
438 * same buddy cache
439 */
440 down_write(&grp->alloc_sem);
441 for (i = 0, blocks_freed = 0; i < count; i++) {
442 BUFFER_TRACE(bitmap_bh, "clear bit");
443 if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
444 bit + i, bitmap_bh->b_data)) {
445 ext4_error(sb, "bit already cleared for block %llu",
446 (ext4_fsblk_t)(block + i));
447 BUFFER_TRACE(bitmap_bh, "bit already cleared");
448 } else {
449 blocks_freed++;
450 }
451 }
452 ext4_lock_group(sb, block_group);
453 blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
454 ext4_free_blks_set(sb, desc, blk_free_count);
455 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
456 ext4_unlock_group(sb, block_group);
457 percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
458
459 if (sbi->s_log_groups_per_flex) {
460 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
461 atomic_add(blocks_freed,
462 &sbi->s_flex_groups[flex_group].free_blocks);
463 }
464 /*
465 * request to reload the buddy with the
466 * new bitmap information
467 */
468 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
469 grp->bb_free += blocks_freed;
470 up_write(&grp->alloc_sem);
471
472 /* We dirtied the bitmap block */
473 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
474 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
475
476 /* And the group descriptor block */
477 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
478 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
479 if (!err)
480 err = ret;
481
482error_return:
483 brelse(bitmap_bh);
484 ext4_std_error(sb, err);
485 return;
486}
487
488/**
489 * ext4_has_free_blocks() 365 * ext4_has_free_blocks()
490 * @sbi: in-core super block structure. 366 * @sbi: in-core super block structure.
491 * @nblocks: number of needed blocks 367 * @nblocks: number of needed blocks
@@ -493,7 +369,8 @@ error_return:
493 * Check if filesystem has nblocks free & available for allocation. 369 * Check if filesystem has nblocks free & available for allocation.
494 * On success return 1, return 0 on failure. 370 * On success return 1, return 0 on failure.
495 */ 371 */
496static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) 372static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
373 s64 nblocks, unsigned int flags)
497{ 374{
498 s64 free_blocks, dirty_blocks, root_blocks; 375 s64 free_blocks, dirty_blocks, root_blocks;
499 struct percpu_counter *fbc = &sbi->s_freeblocks_counter; 376 struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
@@ -507,11 +384,6 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
507 EXT4_FREEBLOCKS_WATERMARK) { 384 EXT4_FREEBLOCKS_WATERMARK) {
508 free_blocks = percpu_counter_sum_positive(fbc); 385 free_blocks = percpu_counter_sum_positive(fbc);
509 dirty_blocks = percpu_counter_sum_positive(dbc); 386 dirty_blocks = percpu_counter_sum_positive(dbc);
510 if (dirty_blocks < 0) {
511 printk(KERN_CRIT "Dirty block accounting "
512 "went wrong %lld\n",
513 (long long)dirty_blocks);
514 }
515 } 387 }
516 /* Check whether we have space after 388 /* Check whether we have space after
517 * accounting for current dirty blocks & root reserved blocks. 389 * accounting for current dirty blocks & root reserved blocks.
@@ -522,7 +394,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
522 /* Hm, nope. Are (enough) root reserved blocks available? */ 394 /* Hm, nope. Are (enough) root reserved blocks available? */
523 if (sbi->s_resuid == current_fsuid() || 395 if (sbi->s_resuid == current_fsuid() ||
524 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || 396 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
525 capable(CAP_SYS_RESOURCE)) { 397 capable(CAP_SYS_RESOURCE) ||
398 (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
399
526 if (free_blocks >= (nblocks + dirty_blocks)) 400 if (free_blocks >= (nblocks + dirty_blocks))
527 return 1; 401 return 1;
528 } 402 }
@@ -531,9 +405,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
531} 405}
532 406
533int ext4_claim_free_blocks(struct ext4_sb_info *sbi, 407int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
534 s64 nblocks) 408 s64 nblocks, unsigned int flags)
535{ 409{
536 if (ext4_has_free_blocks(sbi, nblocks)) { 410 if (ext4_has_free_blocks(sbi, nblocks, flags)) {
537 percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks); 411 percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
538 return 0; 412 return 0;
539 } else 413 } else
@@ -554,7 +428,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
554 */ 428 */
555int ext4_should_retry_alloc(struct super_block *sb, int *retries) 429int ext4_should_retry_alloc(struct super_block *sb, int *retries)
556{ 430{
557 if (!ext4_has_free_blocks(EXT4_SB(sb), 1) || 431 if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
558 (*retries)++ > 3 || 432 (*retries)++ > 3 ||
559 !EXT4_SB(sb)->s_journal) 433 !EXT4_SB(sb)->s_journal)
560 return 0; 434 return 0;
@@ -577,7 +451,8 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
577 * error stores in errp pointer 451 * error stores in errp pointer
578 */ 452 */
579ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, 453ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
580 ext4_fsblk_t goal, unsigned long *count, int *errp) 454 ext4_fsblk_t goal, unsigned int flags,
455 unsigned long *count, int *errp)
581{ 456{
582 struct ext4_allocation_request ar; 457 struct ext4_allocation_request ar;
583 ext4_fsblk_t ret; 458 ext4_fsblk_t ret;
@@ -587,6 +462,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
587 ar.inode = inode; 462 ar.inode = inode;
588 ar.goal = goal; 463 ar.goal = goal;
589 ar.len = count ? *count : 1; 464 ar.len = count ? *count : 1;
465 ar.flags = flags;
590 466
591 ret = ext4_mb_new_blocks(handle, &ar, errp); 467 ret = ext4_mb_new_blocks(handle, &ar, errp);
592 if (count) 468 if (count)
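With the hunks above, allocations that reach ext4_has_free_blocks() now carry an allocation-flags word, and EXT4_MB_USE_ROOT_BLOCKS lets a caller draw on the root-reserved pool. A hedged call-site sketch follows; the helper name and the surrounding handle/inode/goal variables are assumptions for illustration, not code from this series.

/* Hypothetical caller of the new ext4_new_meta_blocks() signature;
 * assumes the ext4 internal headers are in scope and that the caller
 * already holds a valid journal handle.
 */
static ext4_fsblk_t alloc_one_meta_block(handle_t *handle,
					 struct inode *inode,
					 ext4_fsblk_t goal, int *errp)
{
	unsigned long count = 1;

	/* EXT4_MB_USE_ROOT_BLOCKS allows this allocation to fall back
	 * to the root-reserved blocks, as the punch-out path does. */
	return ext4_new_meta_blocks(handle, inode, goal,
				    EXT4_MB_USE_ROOT_BLOCKS, &count, errp);
}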
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 4daaf2b753f4..a74b89c09f90 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -108,7 +108,8 @@ typedef unsigned int ext4_group_t;
108#define EXT4_MB_DELALLOC_RESERVED 0x0400 108#define EXT4_MB_DELALLOC_RESERVED 0x0400
109/* We are doing stream allocation */ 109/* We are doing stream allocation */
110#define EXT4_MB_STREAM_ALLOC 0x0800 110#define EXT4_MB_STREAM_ALLOC 0x0800
111 111/* Use reserved root blocks if needed */
112#define EXT4_MB_USE_ROOT_BLOCKS 0x1000
112 113
113struct ext4_allocation_request { 114struct ext4_allocation_request {
114 /* target inode for block we're allocating */ 115 /* target inode for block we're allocating */
@@ -209,6 +210,8 @@ struct ext4_io_submit {
209 */ 210 */
210#define EXT4_BAD_INO 1 /* Bad blocks inode */ 211#define EXT4_BAD_INO 1 /* Bad blocks inode */
211#define EXT4_ROOT_INO 2 /* Root inode */ 212#define EXT4_ROOT_INO 2 /* Root inode */
213#define EXT4_USR_QUOTA_INO 3 /* User quota inode */
214#define EXT4_GRP_QUOTA_INO 4 /* Group quota inode */
212#define EXT4_BOOT_LOADER_INO 5 /* Boot loader inode */ 215#define EXT4_BOOT_LOADER_INO 5 /* Boot loader inode */
213#define EXT4_UNDEL_DIR_INO 6 /* Undelete directory inode */ 216#define EXT4_UNDEL_DIR_INO 6 /* Undelete directory inode */
214#define EXT4_RESIZE_INO 7 /* Reserved group descriptors inode */ 217#define EXT4_RESIZE_INO 7 /* Reserved group descriptors inode */
@@ -512,6 +515,10 @@ struct ext4_new_group_data {
512 /* Convert extent to initialized after IO complete */ 515 /* Convert extent to initialized after IO complete */
513#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ 516#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
514 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) 517 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
518 /* Punch out blocks of an extent */
519#define EXT4_GET_BLOCKS_PUNCH_OUT_EXT 0x0020
520 /* Don't normalize allocation size (used for fallocate) */
521#define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040
515 522
516/* 523/*
517 * Flags used by ext4_free_blocks 524 * Flags used by ext4_free_blocks
@@ -1028,7 +1035,7 @@ struct ext4_super_block {
1028 __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ 1035 __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
1029 __le32 s_flags; /* Miscellaneous flags */ 1036 __le32 s_flags; /* Miscellaneous flags */
1030 __le16 s_raid_stride; /* RAID stride */ 1037 __le16 s_raid_stride; /* RAID stride */
1031 __le16 s_mmp_interval; /* # seconds to wait in MMP checking */ 1038 __le16 s_mmp_update_interval; /* # seconds to wait in MMP checking */
1032 __le64 s_mmp_block; /* Block for multi-mount protection */ 1039 __le64 s_mmp_block; /* Block for multi-mount protection */
1033 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ 1040 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
1034 __u8 s_log_groups_per_flex; /* FLEX_BG group size */ 1041 __u8 s_log_groups_per_flex; /* FLEX_BG group size */
@@ -1144,6 +1151,9 @@ struct ext4_sb_info {
1144 unsigned long s_ext_blocks; 1151 unsigned long s_ext_blocks;
1145 unsigned long s_ext_extents; 1152 unsigned long s_ext_extents;
1146#endif 1153#endif
1154 /* ext4 extent cache stats */
1155 unsigned long extent_cache_hits;
1156 unsigned long extent_cache_misses;
1147 1157
1148 /* for buddy allocator */ 1158 /* for buddy allocator */
1149 struct ext4_group_info ***s_group_info; 1159 struct ext4_group_info ***s_group_info;
@@ -1201,6 +1211,9 @@ struct ext4_sb_info {
1201 struct ext4_li_request *s_li_request; 1211 struct ext4_li_request *s_li_request;
1202 /* Wait multiplier for lazy initialization thread */ 1212 /* Wait multiplier for lazy initialization thread */
1203 unsigned int s_li_wait_mult; 1213 unsigned int s_li_wait_mult;
1214
1215 /* Kernel thread for multiple mount protection */
1216 struct task_struct *s_mmp_tsk;
1204}; 1217};
1205 1218
1206static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) 1219static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1338,6 +1351,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1338#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 1351#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
1339#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 1352#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
1340#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 1353#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
1354#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100
1341 1355
1342#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 1356#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
1343#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 1357#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
@@ -1351,13 +1365,29 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1351#define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */ 1365#define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */
1352#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */ 1366#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */
1353 1367
1368#define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR
1369#define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
1370 EXT4_FEATURE_INCOMPAT_META_BG)
1371#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
1372 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
1373 EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
1374
1375#define EXT3_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR
1376#define EXT3_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
1377 EXT4_FEATURE_INCOMPAT_RECOVER| \
1378 EXT4_FEATURE_INCOMPAT_META_BG)
1379#define EXT3_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
1380 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
1381 EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
1382
1354#define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR 1383#define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
1355#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ 1384#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
1356 EXT4_FEATURE_INCOMPAT_RECOVER| \ 1385 EXT4_FEATURE_INCOMPAT_RECOVER| \
1357 EXT4_FEATURE_INCOMPAT_META_BG| \ 1386 EXT4_FEATURE_INCOMPAT_META_BG| \
1358 EXT4_FEATURE_INCOMPAT_EXTENTS| \ 1387 EXT4_FEATURE_INCOMPAT_EXTENTS| \
1359 EXT4_FEATURE_INCOMPAT_64BIT| \ 1388 EXT4_FEATURE_INCOMPAT_64BIT| \
1360 EXT4_FEATURE_INCOMPAT_FLEX_BG) 1389 EXT4_FEATURE_INCOMPAT_FLEX_BG| \
1390 EXT4_FEATURE_INCOMPAT_MMP)
1361#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ 1391#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
1362 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ 1392 EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
1363 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ 1393 EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -1590,12 +1620,6 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
1590 */ 1620 */
1591struct ext4_lazy_init { 1621struct ext4_lazy_init {
1592 unsigned long li_state; 1622 unsigned long li_state;
1593
1594 wait_queue_head_t li_wait_daemon;
1595 wait_queue_head_t li_wait_task;
1596 struct timer_list li_timer;
1597 struct task_struct *li_task;
1598
1599 struct list_head li_request_list; 1623 struct list_head li_request_list;
1600 struct mutex li_list_mtx; 1624 struct mutex li_list_mtx;
1601}; 1625};
@@ -1615,6 +1639,67 @@ struct ext4_features {
1615}; 1639};
1616 1640
1617/* 1641/*
1642 * This structure will be used for multiple mount protection. It will be
1643 * written into the block number saved in the s_mmp_block field in the
1644 * superblock. Programs that check MMP should assume that if
1645 * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
1646 * to use the filesystem, regardless of how old the timestamp is.
1647 */
1648#define EXT4_MMP_MAGIC 0x004D4D50U /* ASCII for MMP */
1649#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
1650#define EXT4_MMP_SEQ_FSCK 0xE24D4D50U /* mmp_seq value when being fscked */
1651#define EXT4_MMP_SEQ_MAX 0xE24D4D4FU /* maximum valid mmp_seq value */
1652
1653struct mmp_struct {
1654 __le32 mmp_magic; /* Magic number for MMP */
1655 __le32 mmp_seq; /* Sequence no. updated periodically */
1656
1657 /*
1658 * mmp_time, mmp_nodename & mmp_bdevname are only used for information
1659 * purposes and do not affect the correctness of the algorithm
1660 */
1661 __le64 mmp_time; /* Time last updated */
1662 char mmp_nodename[64]; /* Node which last updated MMP block */
1663 char mmp_bdevname[32]; /* Bdev which last updated MMP block */
1664
1665 /*
1666 * mmp_check_interval is used to verify if the MMP block has been
1667 * updated on the block device. The value is updated based on the
1668 * maximum time to write the MMP block during an update cycle.
1669 */
1670 __le16 mmp_check_interval;
1671
1672 __le16 mmp_pad1;
1673 __le32 mmp_pad2[227];
1674};
1675
1676/* arguments passed to the mmp thread */
1677struct mmpd_data {
1678 struct buffer_head *bh; /* bh from initial read_mmp_block() */
1679 struct super_block *sb; /* super block of the fs */
1680};
1681
1682/*
1683 * Check interval multiplier
1684 * The MMP block is written every update interval and initially checked every
1685 * update interval x the multiplier (the value is then adapted based on the
1686 * write latency). The reason is that writes can be delayed under load and we
1687 * don't want readers to incorrectly assume that the filesystem is no longer
1688 * in use.
1689 */
1690#define EXT4_MMP_CHECK_MULT 2UL
1691
1692/*
1693 * Minimum interval for MMP checking in seconds.
1694 */
1695#define EXT4_MMP_MIN_CHECK_INTERVAL 5UL
1696
1697/*
1698 * Maximum interval for MMP checking in seconds.
1699 */
1700#define EXT4_MMP_MAX_CHECK_INTERVAL 300UL
1701
1702/*
1618 * Function prototypes 1703 * Function prototypes
1619 */ 1704 */
1620 1705
@@ -1638,10 +1723,12 @@ extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
1638extern unsigned long ext4_bg_num_gdb(struct super_block *sb, 1723extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
1639 ext4_group_t group); 1724 ext4_group_t group);
1640extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, 1725extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
1641 ext4_fsblk_t goal, unsigned long *count, int *errp); 1726 ext4_fsblk_t goal,
1642extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks); 1727 unsigned int flags,
1643extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, 1728 unsigned long *count,
1644 ext4_fsblk_t block, unsigned long count); 1729 int *errp);
1730extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
1731 s64 nblocks, unsigned int flags);
1645extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *); 1732extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
1646extern void ext4_check_blocks_bitmap(struct super_block *); 1733extern void ext4_check_blocks_bitmap(struct super_block *);
1647extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, 1734extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
@@ -1706,6 +1793,8 @@ extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
1706 unsigned long count, int flags); 1793 unsigned long count, int flags);
1707extern int ext4_mb_add_groupinfo(struct super_block *sb, 1794extern int ext4_mb_add_groupinfo(struct super_block *sb,
1708 ext4_group_t i, struct ext4_group_desc *desc); 1795 ext4_group_t i, struct ext4_group_desc *desc);
1796extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
1797 ext4_fsblk_t block, unsigned long count);
1709extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); 1798extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
1710 1799
1711/* inode.c */ 1800/* inode.c */
@@ -1729,6 +1818,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int);
1729extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); 1818extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
1730extern int ext4_can_truncate(struct inode *inode); 1819extern int ext4_can_truncate(struct inode *inode);
1731extern void ext4_truncate(struct inode *); 1820extern void ext4_truncate(struct inode *);
1821extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
1732extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks); 1822extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
1733extern void ext4_set_inode_flags(struct inode *); 1823extern void ext4_set_inode_flags(struct inode *);
1734extern void ext4_get_inode_flags(struct ext4_inode_info *); 1824extern void ext4_get_inode_flags(struct ext4_inode_info *);
@@ -1738,6 +1828,8 @@ extern int ext4_writepage_trans_blocks(struct inode *);
1738extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); 1828extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
1739extern int ext4_block_truncate_page(handle_t *handle, 1829extern int ext4_block_truncate_page(handle_t *handle,
1740 struct address_space *mapping, loff_t from); 1830 struct address_space *mapping, loff_t from);
1831extern int ext4_block_zero_page_range(handle_t *handle,
1832 struct address_space *mapping, loff_t from, loff_t length);
1741extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 1833extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1742extern qsize_t *ext4_get_reserved_space(struct inode *inode); 1834extern qsize_t *ext4_get_reserved_space(struct inode *inode);
1743extern void ext4_da_update_reserve_space(struct inode *inode, 1835extern void ext4_da_update_reserve_space(struct inode *inode,
@@ -1788,6 +1880,10 @@ extern void __ext4_warning(struct super_block *, const char *, unsigned int,
1788 __LINE__, ## message) 1880 __LINE__, ## message)
1789extern void ext4_msg(struct super_block *, const char *, const char *, ...) 1881extern void ext4_msg(struct super_block *, const char *, const char *, ...)
1790 __attribute__ ((format (printf, 3, 4))); 1882 __attribute__ ((format (printf, 3, 4)));
1883extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
1884 const char *, unsigned int, const char *);
1885#define dump_mmp_msg(sb, mmp, msg) __dump_mmp_msg(sb, mmp, __func__, \
1886 __LINE__, msg)
1791extern void __ext4_grp_locked_error(const char *, unsigned int, \ 1887extern void __ext4_grp_locked_error(const char *, unsigned int, \
1792 struct super_block *, ext4_group_t, \ 1888 struct super_block *, ext4_group_t, \
1793 unsigned long, ext4_fsblk_t, \ 1889 unsigned long, ext4_fsblk_t, \
@@ -2064,6 +2160,8 @@ extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
2064extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 2160extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
2065 struct ext4_map_blocks *map, int flags); 2161 struct ext4_map_blocks *map, int flags);
2066extern void ext4_ext_truncate(struct inode *); 2162extern void ext4_ext_truncate(struct inode *);
2163extern int ext4_ext_punch_hole(struct file *file, loff_t offset,
2164 loff_t length);
2067extern void ext4_ext_init(struct super_block *); 2165extern void ext4_ext_init(struct super_block *);
2068extern void ext4_ext_release(struct super_block *); 2166extern void ext4_ext_release(struct super_block *);
2069extern long ext4_fallocate(struct file *file, int mode, loff_t offset, 2167extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
@@ -2092,6 +2190,9 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
2092 int len, 2190 int len,
2093 struct writeback_control *wbc); 2191 struct writeback_control *wbc);
2094 2192
2193/* mmp.c */
2194extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
2195
2095/* BH_Uninit flag: blocks are allocated but uninitialized on disk */ 2196/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
2096enum ext4_state_bits { 2197enum ext4_state_bits {
2097 BH_Uninit /* blocks are allocated but uninitialized on disk */ 2198 BH_Uninit /* blocks are allocated but uninitialized on disk */
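The mmp_struct layout and the EXT4_MMP_* constants above describe how a mounted filesystem advertises itself in the block named by s_mmp_block. The sketch below shows one way a checker could read and classify that block; it is a sketch only: the helper name and the single-read simplification are assumptions (a real check re-reads after roughly EXT4_MMP_CHECK_MULT * mmp_check_interval seconds and compares mmp_seq between the two reads).

/* Hypothetical MMP probe, assuming the ext4 headers above are in scope. */
static int mmp_block_says_busy(struct super_block *sb, ext4_fsblk_t mmp_block)
{
	struct buffer_head *bh;
	struct mmp_struct *mmp;
	u32 seq;

	bh = sb_bread(sb, mmp_block);
	if (!bh)
		return -EIO;
	mmp = (struct mmp_struct *)bh->b_data;
	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) {
		brelse(bh);
		return -EINVAL;
	}
	seq = le32_to_cpu(mmp->mmp_seq);
	brelse(bh);

	if (seq == EXT4_MMP_SEQ_CLEAN)
		return 0;		/* cleanly unmounted */
	if (seq == EXT4_MMP_SEQ_FSCK || seq > EXT4_MMP_SEQ_MAX)
		return 1;		/* fsck in progress or unknown code: unsafe */
	/* A second read after the check interval would compare mmp_seq
	 * against this value; a change means another node is live. */
	return 0;
}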
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 6e272ef6ba96..f5240aa15601 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -6,20 +6,6 @@
6 6
7#include <trace/events/ext4.h> 7#include <trace/events/ext4.h>
8 8
9int __ext4_journal_get_undo_access(const char *where, unsigned int line,
10 handle_t *handle, struct buffer_head *bh)
11{
12 int err = 0;
13
14 if (ext4_handle_valid(handle)) {
15 err = jbd2_journal_get_undo_access(handle, bh);
16 if (err)
17 ext4_journal_abort_handle(where, line, __func__, bh,
18 handle, err);
19 }
20 return err;
21}
22
23int __ext4_journal_get_write_access(const char *where, unsigned int line, 9int __ext4_journal_get_write_access(const char *where, unsigned int line,
24 handle_t *handle, struct buffer_head *bh) 10 handle_t *handle, struct buffer_head *bh)
25{ 11{
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index d0f53538a57f..bb85757689b6 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -126,9 +126,6 @@ void ext4_journal_abort_handle(const char *caller, unsigned int line,
126 const char *err_fn, 126 const char *err_fn,
127 struct buffer_head *bh, handle_t *handle, int err); 127 struct buffer_head *bh, handle_t *handle, int err);
128 128
129int __ext4_journal_get_undo_access(const char *where, unsigned int line,
130 handle_t *handle, struct buffer_head *bh);
131
132int __ext4_journal_get_write_access(const char *where, unsigned int line, 129int __ext4_journal_get_write_access(const char *where, unsigned int line,
133 handle_t *handle, struct buffer_head *bh); 130 handle_t *handle, struct buffer_head *bh);
134 131
@@ -146,8 +143,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
146int __ext4_handle_dirty_super(const char *where, unsigned int line, 143int __ext4_handle_dirty_super(const char *where, unsigned int line,
147 handle_t *handle, struct super_block *sb); 144 handle_t *handle, struct super_block *sb);
148 145
149#define ext4_journal_get_undo_access(handle, bh) \
150 __ext4_journal_get_undo_access(__func__, __LINE__, (handle), (bh))
151#define ext4_journal_get_write_access(handle, bh) \ 146#define ext4_journal_get_write_access(handle, bh) \
152 __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh)) 147 __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
153#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ 148#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 4890d6f3ad15..5199bac7fc62 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -46,6 +46,13 @@
46 46
47#include <trace/events/ext4.h> 47#include <trace/events/ext4.h>
48 48
49static int ext4_split_extent(handle_t *handle,
50 struct inode *inode,
51 struct ext4_ext_path *path,
52 struct ext4_map_blocks *map,
53 int split_flag,
54 int flags);
55
49static int ext4_ext_truncate_extend_restart(handle_t *handle, 56static int ext4_ext_truncate_extend_restart(handle_t *handle,
50 struct inode *inode, 57 struct inode *inode,
51 int needed) 58 int needed)
@@ -192,12 +199,13 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
192static ext4_fsblk_t 199static ext4_fsblk_t
193ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, 200ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
194 struct ext4_ext_path *path, 201 struct ext4_ext_path *path,
195 struct ext4_extent *ex, int *err) 202 struct ext4_extent *ex, int *err, unsigned int flags)
196{ 203{
197 ext4_fsblk_t goal, newblock; 204 ext4_fsblk_t goal, newblock;
198 205
199 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); 206 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
200 newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err); 207 newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
208 NULL, err);
201 return newblock; 209 return newblock;
202} 210}
203 211
@@ -474,9 +482,43 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
474 } 482 }
475 ext_debug("\n"); 483 ext_debug("\n");
476} 484}
485
486static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
487 ext4_fsblk_t newblock, int level)
488{
489 int depth = ext_depth(inode);
490 struct ext4_extent *ex;
491
492 if (depth != level) {
493 struct ext4_extent_idx *idx;
494 idx = path[level].p_idx;
495 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
496 ext_debug("%d: move %d:%llu in new index %llu\n", level,
497 le32_to_cpu(idx->ei_block),
498 ext4_idx_pblock(idx),
499 newblock);
500 idx++;
501 }
502
503 return;
504 }
505
506 ex = path[depth].p_ext;
507 while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
508 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
509 le32_to_cpu(ex->ee_block),
510 ext4_ext_pblock(ex),
511 ext4_ext_is_uninitialized(ex),
512 ext4_ext_get_actual_len(ex),
513 newblock);
514 ex++;
515 }
516}
517
477#else 518#else
478#define ext4_ext_show_path(inode, path) 519#define ext4_ext_show_path(inode, path)
479#define ext4_ext_show_leaf(inode, path) 520#define ext4_ext_show_leaf(inode, path)
521#define ext4_ext_show_move(inode, path, newblock, level)
480#endif 522#endif
481 523
482void ext4_ext_drop_refs(struct ext4_ext_path *path) 524void ext4_ext_drop_refs(struct ext4_ext_path *path)
@@ -792,14 +834,14 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
792 * - initializes subtree 834 * - initializes subtree
793 */ 835 */
794static int ext4_ext_split(handle_t *handle, struct inode *inode, 836static int ext4_ext_split(handle_t *handle, struct inode *inode,
795 struct ext4_ext_path *path, 837 unsigned int flags,
796 struct ext4_extent *newext, int at) 838 struct ext4_ext_path *path,
839 struct ext4_extent *newext, int at)
797{ 840{
798 struct buffer_head *bh = NULL; 841 struct buffer_head *bh = NULL;
799 int depth = ext_depth(inode); 842 int depth = ext_depth(inode);
800 struct ext4_extent_header *neh; 843 struct ext4_extent_header *neh;
801 struct ext4_extent_idx *fidx; 844 struct ext4_extent_idx *fidx;
802 struct ext4_extent *ex;
803 int i = at, k, m, a; 845 int i = at, k, m, a;
804 ext4_fsblk_t newblock, oldblock; 846 ext4_fsblk_t newblock, oldblock;
805 __le32 border; 847 __le32 border;
@@ -847,7 +889,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
847 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); 889 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
848 for (a = 0; a < depth - at; a++) { 890 for (a = 0; a < depth - at; a++) {
849 newblock = ext4_ext_new_meta_block(handle, inode, path, 891 newblock = ext4_ext_new_meta_block(handle, inode, path,
850 newext, &err); 892 newext, &err, flags);
851 if (newblock == 0) 893 if (newblock == 0)
852 goto cleanup; 894 goto cleanup;
853 ablocks[a] = newblock; 895 ablocks[a] = newblock;
@@ -876,7 +918,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
876 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 918 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
877 neh->eh_magic = EXT4_EXT_MAGIC; 919 neh->eh_magic = EXT4_EXT_MAGIC;
878 neh->eh_depth = 0; 920 neh->eh_depth = 0;
879 ex = EXT_FIRST_EXTENT(neh);
880 921
881 /* move remainder of path[depth] to the new leaf */ 922 /* move remainder of path[depth] to the new leaf */
882 if (unlikely(path[depth].p_hdr->eh_entries != 923 if (unlikely(path[depth].p_hdr->eh_entries !=
@@ -888,25 +929,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
888 goto cleanup; 929 goto cleanup;
889 } 930 }
890 /* start copy from next extent */ 931 /* start copy from next extent */
891 /* TODO: we could do it by single memmove */ 932 m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
892 m = 0; 933 ext4_ext_show_move(inode, path, newblock, depth);
893 path[depth].p_ext++;
894 while (path[depth].p_ext <=
895 EXT_MAX_EXTENT(path[depth].p_hdr)) {
896 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
897 le32_to_cpu(path[depth].p_ext->ee_block),
898 ext4_ext_pblock(path[depth].p_ext),
899 ext4_ext_is_uninitialized(path[depth].p_ext),
900 ext4_ext_get_actual_len(path[depth].p_ext),
901 newblock);
902 /*memmove(ex++, path[depth].p_ext++,
903 sizeof(struct ext4_extent));
904 neh->eh_entries++;*/
905 path[depth].p_ext++;
906 m++;
907 }
908 if (m) { 934 if (m) {
909 memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m); 935 struct ext4_extent *ex;
936 ex = EXT_FIRST_EXTENT(neh);
937 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
910 le16_add_cpu(&neh->eh_entries, m); 938 le16_add_cpu(&neh->eh_entries, m);
911 } 939 }
912 940
@@ -968,12 +996,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
968 996
969 ext_debug("int.index at %d (block %llu): %u -> %llu\n", 997 ext_debug("int.index at %d (block %llu): %u -> %llu\n",
970 i, newblock, le32_to_cpu(border), oldblock); 998 i, newblock, le32_to_cpu(border), oldblock);
971 /* copy indexes */
972 m = 0;
973 path[i].p_idx++;
974 999
975 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 1000 /* move remainder of path[i] to the new index block */
976 EXT_MAX_INDEX(path[i].p_hdr));
977 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 1001 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
978 EXT_LAST_INDEX(path[i].p_hdr))) { 1002 EXT_LAST_INDEX(path[i].p_hdr))) {
979 EXT4_ERROR_INODE(inode, 1003 EXT4_ERROR_INODE(inode,
@@ -982,20 +1006,13 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
982 err = -EIO; 1006 err = -EIO;
983 goto cleanup; 1007 goto cleanup;
984 } 1008 }
985 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { 1009 /* start copy indexes */
986 ext_debug("%d: move %d:%llu in new index %llu\n", i, 1010 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
987 le32_to_cpu(path[i].p_idx->ei_block), 1011 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
988 ext4_idx_pblock(path[i].p_idx), 1012 EXT_MAX_INDEX(path[i].p_hdr));
989 newblock); 1013 ext4_ext_show_move(inode, path, newblock, i);
990 /*memmove(++fidx, path[i].p_idx++,
991 sizeof(struct ext4_extent_idx));
992 neh->eh_entries++;
993 BUG_ON(neh->eh_entries > neh->eh_max);*/
994 path[i].p_idx++;
995 m++;
996 }
997 if (m) { 1014 if (m) {
998 memmove(++fidx, path[i].p_idx - m, 1015 memmove(++fidx, path[i].p_idx,
999 sizeof(struct ext4_extent_idx) * m); 1016 sizeof(struct ext4_extent_idx) * m);
1000 le16_add_cpu(&neh->eh_entries, m); 1017 le16_add_cpu(&neh->eh_entries, m);
1001 } 1018 }
@@ -1056,8 +1073,9 @@ cleanup:
1056 * just created block 1073 * just created block
1057 */ 1074 */
1058static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1075static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1059 struct ext4_ext_path *path, 1076 unsigned int flags,
1060 struct ext4_extent *newext) 1077 struct ext4_ext_path *path,
1078 struct ext4_extent *newext)
1061{ 1079{
1062 struct ext4_ext_path *curp = path; 1080 struct ext4_ext_path *curp = path;
1063 struct ext4_extent_header *neh; 1081 struct ext4_extent_header *neh;
@@ -1065,7 +1083,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1065 ext4_fsblk_t newblock; 1083 ext4_fsblk_t newblock;
1066 int err = 0; 1084 int err = 0;
1067 1085
1068 newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err); 1086 newblock = ext4_ext_new_meta_block(handle, inode, path,
1087 newext, &err, flags);
1069 if (newblock == 0) 1088 if (newblock == 0)
1070 return err; 1089 return err;
1071 1090
@@ -1140,8 +1159,9 @@ out:
1140 * if no free index is found, then it requests in-depth growing. 1159 * if no free index is found, then it requests in-depth growing.
1141 */ 1160 */
1142static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1161static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1143 struct ext4_ext_path *path, 1162 unsigned int flags,
1144 struct ext4_extent *newext) 1163 struct ext4_ext_path *path,
1164 struct ext4_extent *newext)
1145{ 1165{
1146 struct ext4_ext_path *curp; 1166 struct ext4_ext_path *curp;
1147 int depth, i, err = 0; 1167 int depth, i, err = 0;
@@ -1161,7 +1181,7 @@ repeat:
1161 if (EXT_HAS_FREE_INDEX(curp)) { 1181 if (EXT_HAS_FREE_INDEX(curp)) {
1162 /* if we found index with free entry, then use that 1182 /* if we found index with free entry, then use that
1163 * entry: create all needed subtree and add new leaf */ 1183 * entry: create all needed subtree and add new leaf */
1164 err = ext4_ext_split(handle, inode, path, newext, i); 1184 err = ext4_ext_split(handle, inode, flags, path, newext, i);
1165 if (err) 1185 if (err)
1166 goto out; 1186 goto out;
1167 1187
@@ -1174,7 +1194,8 @@ repeat:
1174 err = PTR_ERR(path); 1194 err = PTR_ERR(path);
1175 } else { 1195 } else {
1176 /* tree is full, time to grow in depth */ 1196 /* tree is full, time to grow in depth */
1177 err = ext4_ext_grow_indepth(handle, inode, path, newext); 1197 err = ext4_ext_grow_indepth(handle, inode, flags,
1198 path, newext);
1178 if (err) 1199 if (err)
1179 goto out; 1200 goto out;
1180 1201
@@ -1563,7 +1584,7 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1563 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 1584 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1564 * 1 if they got merged. 1585 * 1 if they got merged.
1565 */ 1586 */
1566static int ext4_ext_try_to_merge(struct inode *inode, 1587static int ext4_ext_try_to_merge_right(struct inode *inode,
1567 struct ext4_ext_path *path, 1588 struct ext4_ext_path *path,
1568 struct ext4_extent *ex) 1589 struct ext4_extent *ex)
1569{ 1590{
@@ -1603,6 +1624,31 @@ static int ext4_ext_try_to_merge(struct inode *inode,
1603} 1624}
1604 1625
1605/* 1626/*
1627 * This function tries to merge the @ex extent to neighbours in the tree.
1628 * return 1 if merge left else 0.
1629 */
1630static int ext4_ext_try_to_merge(struct inode *inode,
1631 struct ext4_ext_path *path,
1632 struct ext4_extent *ex) {
1633 struct ext4_extent_header *eh;
1634 unsigned int depth;
1635 int merge_done = 0;
1636 int ret = 0;
1637
1638 depth = ext_depth(inode);
1639 BUG_ON(path[depth].p_hdr == NULL);
1640 eh = path[depth].p_hdr;
1641
1642 if (ex > EXT_FIRST_EXTENT(eh))
1643 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1644
1645 if (!merge_done)
1646 ret = ext4_ext_try_to_merge_right(inode, path, ex);
1647
1648 return ret;
1649}
1650
1651/*
1606 * check if a portion of the "newext" extent overlaps with an 1652 * check if a portion of the "newext" extent overlaps with an
1607 * existing extent. 1653 * existing extent.
1608 * 1654 *
@@ -1668,6 +1714,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1668 int depth, len, err; 1714 int depth, len, err;
1669 ext4_lblk_t next; 1715 ext4_lblk_t next;
1670 unsigned uninitialized = 0; 1716 unsigned uninitialized = 0;
1717 int flags = 0;
1671 1718
1672 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1719 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1673 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1720 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
@@ -1742,7 +1789,9 @@ repeat:
1742 * There is no free space in the found leaf. 1789 * There is no free space in the found leaf.
1743 * We're gonna add a new leaf in the tree. 1790 * We're gonna add a new leaf in the tree.
1744 */ 1791 */
1745 err = ext4_ext_create_new_leaf(handle, inode, path, newext); 1792 if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
1793 flags = EXT4_MB_USE_ROOT_BLOCKS;
1794 err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1746 if (err) 1795 if (err)
1747 goto cleanup; 1796 goto cleanup;
1748 depth = ext_depth(inode); 1797 depth = ext_depth(inode);
@@ -2003,13 +2052,25 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2003} 2052}
2004 2053
2005/* 2054/*
2055 * ext4_ext_check_cache()
2056 * Checks to see if the given block is in the cache.
2057 * If it is, the cached extent is stored in the given
2058 * cache extent pointer. If the cached extent is a hole,
2059 * this routine should be used instead of
2060 * ext4_ext_in_cache if the calling function needs to
2061 * know the size of the hole.
2062 *
2063 * @inode: The file's inode
2064 * @block: The block to look for in the cache
2065 * @ex: Pointer where the cached extent will be stored
2066 * if it contains block
2067 *
2006 * Return 0 if cache is invalid; 1 if the cache is valid 2068 * Return 0 if cache is invalid; 1 if the cache is valid
2007 */ 2069 */
2008static int 2070static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2009ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, 2071 struct ext4_ext_cache *ex){
2010 struct ext4_extent *ex)
2011{
2012 struct ext4_ext_cache *cex; 2072 struct ext4_ext_cache *cex;
2073 struct ext4_sb_info *sbi;
2013 int ret = 0; 2074 int ret = 0;
2014 2075
2015 /* 2076 /*
@@ -2017,26 +2078,60 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2017 */ 2078 */
2018 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2079 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2019 cex = &EXT4_I(inode)->i_cached_extent; 2080 cex = &EXT4_I(inode)->i_cached_extent;
2081 sbi = EXT4_SB(inode->i_sb);
2020 2082
2021 /* has cache valid data? */ 2083 /* has cache valid data? */
2022 if (cex->ec_len == 0) 2084 if (cex->ec_len == 0)
2023 goto errout; 2085 goto errout;
2024 2086
2025 if (in_range(block, cex->ec_block, cex->ec_len)) { 2087 if (in_range(block, cex->ec_block, cex->ec_len)) {
2026 ex->ee_block = cpu_to_le32(cex->ec_block); 2088 memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2027 ext4_ext_store_pblock(ex, cex->ec_start);
2028 ex->ee_len = cpu_to_le16(cex->ec_len);
2029 ext_debug("%u cached by %u:%u:%llu\n", 2089 ext_debug("%u cached by %u:%u:%llu\n",
2030 block, 2090 block,
2031 cex->ec_block, cex->ec_len, cex->ec_start); 2091 cex->ec_block, cex->ec_len, cex->ec_start);
2032 ret = 1; 2092 ret = 1;
2033 } 2093 }
2034errout: 2094errout:
2095 if (!ret)
2096 sbi->extent_cache_misses++;
2097 else
2098 sbi->extent_cache_hits++;
2035 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 2099 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2036 return ret; 2100 return ret;
2037} 2101}
2038 2102
2039/* 2103/*
2104 * ext4_ext_in_cache()
2105 * Checks to see if the given block is in the cache.
2106 * If it is, the cached extent is stored in the given
2107 * extent pointer.
2108 *
2109 * @inode: The file's inode
2110 * @block: The block to look for in the cache
2111 * @ex: Pointer where the cached extent will be stored
2112 * if it contains block
2113 *
2114 * Return 0 if cache is invalid; 1 if the cache is valid
2115 */
2116static int
2117ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2118 struct ext4_extent *ex)
2119{
2120 struct ext4_ext_cache cex;
2121 int ret = 0;
2122
2123 if (ext4_ext_check_cache(inode, block, &cex)) {
2124 ex->ee_block = cpu_to_le32(cex.ec_block);
2125 ext4_ext_store_pblock(ex, cex.ec_start);
2126 ex->ee_len = cpu_to_le16(cex.ec_len);
2127 ret = 1;
2128 }
2129
2130 return ret;
2131}
2132
2133
2134/*
2040 * ext4_ext_rm_idx: 2135 * ext4_ext_rm_idx:
2041 * removes index from the index block. 2136 * removes index from the index block.
2042 * It's used in truncate case only, thus all requests are for 2137 * It's used in truncate case only, thus all requests are for
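Following the split above, ext4_ext_in_cache() keeps its old contract for existing callers, while new code that also needs hole information can call ext4_ext_check_cache() and inspect the raw ext4_ext_cache entry. The sketch below assumes the convention that ext4_ext_put_gap_in_cache() records a gap with ec_start == 0; that convention and the probe helper itself are illustrative assumptions rather than part of this hunk, and such a helper would have to live in extents.c since ext4_ext_check_cache() is static.

/* Hypothetical probe of the single-extent cache for callers that also
 * care about cached holes (e.g. the punch-hole path).
 */
static void probe_extent_cache(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_ext_cache cex;

	if (!ext4_ext_check_cache(inode, lblk, &cex))
		return;			/* cache cold or block not cached */

	if (cex.ec_start == 0)
		ext_debug("cached hole %u..%u\n", cex.ec_block,
			  cex.ec_block + cex.ec_len - 1);
	else
		ext_debug("cached extent %u:%u:%llu\n", cex.ec_block,
			  cex.ec_len, (unsigned long long)cex.ec_start);
}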
@@ -2163,8 +2258,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2163 ext4_free_blocks(handle, inode, NULL, start, num, flags); 2258 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2164 } else if (from == le32_to_cpu(ex->ee_block) 2259 } else if (from == le32_to_cpu(ex->ee_block)
2165 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2260 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2166 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", 2261 /* head removal */
2167 from, to, le32_to_cpu(ex->ee_block), ee_len); 2262 ext4_lblk_t num;
2263 ext4_fsblk_t start;
2264
2265 num = to - from;
2266 start = ext4_ext_pblock(ex);
2267
2268 ext_debug("free first %u blocks starting %llu\n", num, start);
2269 ext4_free_blocks(handle, inode, 0, start, num, flags);
2270
2168 } else { 2271 } else {
2169 printk(KERN_INFO "strange request: removal(2) " 2272 printk(KERN_INFO "strange request: removal(2) "
2170 "%u-%u from %u:%u\n", 2273 "%u-%u from %u:%u\n",
@@ -2173,9 +2276,22 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2173 return 0; 2276 return 0;
2174} 2277}
2175 2278
2279
2280/*
2281 * ext4_ext_rm_leaf() Removes the extents associated with the
2282 * blocks appearing between "start" and "end", and splits the extents
2283 * if "start" and "end" appear in the same extent
2284 *
2285 * @handle: The journal handle
2286 * @inode: The file's inode
2287 * @path: The path to the leaf
2288 * @start: The first block to remove
2289 * @end: The last block to remove
2290 */
2176static int 2291static int
2177ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2292ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2178 struct ext4_ext_path *path, ext4_lblk_t start) 2293 struct ext4_ext_path *path, ext4_lblk_t start,
2294 ext4_lblk_t end)
2179{ 2295{
2180 int err = 0, correct_index = 0; 2296 int err = 0, correct_index = 0;
2181 int depth = ext_depth(inode), credits; 2297 int depth = ext_depth(inode), credits;
@@ -2186,6 +2302,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2186 unsigned short ex_ee_len; 2302 unsigned short ex_ee_len;
2187 unsigned uninitialized = 0; 2303 unsigned uninitialized = 0;
2188 struct ext4_extent *ex; 2304 struct ext4_extent *ex;
2305 struct ext4_map_blocks map;
2189 2306
2190 /* the header must be checked already in ext4_ext_remove_space() */ 2307 /* the header must be checked already in ext4_ext_remove_space() */
2191 ext_debug("truncate since %u in leaf\n", start); 2308 ext_debug("truncate since %u in leaf\n", start);
@@ -2215,31 +2332,95 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2215 path[depth].p_ext = ex; 2332 path[depth].p_ext = ex;
2216 2333
2217 a = ex_ee_block > start ? ex_ee_block : start; 2334 a = ex_ee_block > start ? ex_ee_block : start;
2218 b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ? 2335 b = ex_ee_block+ex_ee_len - 1 < end ?
2219 ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK; 2336 ex_ee_block+ex_ee_len - 1 : end;
2220 2337
2221 ext_debug(" border %u:%u\n", a, b); 2338 ext_debug(" border %u:%u\n", a, b);
2222 2339
2223 if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) { 2340 /* If this extent is beyond the end of the hole, skip it */
2224 block = 0; 2341 if (end <= ex_ee_block) {
2225 num = 0; 2342 ex--;
2226 BUG(); 2343 ex_ee_block = le32_to_cpu(ex->ee_block);
2344 ex_ee_len = ext4_ext_get_actual_len(ex);
2345 continue;
2346 } else if (a != ex_ee_block &&
2347 b != ex_ee_block + ex_ee_len - 1) {
2348 /*
2349 * If this is a truncate, then this condition should
2350 * never happen because at least one of the end points
2351 * needs to be on the edge of the extent.
2352 */
2353 if (end == EXT_MAX_BLOCK) {
2354 ext_debug(" bad truncate %u:%u\n",
2355 start, end);
2356 block = 0;
2357 num = 0;
2358 err = -EIO;
2359 goto out;
2360 }
2361 /*
2362 * else this is a hole punch, so the extent needs to
2363 * be split since neither edge of the hole is on the
2364 * extent edge
2365 */
2366 else {
2367 map.m_pblk = ext4_ext_pblock(ex);
2368 map.m_lblk = ex_ee_block;
2369 map.m_len = b - ex_ee_block;
2370
2371 err = ext4_split_extent(handle,
2372 inode, path, &map, 0,
2373 EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
2374 EXT4_GET_BLOCKS_PRE_IO);
2375
2376 if (err < 0)
2377 goto out;
2378
2379 ex_ee_len = ext4_ext_get_actual_len(ex);
2380
2381 b = ex_ee_block+ex_ee_len - 1 < end ?
2382 ex_ee_block+ex_ee_len - 1 : end;
2383
2384 /* Then remove tail of this extent */
2385 block = ex_ee_block;
2386 num = a - block;
2387 }
2227 } else if (a != ex_ee_block) { 2388 } else if (a != ex_ee_block) {
2228 /* remove tail of the extent */ 2389 /* remove tail of the extent */
2229 block = ex_ee_block; 2390 block = ex_ee_block;
2230 num = a - block; 2391 num = a - block;
2231 } else if (b != ex_ee_block + ex_ee_len - 1) { 2392 } else if (b != ex_ee_block + ex_ee_len - 1) {
2232 /* remove head of the extent */ 2393 /* remove head of the extent */
2233 block = a; 2394 block = b;
2234 num = b - a; 2395 num = ex_ee_block + ex_ee_len - b;
2235 /* there is no "make a hole" API yet */ 2396
2236 BUG(); 2397 /*
2398 * If this is a truncate, this condition
2399 * should never happen
2400 */
2401 if (end == EXT_MAX_BLOCK) {
2402 ext_debug(" bad truncate %u:%u\n",
2403 start, end);
2404 err = -EIO;
2405 goto out;
2406 }
2237 } else { 2407 } else {
2238 /* remove whole extent: excellent! */ 2408 /* remove whole extent: excellent! */
2239 block = ex_ee_block; 2409 block = ex_ee_block;
2240 num = 0; 2410 num = 0;
2241 BUG_ON(a != ex_ee_block); 2411 if (a != ex_ee_block) {
2242 BUG_ON(b != ex_ee_block + ex_ee_len - 1); 2412 ext_debug(" bad truncate %u:%u\n",
2413 start, end);
2414 err = -EIO;
2415 goto out;
2416 }
2417
2418 if (b != ex_ee_block + ex_ee_len - 1) {
2419 ext_debug(" bad truncate %u:%u\n",
2420 start, end);
2421 err = -EIO;
2422 goto out;
2423 }
2243 } 2424 }
2244 2425
2245 /* 2426 /*
@@ -2270,7 +2451,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2270 if (num == 0) { 2451 if (num == 0) {
2271 /* this extent is removed; mark slot entirely unused */ 2452 /* this extent is removed; mark slot entirely unused */
2272 ext4_ext_store_pblock(ex, 0); 2453 ext4_ext_store_pblock(ex, 0);
2273 le16_add_cpu(&eh->eh_entries, -1); 2454 } else if (block != ex_ee_block) {
2455 /*
2456 * If this was a head removal, then we need to update
2457 * the physical block since it is now at a different
2458 * location
2459 */
2460 ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a));
2274 } 2461 }
2275 2462
2276 ex->ee_block = cpu_to_le32(block); 2463 ex->ee_block = cpu_to_le32(block);
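For the head-removal case handled above, the arithmetic is easiest to follow with concrete numbers (invented purely for illustration):

/* Worked example of head removal (numbers are illustrative only).
 *   before: logical 100..109 -> physical 5000..5009, a = 100, b = 103
 *   block  = b                         = 103
 *   num    = ex_ee_block + ex_ee_len - b = 7
 *   pblock = 5000 + (b - a)            = 5003
 * ext4_remove_blocks() frees the (b - a) = 3 head blocks at physical
 * 5000..5002, and the surviving extent maps logical 103..109 to
 * physical 5003..5009.
 */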
@@ -2286,6 +2473,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2286 if (err) 2473 if (err)
2287 goto out; 2474 goto out;
2288 2475
2476 /*
2477 * If the extent was completely released,
2478 * we need to remove it from the leaf
2479 */
2480 if (num == 0) {
2481 if (end != EXT_MAX_BLOCK) {
2482 /*
2483 * For hole punching, we need to scoot all the
2484 * extents up when an extent is removed so that
2485 * we don't have blank extents in the middle
2486 */
2487 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2488 sizeof(struct ext4_extent));
2489
2490 /* Now get rid of the one at the end */
2491 memset(EXT_LAST_EXTENT(eh), 0,
2492 sizeof(struct ext4_extent));
2493 }
2494 le16_add_cpu(&eh->eh_entries, -1);
2495 }
2496
2289 ext_debug("new extent: %u:%u:%llu\n", block, num, 2497 ext_debug("new extent: %u:%u:%llu\n", block, num,
2290 ext4_ext_pblock(ex)); 2498 ext4_ext_pblock(ex));
2291 ex--; 2499 ex--;
@@ -2326,7 +2534,8 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
2326 return 1; 2534 return 1;
2327} 2535}
2328 2536
2329static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) 2537static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2538 ext4_lblk_t end)
2330{ 2539{
2331 struct super_block *sb = inode->i_sb; 2540 struct super_block *sb = inode->i_sb;
2332 int depth = ext_depth(inode); 2541 int depth = ext_depth(inode);
@@ -2365,7 +2574,8 @@ again:
2365 while (i >= 0 && err == 0) { 2574 while (i >= 0 && err == 0) {
2366 if (i == depth) { 2575 if (i == depth) {
2367 /* this is leaf block */ 2576 /* this is leaf block */
2368 err = ext4_ext_rm_leaf(handle, inode, path, start); 2577 err = ext4_ext_rm_leaf(handle, inode, path,
2578 start, end);
2369 /* root level has p_bh == NULL, brelse() eats this */ 2579 /* root level has p_bh == NULL, brelse() eats this */
2370 brelse(path[i].p_bh); 2580 brelse(path[i].p_bh);
2371 path[i].p_bh = NULL; 2581 path[i].p_bh = NULL;
@@ -2529,6 +2739,195 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2529 return ret; 2739 return ret;
2530} 2740}
2531 2741
2742/*
2743 * used by extent splitting.
2744 */
2745#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
2746 due to ENOSPC */
2747#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
2748#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
2749
2750/*
2751 * ext4_split_extent_at() splits an extent at given block.
2752 *
2753 * @handle: the journal handle
2754 * @inode: the file inode
2755 * @path: the path to the extent
2756 * @split: the logical block where the extent is split.
2757 * @split_flag: indicates if the extent can be zeroed out if the split fails, and
2758 * the states (init or uninit) of the new extents.
2759 * @flags: flags used to insert new extent to extent tree.
2760 *
2761 *
2762 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the states
2763 * of which are determined by split_flag.
2764 *
2765 * There are two cases:
2766 * a> the extent is split into two extents.
2767 * b> no split is needed, and we just mark the extent.
2768 *
2769 * return 0 on success.
2770 */
2771static int ext4_split_extent_at(handle_t *handle,
2772 struct inode *inode,
2773 struct ext4_ext_path *path,
2774 ext4_lblk_t split,
2775 int split_flag,
2776 int flags)
2777{
2778 ext4_fsblk_t newblock;
2779 ext4_lblk_t ee_block;
2780 struct ext4_extent *ex, newex, orig_ex;
2781 struct ext4_extent *ex2 = NULL;
2782 unsigned int ee_len, depth;
2783 int err = 0;
2784
2785 ext_debug("ext4_split_extents_at: inode %lu, logical"
2786 "block %llu\n", inode->i_ino, (unsigned long long)split);
2787
2788 ext4_ext_show_leaf(inode, path);
2789
2790 depth = ext_depth(inode);
2791 ex = path[depth].p_ext;
2792 ee_block = le32_to_cpu(ex->ee_block);
2793 ee_len = ext4_ext_get_actual_len(ex);
2794 newblock = split - ee_block + ext4_ext_pblock(ex);
2795
2796 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2797
2798 err = ext4_ext_get_access(handle, inode, path + depth);
2799 if (err)
2800 goto out;
2801
2802 if (split == ee_block) {
2803 /*
2804 * case b: block @split is the block that the extent begins with
2805 * then we just change the state of the extent, and splitting
2806 * is not needed.
2807 */
2808 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2809 ext4_ext_mark_uninitialized(ex);
2810 else
2811 ext4_ext_mark_initialized(ex);
2812
2813 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2814 ext4_ext_try_to_merge(inode, path, ex);
2815
2816 err = ext4_ext_dirty(handle, inode, path + depth);
2817 goto out;
2818 }
2819
2820 /* case a */
2821 memcpy(&orig_ex, ex, sizeof(orig_ex));
2822 ex->ee_len = cpu_to_le16(split - ee_block);
2823 if (split_flag & EXT4_EXT_MARK_UNINIT1)
2824 ext4_ext_mark_uninitialized(ex);
2825
2826 /*
2827 * path may lead to new leaf, not to original leaf any more
2828 * after ext4_ext_insert_extent() returns,
2829 */
2830 err = ext4_ext_dirty(handle, inode, path + depth);
2831 if (err)
2832 goto fix_extent_len;
2833
2834 ex2 = &newex;
2835 ex2->ee_block = cpu_to_le32(split);
2836 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
2837 ext4_ext_store_pblock(ex2, newblock);
2838 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2839 ext4_ext_mark_uninitialized(ex2);
2840
2841 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2842 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2843 err = ext4_ext_zeroout(inode, &orig_ex);
2844 if (err)
2845 goto fix_extent_len;
2846 /* update the extent length and mark as initialized */
2847 ex->ee_len = cpu_to_le16(ee_len);
2848 ext4_ext_try_to_merge(inode, path, ex);
2849 err = ext4_ext_dirty(handle, inode, path + depth);
2850 goto out;
2851 } else if (err)
2852 goto fix_extent_len;
2853
2854out:
2855 ext4_ext_show_leaf(inode, path);
2856 return err;
2857
2858fix_extent_len:
2859 ex->ee_len = orig_ex.ee_len;
2860 ext4_ext_dirty(handle, inode, path + depth);
2861 return err;
2862}
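
The second half produced by ext4_split_extent_at() keeps the original logical-to-physical offset, which is why its start block is computed as split - ee_block + pblock. A minimal sketch of the arithmetic; struct ext_desc is a flattened illustration, not the on-disk struct ext4_extent:

    #include <stdio.h>

    /* Flattened view of an extent: logical start, length, physical start. */
    struct ext_desc { unsigned int lblk, len; unsigned long long pblk; };

    static void split_at(struct ext_desc e, unsigned int split,
                         struct ext_desc *lo, struct ext_desc *hi)
    {
        /* caller guarantees e.lblk < split < e.lblk + e.len */
        lo->lblk = e.lblk;
        lo->len  = split - e.lblk;
        lo->pblk = e.pblk;
        hi->lblk = split;
        hi->len  = e.len - lo->len;
        hi->pblk = e.pblk + lo->len;   /* newblock = split - ee_block + pblock */
    }

    int main(void)
    {
        struct ext_desc e = { 100, 50, 5000 }, lo, hi;

        split_at(e, 120, &lo, &hi);
        printf("[%u, +%u) @ %llu and [%u, +%u) @ %llu\n",
               lo.lblk, lo.len, lo.pblk, hi.lblk, hi.len, hi.pblk);
        return 0;
    }
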
2863
2864/*
2865 * ext4_split_extent() splits an extent and marks the extent covered
2866 * by @map as split_flag indicates
2867 *
2868 * It may result in splitting the extent into multiple extents (up to three).
2869 * There are three possibilities:
2870 * a> There is no split required
2871 * b> Splits into two extents: the split happens at either end of the extent
2872 * c> Splits into three extents: someone is splitting in the middle of the extent
2873 *
2874 */
2875static int ext4_split_extent(handle_t *handle,
2876 struct inode *inode,
2877 struct ext4_ext_path *path,
2878 struct ext4_map_blocks *map,
2879 int split_flag,
2880 int flags)
2881{
2882 ext4_lblk_t ee_block;
2883 struct ext4_extent *ex;
2884 unsigned int ee_len, depth;
2885 int err = 0;
2886 int uninitialized;
2887 int split_flag1, flags1;
2888
2889 depth = ext_depth(inode);
2890 ex = path[depth].p_ext;
2891 ee_block = le32_to_cpu(ex->ee_block);
2892 ee_len = ext4_ext_get_actual_len(ex);
2893 uninitialized = ext4_ext_is_uninitialized(ex);
2894
2895 if (map->m_lblk + map->m_len < ee_block + ee_len) {
2896 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
2897 EXT4_EXT_MAY_ZEROOUT : 0;
2898 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
2899 if (uninitialized)
2900 split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
2901 EXT4_EXT_MARK_UNINIT2;
2902 err = ext4_split_extent_at(handle, inode, path,
2903 map->m_lblk + map->m_len, split_flag1, flags1);
2904 if (err)
2905 goto out;
2906 }
2907
2908 ext4_ext_drop_refs(path);
2909 path = ext4_ext_find_extent(inode, map->m_lblk, path);
2910 if (IS_ERR(path))
2911 return PTR_ERR(path);
2912
2913 if (map->m_lblk >= ee_block) {
2914 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
2915 EXT4_EXT_MAY_ZEROOUT : 0;
2916 if (uninitialized)
2917 split_flag1 |= EXT4_EXT_MARK_UNINIT1;
2918 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2919 split_flag1 |= EXT4_EXT_MARK_UNINIT2;
2920 err = ext4_split_extent_at(handle, inode, path,
2921 map->m_lblk, split_flag1, flags);
2922 if (err)
2923 goto out;
2924 }
2925
2926 ext4_ext_show_leaf(inode, path);
2927out:
2928 return err ? err : map->m_len;
2929}
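
ext4_split_extent() calls ext4_split_extent_at() at most twice: once past the end of the mapped range if the extent extends beyond it, and once at map->m_lblk if the range starts inside the extent (a split exactly at ee_block degenerates into just re-marking the extent). A small illustrative helper, with made-up names, that counts the split points:

    #include <stdio.h>

    /* Count the split points (0, 1 or 2) needed so that the mapped range
     * [m_lblk, m_lblk + m_len) ends up as its own extent inside
     * [ee_block, ee_block + ee_len). Purely illustrative. */
    static int splits_needed(unsigned int ee_block, unsigned int ee_len,
                             unsigned int m_lblk, unsigned int m_len)
    {
        int n = 0;

        if (m_lblk + m_len < ee_block + ee_len)
            n++;                       /* split after the mapped range  */
        if (m_lblk > ee_block)
            n++;                       /* split before the mapped range */
        return n;
    }

    int main(void)
    {
        printf("%d\n", splits_needed(0, 100, 0, 100));   /* 0: covers the extent */
        printf("%d\n", splits_needed(0, 100, 0, 40));    /* 1: tail is kept      */
        printf("%d\n", splits_needed(0, 100, 30, 40));   /* 2: middle of extent  */
        return 0;
    }
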
2930
2532#define EXT4_EXT_ZERO_LEN 7 2931#define EXT4_EXT_ZERO_LEN 7
2533/* 2932/*
2534 * This function is called by ext4_ext_map_blocks() if someone tries to write 2933 * This function is called by ext4_ext_map_blocks() if someone tries to write
@@ -2545,17 +2944,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2545 struct ext4_map_blocks *map, 2944 struct ext4_map_blocks *map,
2546 struct ext4_ext_path *path) 2945 struct ext4_ext_path *path)
2547{ 2946{
2548 struct ext4_extent *ex, newex, orig_ex; 2947 struct ext4_map_blocks split_map;
2549 struct ext4_extent *ex1 = NULL; 2948 struct ext4_extent zero_ex;
2550 struct ext4_extent *ex2 = NULL; 2949 struct ext4_extent *ex;
2551 struct ext4_extent *ex3 = NULL;
2552 struct ext4_extent_header *eh;
2553 ext4_lblk_t ee_block, eof_block; 2950 ext4_lblk_t ee_block, eof_block;
2554 unsigned int allocated, ee_len, depth; 2951 unsigned int allocated, ee_len, depth;
2555 ext4_fsblk_t newblock;
2556 int err = 0; 2952 int err = 0;
2557 int ret = 0; 2953 int split_flag = 0;
2558 int may_zeroout;
2559 2954
2560 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 2955 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
2561 "block %llu, max_blocks %u\n", inode->i_ino, 2956 "block %llu, max_blocks %u\n", inode->i_ino,
@@ -2567,280 +2962,86 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2567 eof_block = map->m_lblk + map->m_len; 2962 eof_block = map->m_lblk + map->m_len;
2568 2963
2569 depth = ext_depth(inode); 2964 depth = ext_depth(inode);
2570 eh = path[depth].p_hdr;
2571 ex = path[depth].p_ext; 2965 ex = path[depth].p_ext;
2572 ee_block = le32_to_cpu(ex->ee_block); 2966 ee_block = le32_to_cpu(ex->ee_block);
2573 ee_len = ext4_ext_get_actual_len(ex); 2967 ee_len = ext4_ext_get_actual_len(ex);
2574 allocated = ee_len - (map->m_lblk - ee_block); 2968 allocated = ee_len - (map->m_lblk - ee_block);
2575 newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
2576
2577 ex2 = ex;
2578 orig_ex.ee_block = ex->ee_block;
2579 orig_ex.ee_len = cpu_to_le16(ee_len);
2580 ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
2581 2969
2970 WARN_ON(map->m_lblk < ee_block);
2582 /* 2971 /*
2583 * It is safe to convert extent to initialized via explicit 2972 * It is safe to convert extent to initialized via explicit
2584 * zeroout only if extent is fully inside i_size or new_size. 2973 * zeroout only if extent is fully inside i_size or new_size.
2585 */ 2974 */
2586 may_zeroout = ee_block + ee_len <= eof_block; 2975 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
2587 2976
2588 err = ext4_ext_get_access(handle, inode, path + depth);
2589 if (err)
2590 goto out;
2591 /* If extent has less than 2*EXT4_EXT_ZERO_LEN zeroout directly */ 2977 /* If extent has less than 2*EXT4_EXT_ZERO_LEN zeroout directly */
2592 if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) { 2978 if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
2593 err = ext4_ext_zeroout(inode, &orig_ex); 2979 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2980 err = ext4_ext_zeroout(inode, ex);
2594 if (err) 2981 if (err)
2595 goto fix_extent_len;
2596 /* update the extent length and mark as initialized */
2597 ex->ee_block = orig_ex.ee_block;
2598 ex->ee_len = orig_ex.ee_len;
2599 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
2600 ext4_ext_dirty(handle, inode, path + depth);
2601 /* zeroed the full extent */
2602 return allocated;
2603 }
2604
2605 /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
2606 if (map->m_lblk > ee_block) {
2607 ex1 = ex;
2608 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2609 ext4_ext_mark_uninitialized(ex1);
2610 ex2 = &newex;
2611 }
2612 /*
2613 * for sanity, update the length of the ex2 extent before
2614 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2615 * overlap of blocks.
2616 */
2617 if (!ex1 && allocated > map->m_len)
2618 ex2->ee_len = cpu_to_le16(map->m_len);
2619 /* ex3: to ee_block + ee_len : uninitialised */
2620 if (allocated > map->m_len) {
2621 unsigned int newdepth;
2622 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
2623 if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
2624 /*
2625 * map->m_lblk == ee_block is handled by the zerouout
2626 * at the beginning.
2627 * Mark first half uninitialized.
2628 * Mark second half initialized and zero out the
2629 * initialized extent
2630 */
2631 ex->ee_block = orig_ex.ee_block;
2632 ex->ee_len = cpu_to_le16(ee_len - allocated);
2633 ext4_ext_mark_uninitialized(ex);
2634 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
2635 ext4_ext_dirty(handle, inode, path + depth);
2636
2637 ex3 = &newex;
2638 ex3->ee_block = cpu_to_le32(map->m_lblk);
2639 ext4_ext_store_pblock(ex3, newblock);
2640 ex3->ee_len = cpu_to_le16(allocated);
2641 err = ext4_ext_insert_extent(handle, inode, path,
2642 ex3, 0);
2643 if (err == -ENOSPC) {
2644 err = ext4_ext_zeroout(inode, &orig_ex);
2645 if (err)
2646 goto fix_extent_len;
2647 ex->ee_block = orig_ex.ee_block;
2648 ex->ee_len = orig_ex.ee_len;
2649 ext4_ext_store_pblock(ex,
2650 ext4_ext_pblock(&orig_ex));
2651 ext4_ext_dirty(handle, inode, path + depth);
2652 /* blocks available from map->m_lblk */
2653 return allocated;
2654
2655 } else if (err)
2656 goto fix_extent_len;
2657
2658 /*
2659 * We need to zero out the second half because
2660 * an fallocate request can update file size and
2661 * converting the second half to initialized extent
2662 * implies that we can leak some junk data to user
2663 * space.
2664 */
2665 err = ext4_ext_zeroout(inode, ex3);
2666 if (err) {
2667 /*
2668 * We should actually mark the
2669 * second half as uninit and return error
2670 * Insert would have changed the extent
2671 */
2672 depth = ext_depth(inode);
2673 ext4_ext_drop_refs(path);
2674 path = ext4_ext_find_extent(inode, map->m_lblk,
2675 path);
2676 if (IS_ERR(path)) {
2677 err = PTR_ERR(path);
2678 return err;
2679 }
2680 /* get the second half extent details */
2681 ex = path[depth].p_ext;
2682 err = ext4_ext_get_access(handle, inode,
2683 path + depth);
2684 if (err)
2685 return err;
2686 ext4_ext_mark_uninitialized(ex);
2687 ext4_ext_dirty(handle, inode, path + depth);
2688 return err;
2689 }
2690
2691 /* zeroed the second half */
2692 return allocated;
2693 }
2694 ex3 = &newex;
2695 ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
2696 ext4_ext_store_pblock(ex3, newblock + map->m_len);
2697 ex3->ee_len = cpu_to_le16(allocated - map->m_len);
2698 ext4_ext_mark_uninitialized(ex3);
2699 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2700 if (err == -ENOSPC && may_zeroout) {
2701 err = ext4_ext_zeroout(inode, &orig_ex);
2702 if (err)
2703 goto fix_extent_len;
2704 /* update the extent length and mark as initialized */
2705 ex->ee_block = orig_ex.ee_block;
2706 ex->ee_len = orig_ex.ee_len;
2707 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
2708 ext4_ext_dirty(handle, inode, path + depth);
2709 /* zeroed the full extent */
2710 /* blocks available from map->m_lblk */
2711 return allocated;
2712
2713 } else if (err)
2714 goto fix_extent_len;
2715 /*
2716 * The depth, and hence eh & ex might change
2717 * as part of the insert above.
2718 */
2719 newdepth = ext_depth(inode);
2720 /*
2721 * update the extent length after successful insert of the
2722 * split extent
2723 */
2724 ee_len -= ext4_ext_get_actual_len(ex3);
2725 orig_ex.ee_len = cpu_to_le16(ee_len);
2726 may_zeroout = ee_block + ee_len <= eof_block;
2727
2728 depth = newdepth;
2729 ext4_ext_drop_refs(path);
2730 path = ext4_ext_find_extent(inode, map->m_lblk, path);
2731 if (IS_ERR(path)) {
2732 err = PTR_ERR(path);
2733 goto out; 2982 goto out;
2734 }
2735 eh = path[depth].p_hdr;
2736 ex = path[depth].p_ext;
2737 if (ex2 != &newex)
2738 ex2 = ex;
2739 2983
2740 err = ext4_ext_get_access(handle, inode, path + depth); 2984 err = ext4_ext_get_access(handle, inode, path + depth);
2741 if (err) 2985 if (err)
2742 goto out; 2986 goto out;
2743 2987 ext4_ext_mark_initialized(ex);
2744 allocated = map->m_len; 2988 ext4_ext_try_to_merge(inode, path, ex);
2745 2989 err = ext4_ext_dirty(handle, inode, path + depth);
2746 /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying 2990 goto out;
2747 * to insert a extent in the middle zerout directly
2748 * otherwise give the extent a chance to merge to left
2749 */
2750 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2751 map->m_lblk != ee_block && may_zeroout) {
2752 err = ext4_ext_zeroout(inode, &orig_ex);
2753 if (err)
2754 goto fix_extent_len;
2755 /* update the extent length and mark as initialized */
2756 ex->ee_block = orig_ex.ee_block;
2757 ex->ee_len = orig_ex.ee_len;
2758 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
2759 ext4_ext_dirty(handle, inode, path + depth);
2760 /* zero out the first half */
2761 /* blocks available from map->m_lblk */
2762 return allocated;
2763 }
2764 }
2765 /*
2766 * If there was a change of depth as part of the
2767 * insertion of ex3 above, we need to update the length
2768 * of the ex1 extent again here
2769 */
2770 if (ex1 && ex1 != ex) {
2771 ex1 = ex;
2772 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2773 ext4_ext_mark_uninitialized(ex1);
2774 ex2 = &newex;
2775 }
2776 /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
2777 ex2->ee_block = cpu_to_le32(map->m_lblk);
2778 ext4_ext_store_pblock(ex2, newblock);
2779 ex2->ee_len = cpu_to_le16(allocated);
2780 if (ex2 != ex)
2781 goto insert;
2782 /*
2783 * New (initialized) extent starts from the first block
2784 * in the current extent. i.e., ex2 == ex
2785 * We have to see if it can be merged with the extent
2786 * on the left.
2787 */
2788 if (ex2 > EXT_FIRST_EXTENT(eh)) {
2789 /*
2790 * To merge left, pass "ex2 - 1" to try_to_merge(),
2791 * since it merges towards right _only_.
2792 */
2793 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2794 if (ret) {
2795 err = ext4_ext_correct_indexes(handle, inode, path);
2796 if (err)
2797 goto out;
2798 depth = ext_depth(inode);
2799 ex2--;
2800 }
2801 } 2991 }
2992
2802 /* 2993 /*
2803 * Try to Merge towards right. This might be required 2994 * four cases:
2804 * only when the whole extent is being written to. 2995 * 1. split the extent into three extents.
2805 * i.e. ex2 == ex and ex3 == NULL. 2996 * 2. split the extent into two extents, zeroout the first half.
2997 * 3. split the extent into two extents, zeroout the second half.
2998 * 4. split the extent into two extents without zeroout.
2806 */ 2999 */
2807 if (!ex3) { 3000 split_map.m_lblk = map->m_lblk;
2808 ret = ext4_ext_try_to_merge(inode, path, ex2); 3001 split_map.m_len = map->m_len;
2809 if (ret) { 3002
2810 err = ext4_ext_correct_indexes(handle, inode, path); 3003 if (allocated > map->m_len) {
3004 if (allocated <= EXT4_EXT_ZERO_LEN &&
3005 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3006 /* case 3 */
3007 zero_ex.ee_block =
3008 cpu_to_le32(map->m_lblk);
3009 zero_ex.ee_len = cpu_to_le16(allocated);
3010 ext4_ext_store_pblock(&zero_ex,
3011 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3012 err = ext4_ext_zeroout(inode, &zero_ex);
2811 if (err) 3013 if (err)
2812 goto out; 3014 goto out;
3015 split_map.m_lblk = map->m_lblk;
3016 split_map.m_len = allocated;
3017 } else if ((map->m_lblk - ee_block + map->m_len <
3018 EXT4_EXT_ZERO_LEN) &&
3019 (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3020 /* case 2 */
3021 if (map->m_lblk != ee_block) {
3022 zero_ex.ee_block = ex->ee_block;
3023 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3024 ee_block);
3025 ext4_ext_store_pblock(&zero_ex,
3026 ext4_ext_pblock(ex));
3027 err = ext4_ext_zeroout(inode, &zero_ex);
3028 if (err)
3029 goto out;
3030 }
3031
3032 split_map.m_lblk = ee_block;
3033 split_map.m_len = map->m_lblk - ee_block + map->m_len;
3034 allocated = map->m_len;
2813 } 3035 }
2814 } 3036 }
2815 /* Mark modified extent as dirty */ 3037
2816 err = ext4_ext_dirty(handle, inode, path + depth); 3038 allocated = ext4_split_extent(handle, inode, path,
2817 goto out; 3039 &split_map, split_flag, 0);
2818insert: 3040 if (allocated < 0)
2819 err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); 3041 err = allocated;
2820 if (err == -ENOSPC && may_zeroout) { 3042
2821 err = ext4_ext_zeroout(inode, &orig_ex);
2822 if (err)
2823 goto fix_extent_len;
2824 /* update the extent length and mark as initialized */
2825 ex->ee_block = orig_ex.ee_block;
2826 ex->ee_len = orig_ex.ee_len;
2827 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
2828 ext4_ext_dirty(handle, inode, path + depth);
2829 /* zero out the first half */
2830 return allocated;
2831 } else if (err)
2832 goto fix_extent_len;
2833out: 3043out:
2834 ext4_ext_show_leaf(inode, path);
2835 return err ? err : allocated; 3044 return err ? err : allocated;
2836
2837fix_extent_len:
2838 ex->ee_block = orig_ex.ee_block;
2839 ex->ee_len = orig_ex.ee_len;
2840 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
2841 ext4_ext_mark_uninitialized(ex);
2842 ext4_ext_dirty(handle, inode, path + depth);
2843 return err;
2844} 3045}
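
Aside from the small-extent fast path that zeroes the whole extent, the rewritten ext4_ext_convert_to_initialized() only has to pick the range handed to ext4_split_extent(), possibly widening it after zeroing out a short head or tail. A hedged sketch of that choice, assuming zeroout is permitted (the extent lies fully inside i_size); choose_split_range() and struct range are illustrative names, not kernel code:

    #include <stdio.h>

    #define ZERO_LEN 7   /* mirrors EXT4_EXT_ZERO_LEN */

    struct range { unsigned int lblk, len; };

    /* Pick the range passed on to the split. */
    static struct range choose_split_range(unsigned int ee_block, unsigned int ee_len,
                                           unsigned int m_lblk, unsigned int m_len)
    {
        unsigned int allocated = ee_len - (m_lblk - ee_block);
        struct range r = { m_lblk, m_len };

        if (allocated <= m_len)
            return r;                               /* write reaches the extent end */
        if (allocated <= ZERO_LEN) {
            r.len = allocated;                      /* case 3: tail gets zeroed out */
        } else if (m_lblk - ee_block + m_len < ZERO_LEN) {
            r.lblk = ee_block;                      /* case 2: head gets zeroed out */
            r.len = m_lblk - ee_block + m_len;
        }
        return r;                                   /* otherwise plain split (1/4)  */
    }

    int main(void)
    {
        /* a 3-block write at block 102 into an uninitialized extent [100, 120) */
        struct range r = choose_split_range(100, 20, 102, 3);

        printf("split range [%u, +%u)\n", r.lblk, r.len);
        return 0;
    }
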
2845 3046
2846/* 3047/*
@@ -2871,15 +3072,11 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2871 struct ext4_ext_path *path, 3072 struct ext4_ext_path *path,
2872 int flags) 3073 int flags)
2873{ 3074{
2874 struct ext4_extent *ex, newex, orig_ex; 3075 ext4_lblk_t eof_block;
2875 struct ext4_extent *ex1 = NULL; 3076 ext4_lblk_t ee_block;
2876 struct ext4_extent *ex2 = NULL; 3077 struct ext4_extent *ex;
2877 struct ext4_extent *ex3 = NULL; 3078 unsigned int ee_len;
2878 ext4_lblk_t ee_block, eof_block; 3079 int split_flag = 0, depth;
2879 unsigned int allocated, ee_len, depth;
2880 ext4_fsblk_t newblock;
2881 int err = 0;
2882 int may_zeroout;
2883 3080
2884 ext_debug("ext4_split_unwritten_extents: inode %lu, logical" 3081 ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
2885 "block %llu, max_blocks %u\n", inode->i_ino, 3082 "block %llu, max_blocks %u\n", inode->i_ino,
@@ -2889,156 +3086,22 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2889 inode->i_sb->s_blocksize_bits; 3086 inode->i_sb->s_blocksize_bits;
2890 if (eof_block < map->m_lblk + map->m_len) 3087 if (eof_block < map->m_lblk + map->m_len)
2891 eof_block = map->m_lblk + map->m_len; 3088 eof_block = map->m_lblk + map->m_len;
2892
2893 depth = ext_depth(inode);
2894 ex = path[depth].p_ext;
2895 ee_block = le32_to_cpu(ex->ee_block);
2896 ee_len = ext4_ext_get_actual_len(ex);
2897 allocated = ee_len - (map->m_lblk - ee_block);
2898 newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
2899
2900 ex2 = ex;
2901 orig_ex.ee_block = ex->ee_block;
2902 orig_ex.ee_len = cpu_to_le16(ee_len);
2903 ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
2904
2905 /* 3089 /*
2906 * It is safe to convert extent to initialized via explicit 3090 * It is safe to convert extent to initialized via explicit
2907 * zeroout only if extent is fully inside i_size or new_size. 3091 * zeroout only if extent is fully inside i_size or new_size.
2908 */ 3092 */
2909 may_zeroout = ee_block + ee_len <= eof_block; 3093 depth = ext_depth(inode);
2910 3094 ex = path[depth].p_ext;
2911 /* 3095 ee_block = le32_to_cpu(ex->ee_block);
2912 * If the uninitialized extent begins at the same logical 3096 ee_len = ext4_ext_get_actual_len(ex);
2913 * block where the write begins, and the write completely
2914 * covers the extent, then we don't need to split it.
2915 */
2916 if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
2917 return allocated;
2918
2919 err = ext4_ext_get_access(handle, inode, path + depth);
2920 if (err)
2921 goto out;
2922 /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
2923 if (map->m_lblk > ee_block) {
2924 ex1 = ex;
2925 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2926 ext4_ext_mark_uninitialized(ex1);
2927 ex2 = &newex;
2928 }
2929 /*
2930 * for sanity, update the length of the ex2 extent before
2931 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2932 * overlap of blocks.
2933 */
2934 if (!ex1 && allocated > map->m_len)
2935 ex2->ee_len = cpu_to_le16(map->m_len);
2936 /* ex3: to ee_block + ee_len : uninitialised */
2937 if (allocated > map->m_len) {
2938 unsigned int newdepth;
2939 ex3 = &newex;
2940 ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
2941 ext4_ext_store_pblock(ex3, newblock + map->m_len);
2942 ex3->ee_len = cpu_to_le16(allocated - map->m_len);
2943 ext4_ext_mark_uninitialized(ex3);
2944 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
2945 if (err == -ENOSPC && may_zeroout) {
2946 err = ext4_ext_zeroout(inode, &orig_ex);
2947 if (err)
2948 goto fix_extent_len;
2949 /* update the extent length and mark as initialized */
2950 ex->ee_block = orig_ex.ee_block;
2951 ex->ee_len = orig_ex.ee_len;
2952 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
2953 ext4_ext_dirty(handle, inode, path + depth);
2954 /* zeroed the full extent */
2955 /* blocks available from map->m_lblk */
2956 return allocated;
2957
2958 } else if (err)
2959 goto fix_extent_len;
2960 /*
2961 * The depth, and hence eh & ex might change
2962 * as part of the insert above.
2963 */
2964 newdepth = ext_depth(inode);
2965 /*
2966 * update the extent length after successful insert of the
2967 * split extent
2968 */
2969 ee_len -= ext4_ext_get_actual_len(ex3);
2970 orig_ex.ee_len = cpu_to_le16(ee_len);
2971 may_zeroout = ee_block + ee_len <= eof_block;
2972
2973 depth = newdepth;
2974 ext4_ext_drop_refs(path);
2975 path = ext4_ext_find_extent(inode, map->m_lblk, path);
2976 if (IS_ERR(path)) {
2977 err = PTR_ERR(path);
2978 goto out;
2979 }
2980 ex = path[depth].p_ext;
2981 if (ex2 != &newex)
2982 ex2 = ex;
2983 3097
2984 err = ext4_ext_get_access(handle, inode, path + depth); 3098 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
2985 if (err) 3099 split_flag |= EXT4_EXT_MARK_UNINIT2;
2986 goto out;
2987 3100
2988 allocated = map->m_len; 3101 flags |= EXT4_GET_BLOCKS_PRE_IO;
2989 } 3102 return ext4_split_extent(handle, inode, path, map, split_flag, flags);
2990 /*
2991 * If there was a change of depth as part of the
2992 * insertion of ex3 above, we need to update the length
2993 * of the ex1 extent again here
2994 */
2995 if (ex1 && ex1 != ex) {
2996 ex1 = ex;
2997 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2998 ext4_ext_mark_uninitialized(ex1);
2999 ex2 = &newex;
3000 }
3001 /*
3002 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
3003 * using direct I/O, uninitialised still.
3004 */
3005 ex2->ee_block = cpu_to_le32(map->m_lblk);
3006 ext4_ext_store_pblock(ex2, newblock);
3007 ex2->ee_len = cpu_to_le16(allocated);
3008 ext4_ext_mark_uninitialized(ex2);
3009 if (ex2 != ex)
3010 goto insert;
3011 /* Mark modified extent as dirty */
3012 err = ext4_ext_dirty(handle, inode, path + depth);
3013 ext_debug("out here\n");
3014 goto out;
3015insert:
3016 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3017 if (err == -ENOSPC && may_zeroout) {
3018 err = ext4_ext_zeroout(inode, &orig_ex);
3019 if (err)
3020 goto fix_extent_len;
3021 /* update the extent length and mark as initialized */
3022 ex->ee_block = orig_ex.ee_block;
3023 ex->ee_len = orig_ex.ee_len;
3024 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
3025 ext4_ext_dirty(handle, inode, path + depth);
3026 /* zero out the first half */
3027 return allocated;
3028 } else if (err)
3029 goto fix_extent_len;
3030out:
3031 ext4_ext_show_leaf(inode, path);
3032 return err ? err : allocated;
3033
3034fix_extent_len:
3035 ex->ee_block = orig_ex.ee_block;
3036 ex->ee_len = orig_ex.ee_len;
3037 ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
3038 ext4_ext_mark_uninitialized(ex);
3039 ext4_ext_dirty(handle, inode, path + depth);
3040 return err;
3041} 3103}
3104
3042static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3105static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3043 struct inode *inode, 3106 struct inode *inode,
3044 struct ext4_ext_path *path) 3107 struct ext4_ext_path *path)
@@ -3047,46 +3110,27 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3047 struct ext4_extent_header *eh; 3110 struct ext4_extent_header *eh;
3048 int depth; 3111 int depth;
3049 int err = 0; 3112 int err = 0;
3050 int ret = 0;
3051 3113
3052 depth = ext_depth(inode); 3114 depth = ext_depth(inode);
3053 eh = path[depth].p_hdr; 3115 eh = path[depth].p_hdr;
3054 ex = path[depth].p_ext; 3116 ex = path[depth].p_ext;
3055 3117
3118 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
3119 "block %llu, max_blocks %u\n", inode->i_ino,
3120 (unsigned long long)le32_to_cpu(ex->ee_block),
3121 ext4_ext_get_actual_len(ex));
3122
3056 err = ext4_ext_get_access(handle, inode, path + depth); 3123 err = ext4_ext_get_access(handle, inode, path + depth);
3057 if (err) 3124 if (err)
3058 goto out; 3125 goto out;
3059 /* first mark the extent as initialized */ 3126 /* first mark the extent as initialized */
3060 ext4_ext_mark_initialized(ex); 3127 ext4_ext_mark_initialized(ex);
3061 3128
3062 /* 3129 /* note: ext4_ext_correct_indexes() isn't needed here because
3063 * We have to see if it can be merged with the extent 3130 * borders are not changed
3064 * on the left.
3065 */
3066 if (ex > EXT_FIRST_EXTENT(eh)) {
3067 /*
3068 * To merge left, pass "ex - 1" to try_to_merge(),
3069 * since it merges towards right _only_.
3070 */
3071 ret = ext4_ext_try_to_merge(inode, path, ex - 1);
3072 if (ret) {
3073 err = ext4_ext_correct_indexes(handle, inode, path);
3074 if (err)
3075 goto out;
3076 depth = ext_depth(inode);
3077 ex--;
3078 }
3079 }
3080 /*
3081 * Try to Merge towards right.
3082 */ 3131 */
3083 ret = ext4_ext_try_to_merge(inode, path, ex); 3132 ext4_ext_try_to_merge(inode, path, ex);
3084 if (ret) { 3133
3085 err = ext4_ext_correct_indexes(handle, inode, path);
3086 if (err)
3087 goto out;
3088 depth = ext_depth(inode);
3089 }
3090 /* Mark modified extent as dirty */ 3134 /* Mark modified extent as dirty */
3091 err = ext4_ext_dirty(handle, inode, path + depth); 3135 err = ext4_ext_dirty(handle, inode, path + depth);
3092out: 3136out:
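
The endio conversion now relies entirely on ext4_ext_try_to_merge(): two extents can only be coalesced when they are contiguous both logically and physically, share the same (un)initialized state, and the combined length still fits in one extent. A rough user-space sketch of the contiguity test; the names and the 32768 cap (mirroring EXT_INIT_MAX_LEN) are illustrative:

    #include <stdio.h>

    #define MAX_INIT_LEN 32768   /* mirrors EXT_INIT_MAX_LEN */

    struct ext_desc { unsigned int lblk, len; unsigned long long pblk; };

    /* Two initialized extents can merge when they are back to back both
     * logically and physically and the result still fits in one extent. */
    static int can_merge(const struct ext_desc *a, const struct ext_desc *b)
    {
        return a->lblk + a->len == b->lblk &&
               a->pblk + a->len == b->pblk &&
               a->len + b->len <= MAX_INIT_LEN;
    }

    int main(void)
    {
        struct ext_desc a = { 0, 16, 1000 }, b = { 16, 8, 1016 };

        printf("%s\n", can_merge(&a, &b) ? "merge" : "keep separate");
        return 0;
    }
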
@@ -3302,15 +3346,19 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3302 ext4_fsblk_t newblock = 0; 3346 ext4_fsblk_t newblock = 0;
3303 int err = 0, depth, ret; 3347 int err = 0, depth, ret;
3304 unsigned int allocated = 0; 3348 unsigned int allocated = 0;
3349 unsigned int punched_out = 0;
3350 unsigned int result = 0;
3305 struct ext4_allocation_request ar; 3351 struct ext4_allocation_request ar;
3306 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 3352 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3353 struct ext4_map_blocks punch_map;
3307 3354
3308 ext_debug("blocks %u/%u requested for inode %lu\n", 3355 ext_debug("blocks %u/%u requested for inode %lu\n",
3309 map->m_lblk, map->m_len, inode->i_ino); 3356 map->m_lblk, map->m_len, inode->i_ino);
3310 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 3357 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3311 3358
3312 /* check in cache */ 3359 /* check in cache */
3313 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3360 if (ext4_ext_in_cache(inode, map->m_lblk, &newex) &&
3361 ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) {
3314 if (!newex.ee_start_lo && !newex.ee_start_hi) { 3362 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3315 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3363 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3316 /* 3364 /*
@@ -3375,16 +3423,84 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3375 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 3423 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3376 ee_block, ee_len, newblock); 3424 ee_block, ee_len, newblock);
3377 3425
3378 /* Do not put uninitialized extent in the cache */ 3426 if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
3379 if (!ext4_ext_is_uninitialized(ex)) { 3427 /*
3380 ext4_ext_put_in_cache(inode, ee_block, 3428 * Do not put uninitialized extent
3381 ee_len, ee_start); 3429 * in the cache
3382 goto out; 3430 */
3431 if (!ext4_ext_is_uninitialized(ex)) {
3432 ext4_ext_put_in_cache(inode, ee_block,
3433 ee_len, ee_start);
3434 goto out;
3435 }
3436 ret = ext4_ext_handle_uninitialized_extents(
3437 handle, inode, map, path, flags,
3438 allocated, newblock);
3439 return ret;
3383 } 3440 }
3384 ret = ext4_ext_handle_uninitialized_extents(handle, 3441
3385 inode, map, path, flags, allocated, 3442 /*
3386 newblock); 3443 * Punch out the map length, but only to the
3387 return ret; 3444 * end of the extent
3445 */
3446 punched_out = allocated < map->m_len ?
3447 allocated : map->m_len;
3448
3449 /*
3450 * Since extents need to be converted to
3451 * uninitialized, they must fit in an
3452 * uninitialized extent
3453 */
3454 if (punched_out > EXT_UNINIT_MAX_LEN)
3455 punched_out = EXT_UNINIT_MAX_LEN;
3456
3457 punch_map.m_lblk = map->m_lblk;
3458 punch_map.m_pblk = newblock;
3459 punch_map.m_len = punched_out;
3460 punch_map.m_flags = 0;
3461
3462 /* Check to see if the extent needs to be split */
3463 if (punch_map.m_len != ee_len ||
3464 punch_map.m_lblk != ee_block) {
3465
3466 ret = ext4_split_extent(handle, inode,
3467 path, &punch_map, 0,
3468 EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
3469 EXT4_GET_BLOCKS_PRE_IO);
3470
3471 if (ret < 0) {
3472 err = ret;
3473 goto out2;
3474 }
3475 /*
3476 * find extent for the block at
3477 * the start of the hole
3478 */
3479 ext4_ext_drop_refs(path);
3480 kfree(path);
3481
3482 path = ext4_ext_find_extent(inode,
3483 map->m_lblk, NULL);
3484 if (IS_ERR(path)) {
3485 err = PTR_ERR(path);
3486 path = NULL;
3487 goto out2;
3488 }
3489
3490 depth = ext_depth(inode);
3491 ex = path[depth].p_ext;
3492 ee_len = ext4_ext_get_actual_len(ex);
3493 ee_block = le32_to_cpu(ex->ee_block);
3494 ee_start = ext4_ext_pblock(ex);
3495
3496 }
3497
3498 ext4_ext_mark_uninitialized(ex);
3499
3500 err = ext4_ext_remove_space(inode, map->m_lblk,
3501 map->m_lblk + punched_out);
3502
3503 goto out2;
3388 } 3504 }
3389 } 3505 }
3390 3506
@@ -3446,6 +3562,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3446 else 3562 else
3447 /* disable in-core preallocation for non-regular files */ 3563 /* disable in-core preallocation for non-regular files */
3448 ar.flags = 0; 3564 ar.flags = 0;
3565 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
3566 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
3449 newblock = ext4_mb_new_blocks(handle, &ar, &err); 3567 newblock = ext4_mb_new_blocks(handle, &ar, &err);
3450 if (!newblock) 3568 if (!newblock)
3451 goto out2; 3569 goto out2;
@@ -3529,7 +3647,11 @@ out2:
3529 } 3647 }
3530 trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, 3648 trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
3531 newblock, map->m_len, err ? err : allocated); 3649 newblock, map->m_len, err ? err : allocated);
3532 return err ? err : allocated; 3650
3651 result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
3652 punched_out : allocated;
3653
3654 return err ? err : result;
3533} 3655}
3534 3656
3535void ext4_ext_truncate(struct inode *inode) 3657void ext4_ext_truncate(struct inode *inode)
@@ -3577,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode)
3577 3699
3578 last_block = (inode->i_size + sb->s_blocksize - 1) 3700 last_block = (inode->i_size + sb->s_blocksize - 1)
3579 >> EXT4_BLOCK_SIZE_BITS(sb); 3701 >> EXT4_BLOCK_SIZE_BITS(sb);
3580 err = ext4_ext_remove_space(inode, last_block); 3702 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK);
3581 3703
3582 /* In a multi-transaction truncate, we only make the final 3704 /* In a multi-transaction truncate, we only make the final
3583 * transaction synchronous. 3705 * transaction synchronous.
@@ -3585,8 +3707,9 @@ void ext4_ext_truncate(struct inode *inode)
3585 if (IS_SYNC(inode)) 3707 if (IS_SYNC(inode))
3586 ext4_handle_sync(handle); 3708 ext4_handle_sync(handle);
3587 3709
3588out_stop:
3589 up_write(&EXT4_I(inode)->i_data_sem); 3710 up_write(&EXT4_I(inode)->i_data_sem);
3711
3712out_stop:
3590 /* 3713 /*
3591 * If this was a simple ftruncate() and the file will remain alive, 3714 * If this was a simple ftruncate() and the file will remain alive,
3592 * then we need to clear up the orphan record which we created above. 3715 * then we need to clear up the orphan record which we created above.
@@ -3651,10 +3774,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3651 struct ext4_map_blocks map; 3774 struct ext4_map_blocks map;
3652 unsigned int credits, blkbits = inode->i_blkbits; 3775 unsigned int credits, blkbits = inode->i_blkbits;
3653 3776
3654 /* We only support the FALLOC_FL_KEEP_SIZE mode */
3655 if (mode & ~FALLOC_FL_KEEP_SIZE)
3656 return -EOPNOTSUPP;
3657
3658 /* 3777 /*
3659 * currently supporting (pre)allocate mode for extent-based 3778 * currently supporting (pre)allocate mode for extent-based
3660 * files _only_ 3779 * files _only_
@@ -3662,6 +3781,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3662 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 3781 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3663 return -EOPNOTSUPP; 3782 return -EOPNOTSUPP;
3664 3783
3784 /* Return error if mode is not supported */
3785 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
3786 return -EOPNOTSUPP;
3787
3788 if (mode & FALLOC_FL_PUNCH_HOLE)
3789 return ext4_punch_hole(file, offset, len);
3790
3665 trace_ext4_fallocate_enter(inode, offset, len, mode); 3791 trace_ext4_fallocate_enter(inode, offset, len, mode);
3666 map.m_lblk = offset >> blkbits; 3792 map.m_lblk = offset >> blkbits;
3667 /* 3793 /*
@@ -3691,7 +3817,8 @@ retry:
3691 break; 3817 break;
3692 } 3818 }
3693 ret = ext4_map_blocks(handle, inode, &map, 3819 ret = ext4_map_blocks(handle, inode, &map,
3694 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); 3820 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
3821 EXT4_GET_BLOCKS_NO_NORMALIZE);
3695 if (ret <= 0) { 3822 if (ret <= 0) {
3696#ifdef EXT4FS_DEBUG 3823#ifdef EXT4FS_DEBUG
3697 WARN_ON(ret <= 0); 3824 WARN_ON(ret <= 0);
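
With ext4_fallocate() now dispatching FALLOC_FL_PUNCH_HOLE, user space can deallocate a byte range roughly as below; note that current kernels expect the flag to be combined with FALLOC_FL_KEEP_SIZE so the file size is left untouched. This is a usage sketch, not part of the patch:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc != 4) {
            fprintf(stderr, "usage: %s <file> <offset> <length>\n", argv[0]);
            return 1;
        }

        int fd = open(argv[1], O_RDWR);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Deallocate the byte range; KEEP_SIZE leaves i_size untouched. */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      atoll(argv[2]), atoll(argv[3])) < 0)
            perror("fallocate");

        close(fd);
        return 0;
    }
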
@@ -3822,6 +3949,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3822 pgoff_t last_offset; 3949 pgoff_t last_offset;
3823 pgoff_t offset; 3950 pgoff_t offset;
3824 pgoff_t index; 3951 pgoff_t index;
3952 pgoff_t start_index = 0;
3825 struct page **pages = NULL; 3953 struct page **pages = NULL;
3826 struct buffer_head *bh = NULL; 3954 struct buffer_head *bh = NULL;
3827 struct buffer_head *head = NULL; 3955 struct buffer_head *head = NULL;
@@ -3848,39 +3976,57 @@ out:
3848 kfree(pages); 3976 kfree(pages);
3849 return EXT_CONTINUE; 3977 return EXT_CONTINUE;
3850 } 3978 }
3979 index = 0;
3851 3980
3981next_page:
3852 /* Try to find the 1st mapped buffer. */ 3982 /* Try to find the 1st mapped buffer. */
3853 end = ((__u64)pages[0]->index << PAGE_SHIFT) >> 3983 end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
3854 blksize_bits; 3984 blksize_bits;
3855 if (!page_has_buffers(pages[0])) 3985 if (!page_has_buffers(pages[index]))
3856 goto out; 3986 goto out;
3857 head = page_buffers(pages[0]); 3987 head = page_buffers(pages[index]);
3858 if (!head) 3988 if (!head)
3859 goto out; 3989 goto out;
3860 3990
3991 index++;
3861 bh = head; 3992 bh = head;
3862 do { 3993 do {
3863 if (buffer_mapped(bh)) { 3994 if (end >= newex->ec_block +
3995 newex->ec_len)
3996 /* The buffer is out of
3997 * the request range.
3998 */
3999 goto out;
4000
4001 if (buffer_mapped(bh) &&
4002 end >= newex->ec_block) {
4003 start_index = index - 1;
3864 /* get the 1st mapped buffer. */ 4004 /* get the 1st mapped buffer. */
3865 if (end > newex->ec_block +
3866 newex->ec_len)
3867 /* The buffer is out of
3868 * the request range.
3869 */
3870 goto out;
3871 goto found_mapped_buffer; 4005 goto found_mapped_buffer;
3872 } 4006 }
4007
3873 bh = bh->b_this_page; 4008 bh = bh->b_this_page;
3874 end++; 4009 end++;
3875 } while (bh != head); 4010 } while (bh != head);
3876 4011
3877 /* No mapped buffer found. */ 4012 /* No mapped buffer in the range found in this page,
3878 goto out; 4013 * We need to look up next page.
4014 */
4015 if (index >= ret) {
4016 /* There is no page left, but we need to limit
4017 * newex->ec_len.
4018 */
4019 newex->ec_len = end - newex->ec_block;
4020 goto out;
4021 }
4022 goto next_page;
3879 } else { 4023 } else {
3880 /*Find contiguous delayed buffers. */ 4024 /*Find contiguous delayed buffers. */
3881 if (ret > 0 && pages[0]->index == last_offset) 4025 if (ret > 0 && pages[0]->index == last_offset)
3882 head = page_buffers(pages[0]); 4026 head = page_buffers(pages[0]);
3883 bh = head; 4027 bh = head;
4028 index = 1;
4029 start_index = 0;
3884 } 4030 }
3885 4031
3886found_mapped_buffer: 4032found_mapped_buffer:
@@ -3903,7 +4049,7 @@ found_mapped_buffer:
3903 end++; 4049 end++;
3904 } while (bh != head); 4050 } while (bh != head);
3905 4051
3906 for (index = 1; index < ret; index++) { 4052 for (; index < ret; index++) {
3907 if (!page_has_buffers(pages[index])) { 4053 if (!page_has_buffers(pages[index])) {
3908 bh = NULL; 4054 bh = NULL;
3909 break; 4055 break;
@@ -3913,8 +4059,10 @@ found_mapped_buffer:
3913 bh = NULL; 4059 bh = NULL;
3914 break; 4060 break;
3915 } 4061 }
4062
3916 if (pages[index]->index != 4063 if (pages[index]->index !=
3917 pages[0]->index + index) { 4064 pages[start_index]->index + index
4065 - start_index) {
3918 /* Blocks are not contiguous. */ 4066 /* Blocks are not contiguous. */
3919 bh = NULL; 4067 bh = NULL;
3920 break; 4068 break;
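
The fiemap callback fixes above change how mapped buffers are located across page boundaries; the effect is observable through the FIEMAP ioctl. A rough user-space reader follows (FIEMAP_FLAG_SYNC forces delayed allocations to be written out first); the extent count of 32 is an arbitrary choice for illustration:

    #include <fcntl.h>
    #include <linux/fiemap.h>
    #include <linux/fs.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }

        int fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        unsigned int count = 32;   /* upper bound on returned extents */
        struct fiemap *fm = calloc(1, sizeof(*fm) +
                                      count * sizeof(struct fiemap_extent));
        if (!fm)
            return 1;

        fm->fm_start = 0;
        fm->fm_length = ~0ULL;               /* map the whole file       */
        fm->fm_flags = FIEMAP_FLAG_SYNC;     /* write out delalloc first */
        fm->fm_extent_count = count;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
            perror("FS_IOC_FIEMAP");
            return 1;
        }

        for (unsigned int i = 0; i < fm->fm_mapped_extents; i++)
            printf("logical %llu physical %llu length %llu flags 0x%x\n",
                   (unsigned long long)fm->fm_extents[i].fe_logical,
                   (unsigned long long)fm->fm_extents[i].fe_physical,
                   (unsigned long long)fm->fm_extents[i].fe_length,
                   fm->fm_extents[i].fe_flags);

        free(fm);
        close(fd);
        return 0;
    }
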
@@ -4006,6 +4154,177 @@ static int ext4_xattr_fiemap(struct inode *inode,
4006 return (error < 0 ? error : 0); 4154 return (error < 0 ? error : 0);
4007} 4155}
4008 4156
4157/*
4158 * ext4_ext_punch_hole
4159 *
4160 * Punches a hole of "length" bytes in a file starting
4161 * at byte "offset"
4162 *
4163 * @inode: The inode of the file to punch a hole in
4164 * @offset: The starting byte offset of the hole
4165 * @length: The length of the hole
4166 *
4167 * Returns the number of blocks removed or negative on error
4168 */
4169int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4170{
4171 struct inode *inode = file->f_path.dentry->d_inode;
4172 struct super_block *sb = inode->i_sb;
4173 struct ext4_ext_cache cache_ex;
4174 ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
4175 struct address_space *mapping = inode->i_mapping;
4176 struct ext4_map_blocks map;
4177 handle_t *handle;
4178 loff_t first_block_offset, last_block_offset, block_len;
4179 loff_t first_page, last_page, first_page_offset, last_page_offset;
4180 int ret, credits, blocks_released, err = 0;
4181
4182 first_block = (offset + sb->s_blocksize - 1) >>
4183 EXT4_BLOCK_SIZE_BITS(sb);
4184 last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4185
4186 first_block_offset = first_block << EXT4_BLOCK_SIZE_BITS(sb);
4187 last_block_offset = last_block << EXT4_BLOCK_SIZE_BITS(sb);
4188
4189 first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4190 last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4191
4192 first_page_offset = first_page << PAGE_CACHE_SHIFT;
4193 last_page_offset = last_page << PAGE_CACHE_SHIFT;
4194
4195 /*
4196 * Write out all dirty pages to avoid race conditions
4197 * Then release them.
4198 */
4199 if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4200 err = filemap_write_and_wait_range(mapping,
4201 first_page_offset == 0 ? 0 : first_page_offset-1,
4202 last_page_offset);
4203
4204 if (err)
4205 return err;
4206 }
4207
4208 /* Now release the pages */
4209 if (last_page_offset > first_page_offset) {
4210 truncate_inode_pages_range(mapping, first_page_offset,
4211 last_page_offset-1);
4212 }
4213
4214 /* finish any pending end_io work */
4215 ext4_flush_completed_IO(inode);
4216
4217 credits = ext4_writepage_trans_blocks(inode);
4218 handle = ext4_journal_start(inode, credits);
4219 if (IS_ERR(handle))
4220 return PTR_ERR(handle);
4221
4222 err = ext4_orphan_add(handle, inode);
4223 if (err)
4224 goto out;
4225
4226 /*
4227 * Now we need to zero out the data that is not block aligned.
4228 * If the hole is smaller than a block, just
4229 * zero out the middle of it
4230 */
4231 if (first_block > last_block)
4232 ext4_block_zero_page_range(handle, mapping, offset, length);
4233 else {
4234 /* zero out the head of the hole before the first block */
4235 block_len = first_block_offset - offset;
4236 if (block_len > 0)
4237 ext4_block_zero_page_range(handle, mapping,
4238 offset, block_len);
4239
4240 /* zero out the tail of the hole after the last block */
4241 block_len = offset + length - last_block_offset;
4242 if (block_len > 0) {
4243 ext4_block_zero_page_range(handle, mapping,
4244 last_block_offset, block_len);
4245 }
4246 }
4247
4248 /* If there are no blocks to remove, return now */
4249 if (first_block >= last_block)
4250 goto out;
4251
4252 down_write(&EXT4_I(inode)->i_data_sem);
4253 ext4_ext_invalidate_cache(inode);
4254 ext4_discard_preallocations(inode);
4255
4256 /*
4257 * Loop over all the blocks and identify blocks
4258 * that need to be punched out
4259 */
4260 iblock = first_block;
4261 blocks_released = 0;
4262 while (iblock < last_block) {
4263 max_blocks = last_block - iblock;
4264 num_blocks = 1;
4265 memset(&map, 0, sizeof(map));
4266 map.m_lblk = iblock;
4267 map.m_len = max_blocks;
4268 ret = ext4_ext_map_blocks(handle, inode, &map,
4269 EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
4270
4271 if (ret > 0) {
4272 blocks_released += ret;
4273 num_blocks = ret;
4274 } else if (ret == 0) {
4275 /*
4276 * If map blocks could not find the block,
4277 * then it is in a hole. If the hole was
4278 * not already cached, then map blocks should
4279 * put it in the cache. So we can get the hole
4280 * out of the cache
4281 */
4282 memset(&cache_ex, 0, sizeof(cache_ex));
4283 if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
4284 !cache_ex.ec_start) {
4285
4286 /* The hole is cached */
4287 num_blocks = cache_ex.ec_block +
4288 cache_ex.ec_len - iblock;
4289
4290 } else {
4291 /* The block could not be identified */
4292 err = -EIO;
4293 break;
4294 }
4295 } else {
4296 /* Map blocks error */
4297 err = ret;
4298 break;
4299 }
4300
4301 if (num_blocks == 0) {
4302 /* This condition should never happen */
4303 ext_debug("Block lookup failed");
4304 err = -EIO;
4305 break;
4306 }
4307
4308 iblock += num_blocks;
4309 }
4310
4311 if (blocks_released > 0) {
4312 ext4_ext_invalidate_cache(inode);
4313 ext4_discard_preallocations(inode);
4314 }
4315
4316 if (IS_SYNC(inode))
4317 ext4_handle_sync(handle);
4318
4319 up_write(&EXT4_I(inode)->i_data_sem);
4320
4321out:
4322 ext4_orphan_del(handle, inode);
4323 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4324 ext4_mark_inode_dirty(handle, inode);
4325 ext4_journal_stop(handle);
4326 return err;
4327}
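
ext4_ext_punch_hole() rounds the start of the hole up and its end down to block boundaries, removes only the whole blocks in between, and merely zeroes the partial head and tail (when first_block > last_block the entire hole lies within one block). A sketch of that arithmetic for an assumed 4096-byte block size:

    #include <stdio.h>

    #define BLKBITS 12                      /* assume 4096-byte blocks */
    #define BLKSIZE (1ULL << BLKBITS)

    int main(void)
    {
        unsigned long long offset = 5000, length = 10000;

        unsigned long long first_block = (offset + BLKSIZE - 1) >> BLKBITS; /* round up   */
        unsigned long long last_block  = (offset + length) >> BLKBITS;      /* round down */

        unsigned long long head_zero = (first_block << BLKBITS) - offset;
        unsigned long long tail_zero = offset + length - (last_block << BLKBITS);

        printf("remove blocks [%llu, %llu), zero %llu head bytes and %llu tail bytes\n",
               first_block, last_block, head_zero, tail_zero);
        return 0;
    }
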
4009int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4328int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4010 __u64 start, __u64 len) 4329 __u64 start, __u64 len)
4011{ 4330{
@@ -4042,4 +4361,3 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4042 4361
4043 return error; 4362 return error;
4044} 4363}
4045
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 7b80d543b89e..2c0972322009 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -272,7 +272,6 @@ const struct file_operations ext4_file_operations = {
272}; 272};
273 273
274const struct inode_operations ext4_file_inode_operations = { 274const struct inode_operations ext4_file_inode_operations = {
275 .truncate = ext4_truncate,
276 .setattr = ext4_setattr, 275 .setattr = ext4_setattr,
277 .getattr = ext4_getattr, 276 .getattr = ext4_getattr,
278#ifdef CONFIG_EXT4_FS_XATTR 277#ifdef CONFIG_EXT4_FS_XATTR
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index e9473cbe80df..ce66d2fe826c 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -36,7 +36,7 @@
36 36
37static void dump_completed_IO(struct inode * inode) 37static void dump_completed_IO(struct inode * inode)
38{ 38{
39#ifdef EXT4_DEBUG 39#ifdef EXT4FS_DEBUG
40 struct list_head *cur, *before, *after; 40 struct list_head *cur, *before, *after;
41 ext4_io_end_t *io, *io0, *io1; 41 ext4_io_end_t *io, *io0, *io1;
42 unsigned long flags; 42 unsigned long flags;
@@ -172,6 +172,7 @@ int ext4_sync_file(struct file *file, int datasync)
172 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 172 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
173 int ret; 173 int ret;
174 tid_t commit_tid; 174 tid_t commit_tid;
175 bool needs_barrier = false;
175 176
176 J_ASSERT(ext4_journal_current_handle() == NULL); 177 J_ASSERT(ext4_journal_current_handle() == NULL);
177 178
@@ -211,22 +212,12 @@ int ext4_sync_file(struct file *file, int datasync)
211 } 212 }
212 213
213 commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; 214 commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
214 if (jbd2_log_start_commit(journal, commit_tid)) { 215 if (journal->j_flags & JBD2_BARRIER &&
215 /* 216 !jbd2_trans_will_send_data_barrier(journal, commit_tid))
216 * When the journal is on a different device than the 217 needs_barrier = true;
217 * fs data disk, we need to issue the barrier in 218 jbd2_log_start_commit(journal, commit_tid);
218 * writeback mode. (In ordered mode, the jbd2 layer 219 ret = jbd2_log_wait_commit(journal, commit_tid);
219 * will take care of issuing the barrier. In 220 if (needs_barrier)
220 * data=journal, all of the data blocks are written to
221 * the journal device.)
222 */
223 if (ext4_should_writeback_data(inode) &&
224 (journal->j_fs_dev != journal->j_dev) &&
225 (journal->j_flags & JBD2_BARRIER))
226 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
227 NULL);
228 ret = jbd2_log_wait_commit(journal, commit_tid);
229 } else if (journal->j_flags & JBD2_BARRIER)
230 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); 221 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
231 out: 222 out:
232 trace_ext4_sync_file_exit(inode, ret); 223 trace_ext4_sync_file_exit(inode, ret);
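
The reworked fsync path only issues blkdev_issue_flush() itself when the journal has barriers enabled and jbd2 reports that the committing transaction will not already send one. A trivial stand-alone restatement of that decision, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for the check now done in ext4_sync_file(). */
    static bool needs_explicit_flush(bool journal_barrier_enabled,
                                     bool commit_sends_barrier)
    {
        return journal_barrier_enabled && !commit_sends_barrier;
    }

    int main(void)
    {
        printf("%d\n", needs_explicit_flush(true, false));   /* 1: flush ourselves */
        printf("%d\n", needs_explicit_flush(true, true));    /* 0: jbd2 handles it */
        return 0;
    }
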
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f2fa5e8a582c..50d0e9c64584 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -639,8 +639,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
639 while (target > 0) { 639 while (target > 0) {
640 count = target; 640 count = target;
641 /* allocating blocks for indirect blocks and direct blocks */ 641 /* allocating blocks for indirect blocks and direct blocks */
642 current_block = ext4_new_meta_blocks(handle, inode, 642 current_block = ext4_new_meta_blocks(handle, inode, goal,
643 goal, &count, err); 643 0, &count, err);
644 if (*err) 644 if (*err)
645 goto failed_out; 645 goto failed_out;
646 646
@@ -1930,7 +1930,7 @@ repeat:
1930 * We do still charge estimated metadata to the sb though; 1930 * We do still charge estimated metadata to the sb though;
1931 * we cannot afford to run out of free blocks. 1931 * we cannot afford to run out of free blocks.
1932 */ 1932 */
1933 if (ext4_claim_free_blocks(sbi, md_needed + 1)) { 1933 if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) {
1934 dquot_release_reservation_block(inode, 1); 1934 dquot_release_reservation_block(inode, 1);
1935 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1935 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1936 yield(); 1936 yield();
@@ -2796,9 +2796,7 @@ static int write_cache_pages_da(struct address_space *mapping,
2796 continue; 2796 continue;
2797 } 2797 }
2798 2798
2799 if (PageWriteback(page)) 2799 wait_on_page_writeback(page);
2800 wait_on_page_writeback(page);
2801
2802 BUG_ON(PageWriteback(page)); 2800 BUG_ON(PageWriteback(page));
2803 2801
2804 if (mpd->next_page != page->index) 2802 if (mpd->next_page != page->index)
@@ -3513,7 +3511,7 @@ retry:
3513 loff_t end = offset + iov_length(iov, nr_segs); 3511 loff_t end = offset + iov_length(iov, nr_segs);
3514 3512
3515 if (end > isize) 3513 if (end > isize)
3516 vmtruncate(inode, isize); 3514 ext4_truncate_failed_write(inode);
3517 } 3515 }
3518 } 3516 }
3519 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3517 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3916,9 +3914,30 @@ void ext4_set_aops(struct inode *inode)
3916int ext4_block_truncate_page(handle_t *handle, 3914int ext4_block_truncate_page(handle_t *handle,
3917 struct address_space *mapping, loff_t from) 3915 struct address_space *mapping, loff_t from)
3918{ 3916{
3917 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3918 unsigned length;
3919 unsigned blocksize;
3920 struct inode *inode = mapping->host;
3921
3922 blocksize = inode->i_sb->s_blocksize;
3923 length = blocksize - (offset & (blocksize - 1));
3924
3925 return ext4_block_zero_page_range(handle, mapping, from, length);
3926}
3927
3928/*
3929 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3930 * starting from file offset 'from'. The range to be zeroed must
3931 * be contained within one block. If the specified range exceeds
3932 * the end of the block, it will be shortened to the end of the block
3933 * that corresponds to 'from'.
3934 */
3935int ext4_block_zero_page_range(handle_t *handle,
3936 struct address_space *mapping, loff_t from, loff_t length)
3937{
3919 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3938 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3920 unsigned offset = from & (PAGE_CACHE_SIZE-1); 3939 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3921 unsigned blocksize, length, pos; 3940 unsigned blocksize, max, pos;
3922 ext4_lblk_t iblock; 3941 ext4_lblk_t iblock;
3923 struct inode *inode = mapping->host; 3942 struct inode *inode = mapping->host;
3924 struct buffer_head *bh; 3943 struct buffer_head *bh;
@@ -3931,7 +3950,15 @@ int ext4_block_truncate_page(handle_t *handle,
3931 return -EINVAL; 3950 return -EINVAL;
3932 3951
3933 blocksize = inode->i_sb->s_blocksize; 3952 blocksize = inode->i_sb->s_blocksize;
3934 length = blocksize - (offset & (blocksize - 1)); 3953 max = blocksize - (offset & (blocksize - 1));
3954
3955 /*
3956 * correct length if it does not fall between
3957 * 'from' and the end of the block
3958 */
3959 if (length > max || length < 0)
3960 length = max;
3961
3935 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3962 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3936 3963
3937 if (!page_has_buffers(page)) 3964 if (!page_has_buffers(page))
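
ext4_block_zero_page_range() shortens the requested length so the zeroed range never crosses the block containing 'from'; ext4_block_truncate_page() is now just a wrapper that zeroes to the end of that block. A small sketch of the clamp, assuming a power-of-two block size:

    #include <stdio.h>

    static unsigned int clamp_zero_len(unsigned int blocksize,
                                       unsigned long long from,
                                       unsigned int length)
    {
        unsigned int offset = from & (blocksize - 1);
        unsigned int max = blocksize - offset;    /* bytes left in this block */

        return length > max ? max : length;
    }

    int main(void)
    {
        /* zero 10000 bytes starting 100 bytes into a 4096-byte block */
        printf("%u\n", clamp_zero_len(4096, 4196, 10000));   /* prints 3996 */
        return 0;
    }
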
@@ -4380,8 +4407,6 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
4380 4407
4381int ext4_can_truncate(struct inode *inode) 4408int ext4_can_truncate(struct inode *inode)
4382{ 4409{
4383 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4384 return 0;
4385 if (S_ISREG(inode->i_mode)) 4410 if (S_ISREG(inode->i_mode))
4386 return 1; 4411 return 1;
4387 if (S_ISDIR(inode->i_mode)) 4412 if (S_ISDIR(inode->i_mode))
@@ -4392,6 +4417,31 @@ int ext4_can_truncate(struct inode *inode)
4392} 4417}
4393 4418
4394/* 4419/*
4420 * ext4_punch_hole: punches a hole in a file by releasing the blocks
4421 * associated with the given offset and length
4422 *
4423 * @inode: File inode
4424 * @offset: The offset where the hole will begin
4425 * @len: The length of the hole
4426 *
4427 * Returns: 0 on success or negative on failure
4428 */
4429
4430int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
4431{
4432 struct inode *inode = file->f_path.dentry->d_inode;
4433 if (!S_ISREG(inode->i_mode))
4434 return -ENOTSUPP;
4435
4436 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4437 /* TODO: Add support for non extent hole punching */
4438 return -ENOTSUPP;
4439 }
4440
4441 return ext4_ext_punch_hole(file, offset, length);
4442}
4443
4444/*
4395 * ext4_truncate() 4445 * ext4_truncate()
4396 * 4446 *
4397 * We block out ext4_get_block() block instantiations across the entire 4447 * We block out ext4_get_block() block instantiations across the entire
@@ -4617,7 +4667,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
4617 /* 4667 /*
4618 * Figure out the offset within the block group inode table 4668 * Figure out the offset within the block group inode table
4619 */ 4669 */
4620 inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); 4670 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4621 inode_offset = ((inode->i_ino - 1) % 4671 inode_offset = ((inode->i_ino - 1) %
4622 EXT4_INODES_PER_GROUP(sb)); 4672 EXT4_INODES_PER_GROUP(sb));
4623 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4673 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
@@ -5311,8 +5361,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5311 5361
5312 if (S_ISREG(inode->i_mode) && 5362 if (S_ISREG(inode->i_mode) &&
5313 attr->ia_valid & ATTR_SIZE && 5363 attr->ia_valid & ATTR_SIZE &&
5314 (attr->ia_size < inode->i_size || 5364 (attr->ia_size < inode->i_size)) {
5315 (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
5316 handle_t *handle; 5365 handle_t *handle;
5317 5366
5318 handle = ext4_journal_start(inode, 3); 5367 handle = ext4_journal_start(inode, 3);
@@ -5346,14 +5395,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5346 goto err_out; 5395 goto err_out;
5347 } 5396 }
5348 } 5397 }
5349 /* ext4_truncate will clear the flag */
5350 if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
5351 ext4_truncate(inode);
5352 } 5398 }
5353 5399
5354 if ((attr->ia_valid & ATTR_SIZE) && 5400 if (attr->ia_valid & ATTR_SIZE) {
5355 attr->ia_size != i_size_read(inode)) 5401 if (attr->ia_size != i_size_read(inode)) {
5356 rc = vmtruncate(inode, attr->ia_size); 5402 truncate_setsize(inode, attr->ia_size);
5403 ext4_truncate(inode);
5404 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
5405 ext4_truncate(inode);
5406 }
5357 5407
5358 if (!rc) { 5408 if (!rc) {
5359 setattr_copy(inode, attr); 5409 setattr_copy(inode, attr);
@@ -5811,15 +5861,19 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5811 goto out_unlock; 5861 goto out_unlock;
5812 } 5862 }
5813 ret = 0; 5863 ret = 0;
5814 if (PageMappedToDisk(page)) 5864
5815 goto out_unlock; 5865 lock_page(page);
5866 wait_on_page_writeback(page);
5867 if (PageMappedToDisk(page)) {
5868 up_read(&inode->i_alloc_sem);
5869 return VM_FAULT_LOCKED;
5870 }
5816 5871
5817 if (page->index == size >> PAGE_CACHE_SHIFT) 5872 if (page->index == size >> PAGE_CACHE_SHIFT)
5818 len = size & ~PAGE_CACHE_MASK; 5873 len = size & ~PAGE_CACHE_MASK;
5819 else 5874 else
5820 len = PAGE_CACHE_SIZE; 5875 len = PAGE_CACHE_SIZE;
5821 5876
5822 lock_page(page);
5823 /* 5877 /*
5824 * return if we have all the buffers mapped. This avoid 5878 * return if we have all the buffers mapped. This avoid
5825 * the need to call write_begin/write_end which does a 5879 * the need to call write_begin/write_end which does a
@@ -5829,8 +5883,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5829 if (page_has_buffers(page)) { 5883 if (page_has_buffers(page)) {
5830 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 5884 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
5831 ext4_bh_unmapped)) { 5885 ext4_bh_unmapped)) {
5832 unlock_page(page); 5886 up_read(&inode->i_alloc_sem);
5833 goto out_unlock; 5887 return VM_FAULT_LOCKED;
5834 } 5888 }
5835 } 5889 }
5836 unlock_page(page); 5890 unlock_page(page);
@@ -5850,6 +5904,16 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5850 if (ret < 0) 5904 if (ret < 0)
5851 goto out_unlock; 5905 goto out_unlock;
5852 ret = 0; 5906 ret = 0;
5907
5908 /*
5909 * write_begin/end might have created a dirty page and someone
5910 * could wander in and start the IO. Make sure that hasn't
5911 * happened.
5912 */
5913 lock_page(page);
5914 wait_on_page_writeback(page);
5915 up_read(&inode->i_alloc_sem);
5916 return VM_FAULT_LOCKED;
5853out_unlock: 5917out_unlock:
5854 if (ret) 5918 if (ret)
5855 ret = VM_FAULT_SIGBUS; 5919 ret = VM_FAULT_SIGBUS;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index d8a16eecf1d5..859f2ae8864e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -787,6 +787,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
787 struct inode *inode; 787 struct inode *inode;
788 char *data; 788 char *data;
789 char *bitmap; 789 char *bitmap;
790 struct ext4_group_info *grinfo;
790 791
791 mb_debug(1, "init page %lu\n", page->index); 792 mb_debug(1, "init page %lu\n", page->index);
792 793
@@ -819,6 +820,18 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
819 if (first_group + i >= ngroups) 820 if (first_group + i >= ngroups)
820 break; 821 break;
821 822
823 grinfo = ext4_get_group_info(sb, first_group + i);
824 /*
825 * If page is uptodate then we came here after online resize
826 * which added some new uninitialized group info structs, so
827 * we must skip all initialized uptodate buddies on the page,
828 * which may be currently in use by an allocating task.
829 */
830 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
831 bh[i] = NULL;
832 continue;
833 }
834
822 err = -EIO; 835 err = -EIO;
823 desc = ext4_get_group_desc(sb, first_group + i, NULL); 836 desc = ext4_get_group_desc(sb, first_group + i, NULL);
824 if (desc == NULL) 837 if (desc == NULL)
@@ -871,26 +884,28 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
871 } 884 }
872 885
873 /* wait for I/O completion */ 886 /* wait for I/O completion */
874 for (i = 0; i < groups_per_page && bh[i]; i++) 887 for (i = 0; i < groups_per_page; i++)
875 wait_on_buffer(bh[i]); 888 if (bh[i])
889 wait_on_buffer(bh[i]);
876 890
877 err = -EIO; 891 err = -EIO;
878 for (i = 0; i < groups_per_page && bh[i]; i++) 892 for (i = 0; i < groups_per_page; i++)
879 if (!buffer_uptodate(bh[i])) 893 if (bh[i] && !buffer_uptodate(bh[i]))
880 goto out; 894 goto out;
881 895
882 err = 0; 896 err = 0;
883 first_block = page->index * blocks_per_page; 897 first_block = page->index * blocks_per_page;
884 /* init the page */
885 memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
886 for (i = 0; i < blocks_per_page; i++) { 898 for (i = 0; i < blocks_per_page; i++) {
887 int group; 899 int group;
888 struct ext4_group_info *grinfo;
889 900
890 group = (first_block + i) >> 1; 901 group = (first_block + i) >> 1;
891 if (group >= ngroups) 902 if (group >= ngroups)
892 break; 903 break;
893 904
905 if (!bh[group - first_group])
906 /* skip initialized uptodate buddy */
907 continue;
908
894 /* 909 /*
895 * data carry information regarding this 910 * data carry information regarding this
896 * particular group in the format specified 911 * particular group in the format specified
@@ -919,6 +934,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
919 * incore got set to the group block bitmap below 934 * incore got set to the group block bitmap below
920 */ 935 */
921 ext4_lock_group(sb, group); 936 ext4_lock_group(sb, group);
937 /* init the buddy */
938 memset(data, 0xff, blocksize);
922 ext4_mb_generate_buddy(sb, data, incore, group); 939 ext4_mb_generate_buddy(sb, data, incore, group);
923 ext4_unlock_group(sb, group); 940 ext4_unlock_group(sb, group);
924 incore = NULL; 941 incore = NULL;
@@ -948,7 +965,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
948 965
949out: 966out:
950 if (bh) { 967 if (bh) {
951 for (i = 0; i < groups_per_page && bh[i]; i++) 968 for (i = 0; i < groups_per_page; i++)
952 brelse(bh[i]); 969 brelse(bh[i]);
953 if (bh != &bhs) 970 if (bh != &bhs)
954 kfree(bh); 971 kfree(bh);
@@ -957,22 +974,21 @@ out:
957} 974}
958 975
959/* 976/*
960 * lock the group_info alloc_sem of all the groups 977 * Lock the buddy and bitmap pages. This make sure other parallel init_group
961 * belonging to the same buddy cache page. This 978 * on the same buddy page doesn't happen whild holding the buddy page lock.
962 * make sure other parallel operation on the buddy 979 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
963 * cache doesn't happen whild holding the buddy cache 980 * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
964 * lock
965 */ 981 */
966static int ext4_mb_get_buddy_cache_lock(struct super_block *sb, 982static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
967 ext4_group_t group) 983 ext4_group_t group, struct ext4_buddy *e4b)
968{ 984{
969 int i; 985 struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
970 int block, pnum; 986 int block, pnum, poff;
971 int blocks_per_page; 987 int blocks_per_page;
972 int groups_per_page; 988 struct page *page;
973 ext4_group_t ngroups = ext4_get_groups_count(sb); 989
974 ext4_group_t first_group; 990 e4b->bd_buddy_page = NULL;
975 struct ext4_group_info *grp; 991 e4b->bd_bitmap_page = NULL;
976 992
977 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 993 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
978 /* 994 /*
@@ -982,57 +998,40 @@ static int ext4_mb_get_buddy_cache_lock(struct super_block *sb,
982 */ 998 */
983 block = group * 2; 999 block = group * 2;
984 pnum = block / blocks_per_page; 1000 pnum = block / blocks_per_page;
985 first_group = pnum * blocks_per_page / 2; 1001 poff = block % blocks_per_page;
986 1002 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
987 groups_per_page = blocks_per_page >> 1; 1003 if (!page)
988 if (groups_per_page == 0) 1004 return -EIO;
989 groups_per_page = 1; 1005 BUG_ON(page->mapping != inode->i_mapping);
990 /* read all groups the page covers into the cache */ 1006 e4b->bd_bitmap_page = page;
991 for (i = 0; i < groups_per_page; i++) { 1007 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
992 1008
993 if ((first_group + i) >= ngroups) 1009 if (blocks_per_page >= 2) {
994 break; 1010 /* buddy and bitmap are on the same page */
995 grp = ext4_get_group_info(sb, first_group + i); 1011 return 0;
996 /* take all groups write allocation
997 * semaphore. This make sure there is
998 * no block allocation going on in any
999 * of that groups
1000 */
1001 down_write_nested(&grp->alloc_sem, i);
1002 } 1012 }
1003 return i; 1013
1014 block++;
1015 pnum = block / blocks_per_page;
1016 poff = block % blocks_per_page;
1017 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1018 if (!page)
1019 return -EIO;
1020 BUG_ON(page->mapping != inode->i_mapping);
1021 e4b->bd_buddy_page = page;
1022 return 0;
1004} 1023}
1005 1024
1006static void ext4_mb_put_buddy_cache_lock(struct super_block *sb, 1025static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1007 ext4_group_t group, int locked_group)
1008{ 1026{
1009 int i; 1027 if (e4b->bd_bitmap_page) {
1010 int block, pnum; 1028 unlock_page(e4b->bd_bitmap_page);
1011 int blocks_per_page; 1029 page_cache_release(e4b->bd_bitmap_page);
1012 ext4_group_t first_group; 1030 }
1013 struct ext4_group_info *grp; 1031 if (e4b->bd_buddy_page) {
1014 1032 unlock_page(e4b->bd_buddy_page);
1015 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 1033 page_cache_release(e4b->bd_buddy_page);
1016 /*
1017 * the buddy cache inode stores the block bitmap
1018 * and buddy information in consecutive blocks.
1019 * So for each group we need two blocks.
1020 */
1021 block = group * 2;
1022 pnum = block / blocks_per_page;
1023 first_group = pnum * blocks_per_page / 2;
1024 /* release locks on all the groups */
1025 for (i = 0; i < locked_group; i++) {
1026
1027 grp = ext4_get_group_info(sb, first_group + i);
1028 /* take all groups write allocation
1029 * semaphore. This make sure there is
1030 * no block allocation going on in any
1031 * of that groups
1032 */
1033 up_write(&grp->alloc_sem);
1034 } 1034 }
1035
1036} 1035}
1037 1036
1038/* 1037/*
@@ -1044,93 +1043,60 @@ static noinline_for_stack
1044int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) 1043int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
1045{ 1044{
1046 1045
1047 int ret = 0;
1048 void *bitmap;
1049 int blocks_per_page;
1050 int block, pnum, poff;
1051 int num_grp_locked = 0;
1052 struct ext4_group_info *this_grp; 1046 struct ext4_group_info *this_grp;
1053 struct ext4_sb_info *sbi = EXT4_SB(sb); 1047 struct ext4_buddy e4b;
1054 struct inode *inode = sbi->s_buddy_cache; 1048 struct page *page;
1055 struct page *page = NULL, *bitmap_page = NULL; 1049 int ret = 0;
1056 1050
1057 mb_debug(1, "init group %u\n", group); 1051 mb_debug(1, "init group %u\n", group);
1058 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
1059 this_grp = ext4_get_group_info(sb, group); 1052 this_grp = ext4_get_group_info(sb, group);
1060 /* 1053 /*
1061 * This ensures that we don't reinit the buddy cache 1054 * This ensures that we don't reinit the buddy cache
1062 * page which map to the group from which we are already 1055 * page which map to the group from which we are already
1063 * allocating. If we are looking at the buddy cache we would 1056 * allocating. If we are looking at the buddy cache we would
1064 * have taken a reference using ext4_mb_load_buddy and that 1057 * have taken a reference using ext4_mb_load_buddy and that
1065 * would have taken the alloc_sem lock. 1058 * would have pinned buddy page to page cache.
1066 */ 1059 */
1067 num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group); 1060 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
1068 if (!EXT4_MB_GRP_NEED_INIT(this_grp)) { 1061 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1069 /* 1062 /*
1070 * somebody initialized the group 1063 * somebody initialized the group
1071 * return without doing anything 1064 * return without doing anything
1072 */ 1065 */
1073 ret = 0;
1074 goto err; 1066 goto err;
1075 } 1067 }
1076 /* 1068
1077 * the buddy cache inode stores the block bitmap 1069 page = e4b.bd_bitmap_page;
1078 * and buddy information in consecutive blocks. 1070 ret = ext4_mb_init_cache(page, NULL);
1079 * So for each group we need two blocks. 1071 if (ret)
1080 */ 1072 goto err;
1081 block = group * 2; 1073 if (!PageUptodate(page)) {
1082 pnum = block / blocks_per_page;
1083 poff = block % blocks_per_page;
1084 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1085 if (page) {
1086 BUG_ON(page->mapping != inode->i_mapping);
1087 ret = ext4_mb_init_cache(page, NULL);
1088 if (ret) {
1089 unlock_page(page);
1090 goto err;
1091 }
1092 unlock_page(page);
1093 }
1094 if (page == NULL || !PageUptodate(page)) {
1095 ret = -EIO; 1074 ret = -EIO;
1096 goto err; 1075 goto err;
1097 } 1076 }
1098 mark_page_accessed(page); 1077 mark_page_accessed(page);
1099 bitmap_page = page;
1100 bitmap = page_address(page) + (poff * sb->s_blocksize);
1101 1078
1102 /* init buddy cache */ 1079 if (e4b.bd_buddy_page == NULL) {
1103 block++;
1104 pnum = block / blocks_per_page;
1105 poff = block % blocks_per_page;
1106 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
1107 if (page == bitmap_page) {
1108 /* 1080 /*
1109 * If both the bitmap and buddy are in 1081 * If both the bitmap and buddy are in
1110 * the same page we don't need to force 1082 * the same page we don't need to force
1111 * init the buddy 1083 * init the buddy
1112 */ 1084 */
1113 unlock_page(page); 1085 ret = 0;
1114 } else if (page) { 1086 goto err;
1115 BUG_ON(page->mapping != inode->i_mapping);
1116 ret = ext4_mb_init_cache(page, bitmap);
1117 if (ret) {
1118 unlock_page(page);
1119 goto err;
1120 }
1121 unlock_page(page);
1122 } 1087 }
1123 if (page == NULL || !PageUptodate(page)) { 1088 /* init buddy cache */
1089 page = e4b.bd_buddy_page;
1090 ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
1091 if (ret)
1092 goto err;
1093 if (!PageUptodate(page)) {
1124 ret = -EIO; 1094 ret = -EIO;
1125 goto err; 1095 goto err;
1126 } 1096 }
1127 mark_page_accessed(page); 1097 mark_page_accessed(page);
1128err: 1098err:
1129 ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked); 1099 ext4_mb_put_buddy_page_lock(&e4b);
1130 if (bitmap_page)
1131 page_cache_release(bitmap_page);
1132 if (page)
1133 page_cache_release(page);
1134 return ret; 1100 return ret;
1135} 1101}
1136 1102
@@ -1164,24 +1130,8 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1164 e4b->bd_group = group; 1130 e4b->bd_group = group;
1165 e4b->bd_buddy_page = NULL; 1131 e4b->bd_buddy_page = NULL;
1166 e4b->bd_bitmap_page = NULL; 1132 e4b->bd_bitmap_page = NULL;
1167 e4b->alloc_semp = &grp->alloc_sem;
1168
1169 /* Take the read lock on the group alloc
1170 * sem. This would make sure a parallel
1171 * ext4_mb_init_group happening on other
1172 * groups mapped by the page is blocked
1173 * till we are done with allocation
1174 */
1175repeat_load_buddy:
1176 down_read(e4b->alloc_semp);
1177 1133
1178 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1134 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1179 /* we need to check for group need init flag
1180 * with alloc_semp held so that we can be sure
1181 * that new blocks didn't get added to the group
1182 * when we are loading the buddy cache
1183 */
1184 up_read(e4b->alloc_semp);
1185 /* 1135 /*
1186 * we need full data about the group 1136 * we need full data about the group
1187 * to make a good selection 1137 * to make a good selection
@@ -1189,7 +1139,6 @@ repeat_load_buddy:
1189 ret = ext4_mb_init_group(sb, group); 1139 ret = ext4_mb_init_group(sb, group);
1190 if (ret) 1140 if (ret)
1191 return ret; 1141 return ret;
1192 goto repeat_load_buddy;
1193 } 1142 }
1194 1143
1195 /* 1144 /*
@@ -1273,15 +1222,14 @@ repeat_load_buddy:
1273 return 0; 1222 return 0;
1274 1223
1275err: 1224err:
1225 if (page)
1226 page_cache_release(page);
1276 if (e4b->bd_bitmap_page) 1227 if (e4b->bd_bitmap_page)
1277 page_cache_release(e4b->bd_bitmap_page); 1228 page_cache_release(e4b->bd_bitmap_page);
1278 if (e4b->bd_buddy_page) 1229 if (e4b->bd_buddy_page)
1279 page_cache_release(e4b->bd_buddy_page); 1230 page_cache_release(e4b->bd_buddy_page);
1280 e4b->bd_buddy = NULL; 1231 e4b->bd_buddy = NULL;
1281 e4b->bd_bitmap = NULL; 1232 e4b->bd_bitmap = NULL;
1282
1283 /* Done with the buddy cache */
1284 up_read(e4b->alloc_semp);
1285 return ret; 1233 return ret;
1286} 1234}
1287 1235
@@ -1291,9 +1239,6 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1291 page_cache_release(e4b->bd_bitmap_page); 1239 page_cache_release(e4b->bd_bitmap_page);
1292 if (e4b->bd_buddy_page) 1240 if (e4b->bd_buddy_page)
1293 page_cache_release(e4b->bd_buddy_page); 1241 page_cache_release(e4b->bd_buddy_page);
1294 /* Done with the buddy cache */
1295 if (e4b->alloc_semp)
1296 up_read(e4b->alloc_semp);
1297} 1242}
1298 1243
1299 1244
@@ -1606,9 +1551,6 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1606 get_page(ac->ac_bitmap_page); 1551 get_page(ac->ac_bitmap_page);
1607 ac->ac_buddy_page = e4b->bd_buddy_page; 1552 ac->ac_buddy_page = e4b->bd_buddy_page;
1608 get_page(ac->ac_buddy_page); 1553 get_page(ac->ac_buddy_page);
1609 /* on allocation we use ac to track the held semaphore */
1610 ac->alloc_semp = e4b->alloc_semp;
1611 e4b->alloc_semp = NULL;
1612 /* store last allocated for subsequent stream allocation */ 1554 /* store last allocated for subsequent stream allocation */
1613 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 1555 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1614 spin_lock(&sbi->s_md_lock); 1556 spin_lock(&sbi->s_md_lock);
@@ -2659,7 +2601,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2659 struct super_block *sb = journal->j_private; 2601 struct super_block *sb = journal->j_private;
2660 struct ext4_buddy e4b; 2602 struct ext4_buddy e4b;
2661 struct ext4_group_info *db; 2603 struct ext4_group_info *db;
2662 int err, ret, count = 0, count2 = 0; 2604 int err, count = 0, count2 = 0;
2663 struct ext4_free_data *entry; 2605 struct ext4_free_data *entry;
2664 struct list_head *l, *ltmp; 2606 struct list_head *l, *ltmp;
2665 2607
@@ -2669,15 +2611,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2669 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", 2611 mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2670 entry->count, entry->group, entry); 2612 entry->count, entry->group, entry);
2671 2613
2672 if (test_opt(sb, DISCARD)) { 2614 if (test_opt(sb, DISCARD))
2673 ret = ext4_issue_discard(sb, entry->group, 2615 ext4_issue_discard(sb, entry->group,
2674 entry->start_blk, entry->count); 2616 entry->start_blk, entry->count);
2675 if (unlikely(ret == -EOPNOTSUPP)) {
2676 ext4_warning(sb, "discard not supported, "
2677 "disabling");
2678 clear_opt(sb, DISCARD);
2679 }
2680 }
2681 2617
2682 err = ext4_mb_load_buddy(sb, entry->group, &e4b); 2618 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2683 /* we expect to find existing buddy because it's pinned */ 2619 /* we expect to find existing buddy because it's pinned */
@@ -4226,15 +4162,12 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4226 spin_unlock(&pa->pa_lock); 4162 spin_unlock(&pa->pa_lock);
4227 } 4163 }
4228 } 4164 }
4229 if (ac->alloc_semp)
4230 up_read(ac->alloc_semp);
4231 if (pa) { 4165 if (pa) {
4232 /* 4166 /*
4233 * We want to add the pa to the right bucket. 4167 * We want to add the pa to the right bucket.
4234 * Remove it from the list and while adding 4168 * Remove it from the list and while adding
4235 * make sure the list to which we are adding 4169 * make sure the list to which we are adding
4236 * doesn't grow big. We need to release 4170 * doesn't grow big.
4237 * alloc_semp before calling ext4_mb_add_n_trim()
4238 */ 4171 */
4239 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { 4172 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
4240 spin_lock(pa->pa_obj_lock); 4173 spin_lock(pa->pa_obj_lock);
@@ -4303,7 +4236,9 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4303 * there is enough free blocks to do block allocation 4236 * there is enough free blocks to do block allocation
4304 * and verify allocation doesn't exceed the quota limits. 4237 * and verify allocation doesn't exceed the quota limits.
4305 */ 4238 */
4306 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { 4239 while (ar->len &&
4240 ext4_claim_free_blocks(sbi, ar->len, ar->flags)) {
4241
4307 /* let others to free the space */ 4242 /* let others to free the space */
4308 yield(); 4243 yield();
4309 ar->len = ar->len >> 1; 4244 ar->len = ar->len >> 1;
@@ -4313,9 +4248,15 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4313 return 0; 4248 return 0;
4314 } 4249 }
4315 reserv_blks = ar->len; 4250 reserv_blks = ar->len;
4316 while (ar->len && dquot_alloc_block(ar->inode, ar->len)) { 4251 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
4317 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 4252 dquot_alloc_block_nofail(ar->inode, ar->len);
4318 ar->len--; 4253 } else {
4254 while (ar->len &&
4255 dquot_alloc_block(ar->inode, ar->len)) {
4256
4257 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4258 ar->len--;
4259 }
4319 } 4260 }
4320 inquota = ar->len; 4261 inquota = ar->len;
4321 if (ar->len == 0) { 4262 if (ar->len == 0) {
@@ -4704,6 +4645,127 @@ error_return:
4704} 4645}
4705 4646
4706/** 4647/**
4648 * ext4_add_groupblocks() -- Add given blocks to an existing group
4649 * @handle: handle to this transaction
4650 * @sb: super block
 4651 * @block: start physical block to add to the block group
4652 * @count: number of blocks to free
4653 *
4654 * This marks the blocks as free in the bitmap and buddy.
4655 */
4656void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
4657 ext4_fsblk_t block, unsigned long count)
4658{
4659 struct buffer_head *bitmap_bh = NULL;
4660 struct buffer_head *gd_bh;
4661 ext4_group_t block_group;
4662 ext4_grpblk_t bit;
4663 unsigned int i;
4664 struct ext4_group_desc *desc;
4665 struct ext4_sb_info *sbi = EXT4_SB(sb);
4666 struct ext4_buddy e4b;
4667 int err = 0, ret, blk_free_count;
4668 ext4_grpblk_t blocks_freed;
4669 struct ext4_group_info *grp;
4670
4671 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4672
4673 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4674 grp = ext4_get_group_info(sb, block_group);
4675 /*
4676 * Check to see if we are freeing blocks across a group
4677 * boundary.
4678 */
4679 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb))
4680 goto error_return;
4681
4682 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4683 if (!bitmap_bh)
4684 goto error_return;
4685 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
4686 if (!desc)
4687 goto error_return;
4688
4689 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4690 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4691 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4692 in_range(block + count - 1, ext4_inode_table(sb, desc),
4693 sbi->s_itb_per_group)) {
4694 ext4_error(sb, "Adding blocks in system zones - "
4695 "Block = %llu, count = %lu",
4696 block, count);
4697 goto error_return;
4698 }
4699
4700 BUFFER_TRACE(bitmap_bh, "getting write access");
4701 err = ext4_journal_get_write_access(handle, bitmap_bh);
4702 if (err)
4703 goto error_return;
4704
4705 /*
4706 * We are about to modify some metadata. Call the journal APIs
4707 * to unshare ->b_data if a currently-committing transaction is
4708 * using it
4709 */
4710 BUFFER_TRACE(gd_bh, "get_write_access");
4711 err = ext4_journal_get_write_access(handle, gd_bh);
4712 if (err)
4713 goto error_return;
4714
4715 for (i = 0, blocks_freed = 0; i < count; i++) {
4716 BUFFER_TRACE(bitmap_bh, "clear bit");
4717 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
4718 ext4_error(sb, "bit already cleared for block %llu",
4719 (ext4_fsblk_t)(block + i));
4720 BUFFER_TRACE(bitmap_bh, "bit already cleared");
4721 } else {
4722 blocks_freed++;
4723 }
4724 }
4725
4726 err = ext4_mb_load_buddy(sb, block_group, &e4b);
4727 if (err)
4728 goto error_return;
4729
4730 /*
 4731 * We need to update group_info->bb_free and the bitmap
 4732 * with the group lock held. ext4_mb_generate_buddy() looks
 4733 * at them with the group lock held.
4734 */
4735 ext4_lock_group(sb, block_group);
4736 mb_clear_bits(bitmap_bh->b_data, bit, count);
4737 mb_free_blocks(NULL, &e4b, bit, count);
4738 blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
4739 ext4_free_blks_set(sb, desc, blk_free_count);
4740 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
4741 ext4_unlock_group(sb, block_group);
4742 percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
4743
4744 if (sbi->s_log_groups_per_flex) {
4745 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4746 atomic_add(blocks_freed,
4747 &sbi->s_flex_groups[flex_group].free_blocks);
4748 }
4749
4750 ext4_mb_unload_buddy(&e4b);
4751
4752 /* We dirtied the bitmap block */
4753 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4754 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4755
4756 /* And the group descriptor block */
4757 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4758 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
4759 if (!err)
4760 err = ret;
4761
4762error_return:
4763 brelse(bitmap_bh);
4764 ext4_std_error(sb, err);
4765 return;
4766}
4767
4768/**
4707 * ext4_trim_extent -- function to TRIM one single free extent in the group 4769 * ext4_trim_extent -- function to TRIM one single free extent in the group
4708 * @sb: super block for the file system 4770 * @sb: super block for the file system
4709 * @start: starting block of the free extent in the alloc. group 4771 * @start: starting block of the free extent in the alloc. group
@@ -4715,11 +4777,10 @@ error_return:
4715 * one will allocate those blocks, mark it as used in buddy bitmap. This must 4777 * one will allocate those blocks, mark it as used in buddy bitmap. This must
 4716 * be called under the group lock. 4778 * be called under the group lock.
4717 */ 4779 */
4718static int ext4_trim_extent(struct super_block *sb, int start, int count, 4780static void ext4_trim_extent(struct super_block *sb, int start, int count,
4719 ext4_group_t group, struct ext4_buddy *e4b) 4781 ext4_group_t group, struct ext4_buddy *e4b)
4720{ 4782{
4721 struct ext4_free_extent ex; 4783 struct ext4_free_extent ex;
4722 int ret = 0;
4723 4784
4724 assert_spin_locked(ext4_group_lock_ptr(sb, group)); 4785 assert_spin_locked(ext4_group_lock_ptr(sb, group));
4725 4786
@@ -4733,12 +4794,9 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
4733 */ 4794 */
4734 mb_mark_used(e4b, &ex); 4795 mb_mark_used(e4b, &ex);
4735 ext4_unlock_group(sb, group); 4796 ext4_unlock_group(sb, group);
4736 4797 ext4_issue_discard(sb, group, start, count);
4737 ret = ext4_issue_discard(sb, group, start, count);
4738
4739 ext4_lock_group(sb, group); 4798 ext4_lock_group(sb, group);
4740 mb_free_blocks(NULL, e4b, start, ex.fe_len); 4799 mb_free_blocks(NULL, e4b, start, ex.fe_len);
4741 return ret;
4742} 4800}
4743 4801
4744/** 4802/**
@@ -4760,21 +4818,26 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
4760 * the group buddy bitmap. This is done until whole group is scanned. 4818 * the group buddy bitmap. This is done until whole group is scanned.
4761 */ 4819 */
4762static ext4_grpblk_t 4820static ext4_grpblk_t
4763ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, 4821ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
4764 ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) 4822 ext4_grpblk_t start, ext4_grpblk_t max,
4823 ext4_grpblk_t minblocks)
4765{ 4824{
4766 void *bitmap; 4825 void *bitmap;
4767 ext4_grpblk_t next, count = 0; 4826 ext4_grpblk_t next, count = 0;
4768 ext4_group_t group; 4827 struct ext4_buddy e4b;
4769 int ret = 0; 4828 int ret;
4770 4829
4771 BUG_ON(e4b == NULL); 4830 ret = ext4_mb_load_buddy(sb, group, &e4b);
4831 if (ret) {
4832 ext4_error(sb, "Error in loading buddy "
4833 "information for %u", group);
4834 return ret;
4835 }
4836 bitmap = e4b.bd_bitmap;
4772 4837
4773 bitmap = e4b->bd_bitmap;
4774 group = e4b->bd_group;
4775 start = (e4b->bd_info->bb_first_free > start) ?
4776 e4b->bd_info->bb_first_free : start;
4777 ext4_lock_group(sb, group); 4838 ext4_lock_group(sb, group);
4839 start = (e4b.bd_info->bb_first_free > start) ?
4840 e4b.bd_info->bb_first_free : start;
4778 4841
4779 while (start < max) { 4842 while (start < max) {
4780 start = mb_find_next_zero_bit(bitmap, max, start); 4843 start = mb_find_next_zero_bit(bitmap, max, start);
@@ -4783,10 +4846,8 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
4783 next = mb_find_next_bit(bitmap, max, start); 4846 next = mb_find_next_bit(bitmap, max, start);
4784 4847
4785 if ((next - start) >= minblocks) { 4848 if ((next - start) >= minblocks) {
4786 ret = ext4_trim_extent(sb, start, 4849 ext4_trim_extent(sb, start,
4787 next - start, group, e4b); 4850 next - start, group, &e4b);
4788 if (ret < 0)
4789 break;
4790 count += next - start; 4851 count += next - start;
4791 } 4852 }
4792 start = next + 1; 4853 start = next + 1;
@@ -4802,17 +4863,15 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
4802 ext4_lock_group(sb, group); 4863 ext4_lock_group(sb, group);
4803 } 4864 }
4804 4865
4805 if ((e4b->bd_info->bb_free - count) < minblocks) 4866 if ((e4b.bd_info->bb_free - count) < minblocks)
4806 break; 4867 break;
4807 } 4868 }
4808 ext4_unlock_group(sb, group); 4869 ext4_unlock_group(sb, group);
4870 ext4_mb_unload_buddy(&e4b);
4809 4871
4810 ext4_debug("trimmed %d blocks in the group %d\n", 4872 ext4_debug("trimmed %d blocks in the group %d\n",
4811 count, group); 4873 count, group);
4812 4874
4813 if (ret < 0)
4814 count = ret;
4815
4816 return count; 4875 return count;
4817} 4876}
4818 4877
@@ -4830,11 +4889,11 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
4830 */ 4889 */
4831int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 4890int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4832{ 4891{
4833 struct ext4_buddy e4b; 4892 struct ext4_group_info *grp;
4834 ext4_group_t first_group, last_group; 4893 ext4_group_t first_group, last_group;
4835 ext4_group_t group, ngroups = ext4_get_groups_count(sb); 4894 ext4_group_t group, ngroups = ext4_get_groups_count(sb);
4836 ext4_grpblk_t cnt = 0, first_block, last_block; 4895 ext4_grpblk_t cnt = 0, first_block, last_block;
4837 uint64_t start, len, minlen, trimmed; 4896 uint64_t start, len, minlen, trimmed = 0;
4838 ext4_fsblk_t first_data_blk = 4897 ext4_fsblk_t first_data_blk =
4839 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 4898 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
4840 int ret = 0; 4899 int ret = 0;
@@ -4842,7 +4901,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4842 start = range->start >> sb->s_blocksize_bits; 4901 start = range->start >> sb->s_blocksize_bits;
4843 len = range->len >> sb->s_blocksize_bits; 4902 len = range->len >> sb->s_blocksize_bits;
4844 minlen = range->minlen >> sb->s_blocksize_bits; 4903 minlen = range->minlen >> sb->s_blocksize_bits;
4845 trimmed = 0;
4846 4904
4847 if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb))) 4905 if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
4848 return -EINVAL; 4906 return -EINVAL;
@@ -4863,11 +4921,12 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4863 return -EINVAL; 4921 return -EINVAL;
4864 4922
4865 for (group = first_group; group <= last_group; group++) { 4923 for (group = first_group; group <= last_group; group++) {
4866 ret = ext4_mb_load_buddy(sb, group, &e4b); 4924 grp = ext4_get_group_info(sb, group);
4867 if (ret) { 4925 /* We only do this if the grp has never been initialized */
4868 ext4_error(sb, "Error in loading buddy " 4926 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
4869 "information for %u", group); 4927 ret = ext4_mb_init_group(sb, group);
4870 break; 4928 if (ret)
4929 break;
4871 } 4930 }
4872 4931
4873 /* 4932 /*
@@ -4880,16 +4939,14 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4880 last_block = first_block + len; 4939 last_block = first_block + len;
4881 len -= last_block - first_block; 4940 len -= last_block - first_block;
4882 4941
4883 if (e4b.bd_info->bb_free >= minlen) { 4942 if (grp->bb_free >= minlen) {
4884 cnt = ext4_trim_all_free(sb, &e4b, first_block, 4943 cnt = ext4_trim_all_free(sb, group, first_block,
4885 last_block, minlen); 4944 last_block, minlen);
4886 if (cnt < 0) { 4945 if (cnt < 0) {
4887 ret = cnt; 4946 ret = cnt;
4888 ext4_mb_unload_buddy(&e4b);
4889 break; 4947 break;
4890 } 4948 }
4891 } 4949 }
4892 ext4_mb_unload_buddy(&e4b);
4893 trimmed += cnt; 4950 trimmed += cnt;
4894 first_block = 0; 4951 first_block = 0;
4895 } 4952 }
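
The mballoc.c hunks above replace the old per-group alloc_sem scheme with plain page locks on the buddy-cache pages. That only works because each group always occupies two consecutive blocks in the buddy-cache inode (block bitmap first, then buddy), so a group maps to at most two page-cache pages, and ext4_mb_get_buddy_page_lock() can lock exactly those. A minimal user-space sketch of that index arithmetic, assuming a 4 KiB page size (illustrative only, not part of the patch):

/*
 * Sketch: how a group number maps to page indices in the buddy-cache
 * inode, mirroring the arithmetic in ext4_mb_get_buddy_page_lock().
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u

static void map_group(unsigned int group, unsigned int blocksize)
{
	unsigned int blocks_per_page = PAGE_CACHE_SIZE / blocksize;
	unsigned int block = group * 2;		/* bitmap block of this group */
	unsigned int bitmap_pnum = block / blocks_per_page;
	unsigned int buddy_pnum = (block + 1) / blocks_per_page;

	printf("group %u, blocksize %u: bitmap page %u, buddy page %u%s\n",
	       group, blocksize, bitmap_pnum, buddy_pnum,
	       bitmap_pnum == buddy_pnum ? " (same page, one lock taken)" :
					   " (two pages, two locks taken)");
}

int main(void)
{
	map_group(0, 4096);	/* one block per page: bitmap and buddy on different pages */
	map_group(1, 4096);
	map_group(0, 1024);	/* four blocks per page: bitmap and buddy share a page */
	map_group(1, 1024);
	return 0;
}

With 4 KiB blocks every group needs both pages locked; with smaller blocks the pair lands on a single page and e4b->bd_buddy_page stays NULL, matching the blocks_per_page >= 2 early return in the new helper.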
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 22bd4d7f289b..20b5e7bfebd1 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -193,11 +193,6 @@ struct ext4_allocation_context {
193 __u8 ac_op; /* operation, for history only */ 193 __u8 ac_op; /* operation, for history only */
194 struct page *ac_bitmap_page; 194 struct page *ac_bitmap_page;
195 struct page *ac_buddy_page; 195 struct page *ac_buddy_page;
196 /*
197 * pointer to the held semaphore upon successful
198 * block allocation
199 */
200 struct rw_semaphore *alloc_semp;
201 struct ext4_prealloc_space *ac_pa; 196 struct ext4_prealloc_space *ac_pa;
202 struct ext4_locality_group *ac_lg; 197 struct ext4_locality_group *ac_lg;
203}; 198};
@@ -215,7 +210,6 @@ struct ext4_buddy {
215 struct super_block *bd_sb; 210 struct super_block *bd_sb;
216 __u16 bd_blkbits; 211 __u16 bd_blkbits;
217 ext4_group_t bd_group; 212 ext4_group_t bd_group;
218 struct rw_semaphore *alloc_semp;
219}; 213};
220#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) 214#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
221#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) 215#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 92816b4e0f16..b57b98fb44d1 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -376,7 +376,7 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
376 * We have the extent map build with the tmp inode. 376 * We have the extent map build with the tmp inode.
377 * Now copy the i_data across 377 * Now copy the i_data across
378 */ 378 */
379 ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS); 379 ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
380 memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data)); 380 memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));
381 381
382 /* 382 /*
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
new file mode 100644
index 000000000000..9bdef3f537c5
--- /dev/null
+++ b/fs/ext4/mmp.c
@@ -0,0 +1,351 @@
1#include <linux/fs.h>
2#include <linux/random.h>
3#include <linux/buffer_head.h>
4#include <linux/utsname.h>
5#include <linux/kthread.h>
6
7#include "ext4.h"
8
9/*
10 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
11 * faster.
12 */
13static int write_mmp_block(struct buffer_head *bh)
14{
15 mark_buffer_dirty(bh);
16 lock_buffer(bh);
17 bh->b_end_io = end_buffer_write_sync;
18 get_bh(bh);
19 submit_bh(WRITE_SYNC, bh);
20 wait_on_buffer(bh);
21 if (unlikely(!buffer_uptodate(bh)))
22 return 1;
23
24 return 0;
25}
26
27/*
28 * Read the MMP block. It _must_ be read from disk and hence we clear the
29 * uptodate flag on the buffer.
30 */
31static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
32 ext4_fsblk_t mmp_block)
33{
34 struct mmp_struct *mmp;
35
36 if (*bh)
37 clear_buffer_uptodate(*bh);
38
39 /* This would be sb_bread(sb, mmp_block), except we need to be sure
40 * that the MD RAID device cache has been bypassed, and that the read
41 * is not blocked in the elevator. */
42 if (!*bh)
43 *bh = sb_getblk(sb, mmp_block);
44 if (*bh) {
45 get_bh(*bh);
46 lock_buffer(*bh);
47 (*bh)->b_end_io = end_buffer_read_sync;
48 submit_bh(READ_SYNC, *bh);
49 wait_on_buffer(*bh);
50 if (!buffer_uptodate(*bh)) {
51 brelse(*bh);
52 *bh = NULL;
53 }
54 }
55 if (!*bh) {
56 ext4_warning(sb, "Error while reading MMP block %llu",
57 mmp_block);
58 return -EIO;
59 }
60
61 mmp = (struct mmp_struct *)((*bh)->b_data);
62 if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
63 return -EINVAL;
64
65 return 0;
66}
67
68/*
69 * Dump as much information as possible to help the admin.
70 */
71void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
72 const char *function, unsigned int line, const char *msg)
73{
74 __ext4_warning(sb, function, line, msg);
75 __ext4_warning(sb, function, line,
76 "MMP failure info: last update time: %llu, last update "
77 "node: %s, last update device: %s\n",
78 (long long unsigned int) le64_to_cpu(mmp->mmp_time),
79 mmp->mmp_nodename, mmp->mmp_bdevname);
80}
81
82/*
83 * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
84 */
85static int kmmpd(void *data)
86{
87 struct super_block *sb = ((struct mmpd_data *) data)->sb;
88 struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
89 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
90 struct mmp_struct *mmp;
91 ext4_fsblk_t mmp_block;
92 u32 seq = 0;
93 unsigned long failed_writes = 0;
94 int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
95 unsigned mmp_check_interval;
96 unsigned long last_update_time;
97 unsigned long diff;
98 int retval;
99
100 mmp_block = le64_to_cpu(es->s_mmp_block);
101 mmp = (struct mmp_struct *)(bh->b_data);
102 mmp->mmp_time = cpu_to_le64(get_seconds());
103 /*
104 * Start with the higher mmp_check_interval and reduce it if
105 * the MMP block is being updated on time.
106 */
107 mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
108 EXT4_MMP_MIN_CHECK_INTERVAL);
109 mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
110 bdevname(bh->b_bdev, mmp->mmp_bdevname);
111
112 memcpy(mmp->mmp_nodename, init_utsname()->sysname,
113 sizeof(mmp->mmp_nodename));
114
115 while (!kthread_should_stop()) {
116 if (++seq > EXT4_MMP_SEQ_MAX)
117 seq = 1;
118
119 mmp->mmp_seq = cpu_to_le32(seq);
120 mmp->mmp_time = cpu_to_le64(get_seconds());
121 last_update_time = jiffies;
122
123 retval = write_mmp_block(bh);
124 /*
125 * Don't spew too many error messages. Print one every
126 * (s_mmp_update_interval * 60) seconds.
127 */
128 if (retval && (failed_writes % 60) == 0) {
129 ext4_error(sb, "Error writing to MMP block");
130 failed_writes++;
131 }
132
133 if (!(le32_to_cpu(es->s_feature_incompat) &
134 EXT4_FEATURE_INCOMPAT_MMP)) {
135 ext4_warning(sb, "kmmpd being stopped since MMP feature"
136 " has been disabled.");
137 EXT4_SB(sb)->s_mmp_tsk = NULL;
138 goto failed;
139 }
140
141 if (sb->s_flags & MS_RDONLY) {
142 ext4_warning(sb, "kmmpd being stopped since filesystem "
143 "has been remounted as readonly.");
144 EXT4_SB(sb)->s_mmp_tsk = NULL;
145 goto failed;
146 }
147
148 diff = jiffies - last_update_time;
149 if (diff < mmp_update_interval * HZ)
150 schedule_timeout_interruptible(mmp_update_interval *
151 HZ - diff);
152
153 /*
154 * We need to make sure that more than mmp_check_interval
155 * seconds have not passed since writing. If that has happened
156 * we need to check if the MMP block is as we left it.
157 */
158 diff = jiffies - last_update_time;
159 if (diff > mmp_check_interval * HZ) {
160 struct buffer_head *bh_check = NULL;
161 struct mmp_struct *mmp_check;
162
163 retval = read_mmp_block(sb, &bh_check, mmp_block);
164 if (retval) {
165 ext4_error(sb, "error reading MMP data: %d",
166 retval);
167
168 EXT4_SB(sb)->s_mmp_tsk = NULL;
169 goto failed;
170 }
171
172 mmp_check = (struct mmp_struct *)(bh_check->b_data);
173 if (mmp->mmp_seq != mmp_check->mmp_seq ||
174 memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
175 sizeof(mmp->mmp_nodename))) {
176 dump_mmp_msg(sb, mmp_check,
177 "Error while updating MMP info. "
178 "The filesystem seems to have been"
179 " multiply mounted.");
180 ext4_error(sb, "abort");
181 goto failed;
182 }
183 put_bh(bh_check);
184 }
185
186 /*
187 * Adjust the mmp_check_interval depending on how much time
188 * it took for the MMP block to be written.
189 */
190 mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
191 EXT4_MMP_MAX_CHECK_INTERVAL),
192 EXT4_MMP_MIN_CHECK_INTERVAL);
193 mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
194 }
195
196 /*
197 * Unmount seems to be clean.
198 */
199 mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
200 mmp->mmp_time = cpu_to_le64(get_seconds());
201
202 retval = write_mmp_block(bh);
203
204failed:
205 kfree(data);
206 brelse(bh);
207 return retval;
208}
209
210/*
211 * Get a random new sequence number but make sure it is not greater than
212 * EXT4_MMP_SEQ_MAX.
213 */
214static unsigned int mmp_new_seq(void)
215{
216 u32 new_seq;
217
218 do {
219 get_random_bytes(&new_seq, sizeof(u32));
220 } while (new_seq > EXT4_MMP_SEQ_MAX);
221
222 return new_seq;
223}
224
225/*
226 * Protect the filesystem from being mounted more than once.
227 */
228int ext4_multi_mount_protect(struct super_block *sb,
229 ext4_fsblk_t mmp_block)
230{
231 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
232 struct buffer_head *bh = NULL;
233 struct mmp_struct *mmp = NULL;
234 struct mmpd_data *mmpd_data;
235 u32 seq;
236 unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
237 unsigned int wait_time = 0;
238 int retval;
239
240 if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
241 mmp_block >= ext4_blocks_count(es)) {
242 ext4_warning(sb, "Invalid MMP block in superblock");
243 goto failed;
244 }
245
246 retval = read_mmp_block(sb, &bh, mmp_block);
247 if (retval)
248 goto failed;
249
250 mmp = (struct mmp_struct *)(bh->b_data);
251
252 if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
253 mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
254
255 /*
256 * If check_interval in MMP block is larger, use that instead of
257 * update_interval from the superblock.
258 */
259 if (mmp->mmp_check_interval > mmp_check_interval)
260 mmp_check_interval = mmp->mmp_check_interval;
261
262 seq = le32_to_cpu(mmp->mmp_seq);
263 if (seq == EXT4_MMP_SEQ_CLEAN)
264 goto skip;
265
266 if (seq == EXT4_MMP_SEQ_FSCK) {
267 dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
268 goto failed;
269 }
270
271 wait_time = min(mmp_check_interval * 2 + 1,
272 mmp_check_interval + 60);
273
274 /* Print MMP interval if more than 20 secs. */
275 if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
276 ext4_warning(sb, "MMP interval %u higher than expected, please"
277 " wait.\n", wait_time * 2);
278
279 if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
280 ext4_warning(sb, "MMP startup interrupted, failing mount\n");
281 goto failed;
282 }
283
284 retval = read_mmp_block(sb, &bh, mmp_block);
285 if (retval)
286 goto failed;
287 mmp = (struct mmp_struct *)(bh->b_data);
288 if (seq != le32_to_cpu(mmp->mmp_seq)) {
289 dump_mmp_msg(sb, mmp,
290 "Device is already active on another node.");
291 goto failed;
292 }
293
294skip:
295 /*
296 * write a new random sequence number.
297 */
298 mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq());
299
300 retval = write_mmp_block(bh);
301 if (retval)
302 goto failed;
303
304 /*
305 * wait for MMP interval and check mmp_seq.
306 */
307 if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
308 ext4_warning(sb, "MMP startup interrupted, failing mount\n");
309 goto failed;
310 }
311
312 retval = read_mmp_block(sb, &bh, mmp_block);
313 if (retval)
314 goto failed;
315 mmp = (struct mmp_struct *)(bh->b_data);
316 if (seq != le32_to_cpu(mmp->mmp_seq)) {
317 dump_mmp_msg(sb, mmp,
318 "Device is already active on another node.");
319 goto failed;
320 }
321
322 mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL);
323 if (!mmpd_data) {
324 ext4_warning(sb, "not enough memory for mmpd_data");
325 goto failed;
326 }
327 mmpd_data->sb = sb;
328 mmpd_data->bh = bh;
329
330 /*
331 * Start a kernel thread to update the MMP block periodically.
332 */
333 EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
334 bdevname(bh->b_bdev,
335 mmp->mmp_bdevname));
336 if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
337 EXT4_SB(sb)->s_mmp_tsk = NULL;
338 kfree(mmpd_data);
339 ext4_warning(sb, "Unable to create kmmpd thread for %s.",
340 sb->s_id);
341 goto failed;
342 }
343
344 return 0;
345
346failed:
347 brelse(bh);
348 return 1;
349}
350
351
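
ext4_multi_mount_protect() above boils down to a sequence-number handshake: read the MMP block, write a fresh random sequence, sleep for roughly one check interval, re-read, and refuse the mount if the sequence moved underneath us, since that means another node is updating the block. A condensed user-space model of that handshake (the EXT4_MMP_SEQ_CLEAN/FSCK special cases and the buffer_head I/O are stubbed out; the names and the modulus below are illustrative, not kernel code):

/* Stand-alone model of the MMP mount handshake against an in-memory "block". */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct mmp { unsigned int seq; };

static struct mmp shared_block = { .seq = 42 };	/* stands in for the on-disk MMP block */

static void read_mmp(struct mmp *m)        { *m = shared_block; }
static void write_mmp(const struct mmp *m) { shared_block = *m; }

static int try_mount(unsigned int wait_secs)
{
	struct mmp m;
	unsigned int my_seq = 1 + (unsigned int)rand() % 0x0ffffffu; /* well below the special marker values */

	read_mmp(&m);
	m.seq = my_seq;
	write_mmp(&m);		/* claim the block with our own sequence */

	sleep(wait_secs);	/* give any concurrent mounter time to bump it */

	read_mmp(&m);
	if (m.seq != my_seq) {
		fprintf(stderr, "device is already active on another node\n");
		return 1;
	}
	printf("mount allowed; kmmpd would now keep advancing seq %u\n", my_seq);
	return 0;
}

int main(void)
{
	return try_mount(1);
}

The real code performs the wait-and-compare twice (once against the sequence it found, once against the one it wrote) and then hands the buffer to the kmmpd thread, which keeps advancing the sequence every s_mmp_update_interval seconds.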
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index b9f3e7862f13..2b8304bf3c50 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -876,8 +876,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
876 * It needs to call wait_on_page_writeback() to wait for the 876 * It needs to call wait_on_page_writeback() to wait for the
877 * writeback of the page. 877 * writeback of the page.
878 */ 878 */
879 if (PageWriteback(page)) 879 wait_on_page_writeback(page);
880 wait_on_page_writeback(page);
881 880
882 /* Release old bh and drop refs */ 881 /* Release old bh and drop refs */
883 try_to_release_page(page, 0); 882 try_to_release_page(page, 0);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 67fd0b025858..b754b7721f51 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1413,10 +1413,22 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1413 frame->at = entries; 1413 frame->at = entries;
1414 frame->bh = bh; 1414 frame->bh = bh;
1415 bh = bh2; 1415 bh = bh2;
1416
1417 ext4_handle_dirty_metadata(handle, dir, frame->bh);
1418 ext4_handle_dirty_metadata(handle, dir, bh);
1419
1416 de = do_split(handle,dir, &bh, frame, &hinfo, &retval); 1420 de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1417 dx_release (frames); 1421 if (!de) {
1418 if (!(de)) 1422 /*
1423 * Even if the block split failed, we have to properly write
1424 * out all the changes we did so far. Otherwise we can end up
 1425 * with a corrupted filesystem.
1426 */
1427 ext4_mark_inode_dirty(handle, dir);
1428 dx_release(frames);
1419 return retval; 1429 return retval;
1430 }
1431 dx_release(frames);
1420 1432
1421 retval = add_dirent_to_buf(handle, dentry, inode, de, bh); 1433 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
1422 brelse(bh); 1434 brelse(bh);
@@ -2240,6 +2252,7 @@ static int ext4_symlink(struct inode *dir,
2240 handle_t *handle; 2252 handle_t *handle;
2241 struct inode *inode; 2253 struct inode *inode;
2242 int l, err, retries = 0; 2254 int l, err, retries = 0;
2255 int credits;
2243 2256
2244 l = strlen(symname)+1; 2257 l = strlen(symname)+1;
2245 if (l > dir->i_sb->s_blocksize) 2258 if (l > dir->i_sb->s_blocksize)
@@ -2247,10 +2260,26 @@ static int ext4_symlink(struct inode *dir,
2247 2260
2248 dquot_initialize(dir); 2261 dquot_initialize(dir);
2249 2262
2263 if (l > EXT4_N_BLOCKS * 4) {
2264 /*
 2265 * For non-fast symlinks, we just allocate the inode and put it on
 2266 * the orphan list in the first transaction => we need bitmap,
2267 * group descriptor, sb, inode block, quota blocks.
2268 */
2269 credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
2270 } else {
2271 /*
2272 * Fast symlink. We have to add entry to directory
2273 * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
2274 * allocate new inode (bitmap, group descriptor, inode block,
2275 * quota blocks, sb is already counted in previous macros).
2276 */
2277 credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2278 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
2279 EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
2280 }
2250retry: 2281retry:
2251 handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + 2282 handle = ext4_journal_start(dir, credits);
2252 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
2253 EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
2254 if (IS_ERR(handle)) 2283 if (IS_ERR(handle))
2255 return PTR_ERR(handle); 2284 return PTR_ERR(handle);
2256 2285
@@ -2263,21 +2292,44 @@ retry:
2263 if (IS_ERR(inode)) 2292 if (IS_ERR(inode))
2264 goto out_stop; 2293 goto out_stop;
2265 2294
2266 if (l > sizeof(EXT4_I(inode)->i_data)) { 2295 if (l > EXT4_N_BLOCKS * 4) {
2267 inode->i_op = &ext4_symlink_inode_operations; 2296 inode->i_op = &ext4_symlink_inode_operations;
2268 ext4_set_aops(inode); 2297 ext4_set_aops(inode);
2269 /* 2298 /*
2270 * page_symlink() calls into ext4_prepare/commit_write. 2299 * We cannot call page_symlink() with transaction started
2271 * We have a transaction open. All is sweetness. It also sets 2300 * because it calls into ext4_write_begin() which can wait
2272 * i_size in generic_commit_write(). 2301 * for transaction commit if we are running out of space
 2302 * and thus we deadlock. So we have to stop the transaction now
 2303 * and restart it once the symlink contents are written.
 2304 *
 2305 * To keep the fs consistent in case of a crash, we have to put
 2306 * the inode on the orphan list in the meantime.
2273 */ 2307 */
2308 drop_nlink(inode);
2309 err = ext4_orphan_add(handle, inode);
2310 ext4_journal_stop(handle);
2311 if (err)
2312 goto err_drop_inode;
2274 err = __page_symlink(inode, symname, l, 1); 2313 err = __page_symlink(inode, symname, l, 1);
2314 if (err)
2315 goto err_drop_inode;
2316 /*
 2317 * Now the inode is being linked into the dir (EXT4_DATA_TRANS_BLOCKS
 2318 * + EXT4_INDEX_EXTRA_TRANS_BLOCKS); the inode itself is also modified
2319 */
2320 handle = ext4_journal_start(dir,
2321 EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
2322 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
2323 if (IS_ERR(handle)) {
2324 err = PTR_ERR(handle);
2325 goto err_drop_inode;
2326 }
2327 inc_nlink(inode);
2328 err = ext4_orphan_del(handle, inode);
2275 if (err) { 2329 if (err) {
2330 ext4_journal_stop(handle);
2276 clear_nlink(inode); 2331 clear_nlink(inode);
2277 unlock_new_inode(inode); 2332 goto err_drop_inode;
2278 ext4_mark_inode_dirty(handle, inode);
2279 iput(inode);
2280 goto out_stop;
2281 } 2333 }
2282 } else { 2334 } else {
2283 /* clear the extent format for fast symlink */ 2335 /* clear the extent format for fast symlink */
@@ -2293,6 +2345,10 @@ out_stop:
2293 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) 2345 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
2294 goto retry; 2346 goto retry;
2295 return err; 2347 return err;
2348err_drop_inode:
2349 unlock_new_inode(inode);
2350 iput(inode);
2351 return err;
2296} 2352}
2297 2353
2298static int ext4_link(struct dentry *old_dentry, 2354static int ext4_link(struct dentry *old_dentry,
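
The ext4_symlink() rework above splits the slow-symlink path into two transactions so that page_symlink() never runs with a journal handle held (it can block waiting for a commit and deadlock), while the orphan list bridges the gap so a crash between the two transactions cannot leak the half-built inode. A compressed control-flow sketch of that ordering, with the journalling and VFS calls reduced to prints (illustrative only, not the patch's code):

/* Stand-alone trace of the new two-transaction slow-symlink ordering. */
#include <stdio.h>

static void journal_start(const char *why) { printf("journal_start: %s\n", why); }
static void journal_stop(void)             { printf("journal_stop\n"); }

int main(void)
{
	journal_start("allocate inode, drop_nlink(), ext4_orphan_add()");
	journal_stop();		/* no handle held while writing the symlink body */

	printf("__page_symlink(): write target path, may wait for a commit\n");

	journal_start("inc_nlink(), ext4_orphan_del(), add directory entry");
	journal_stop();
	return 0;
}

If the machine crashes after the first transaction, journal replay leaves the inode on the orphan list with a zero link count, so recovery simply frees it instead of leaking its blocks.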
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index b6dbd056fcb1..7bb8f76d470a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -203,46 +203,29 @@ static void ext4_end_bio(struct bio *bio, int error)
203 for (i = 0; i < io_end->num_io_pages; i++) { 203 for (i = 0; i < io_end->num_io_pages; i++) {
204 struct page *page = io_end->pages[i]->p_page; 204 struct page *page = io_end->pages[i]->p_page;
205 struct buffer_head *bh, *head; 205 struct buffer_head *bh, *head;
206 int partial_write = 0; 206 loff_t offset;
207 loff_t io_end_offset;
207 208
208 head = page_buffers(page); 209 if (error) {
209 if (error)
210 SetPageError(page); 210 SetPageError(page);
211 BUG_ON(!head); 211 set_bit(AS_EIO, &page->mapping->flags);
212 if (head->b_size != PAGE_CACHE_SIZE) { 212 head = page_buffers(page);
213 loff_t offset; 213 BUG_ON(!head);
214 loff_t io_end_offset = io_end->offset + io_end->size; 214
215 io_end_offset = io_end->offset + io_end->size;
215 216
216 offset = (sector_t) page->index << PAGE_CACHE_SHIFT; 217 offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
217 bh = head; 218 bh = head;
218 do { 219 do {
219 if ((offset >= io_end->offset) && 220 if ((offset >= io_end->offset) &&
220 (offset+bh->b_size <= io_end_offset)) { 221 (offset+bh->b_size <= io_end_offset))
221 if (error) 222 buffer_io_error(bh);
222 buffer_io_error(bh); 223
223
224 }
225 if (buffer_delay(bh))
226 partial_write = 1;
227 else if (!buffer_mapped(bh))
228 clear_buffer_dirty(bh);
229 else if (buffer_dirty(bh))
230 partial_write = 1;
231 offset += bh->b_size; 224 offset += bh->b_size;
232 bh = bh->b_this_page; 225 bh = bh->b_this_page;
233 } while (bh != head); 226 } while (bh != head);
234 } 227 }
235 228
236 /*
237 * If this is a partial write which happened to make
238 * all buffers uptodate then we can optimize away a
239 * bogus readpage() for the next read(). Here we
240 * 'discover' whether the page went uptodate as a
241 * result of this (potentially partial) write.
242 */
243 if (!partial_write)
244 SetPageUptodate(page);
245
246 put_io_page(io_end->pages[i]); 229 put_io_page(io_end->pages[i]);
247 } 230 }
248 io_end->num_io_pages = 0; 231 io_end->num_io_pages = 0;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8553dfb310af..cc5c157aa11d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -38,6 +38,7 @@
38#include <linux/ctype.h> 38#include <linux/ctype.h>
39#include <linux/log2.h> 39#include <linux/log2.h>
40#include <linux/crc16.h> 40#include <linux/crc16.h>
41#include <linux/cleancache.h>
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42 43
43#include <linux/kthread.h> 44#include <linux/kthread.h>
@@ -75,11 +76,27 @@ static void ext4_write_super(struct super_block *sb);
75static int ext4_freeze(struct super_block *sb); 76static int ext4_freeze(struct super_block *sb);
76static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, 77static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
77 const char *dev_name, void *data); 78 const char *dev_name, void *data);
79static inline int ext2_feature_set_ok(struct super_block *sb);
80static inline int ext3_feature_set_ok(struct super_block *sb);
78static int ext4_feature_set_ok(struct super_block *sb, int readonly); 81static int ext4_feature_set_ok(struct super_block *sb, int readonly);
79static void ext4_destroy_lazyinit_thread(void); 82static void ext4_destroy_lazyinit_thread(void);
80static void ext4_unregister_li_request(struct super_block *sb); 83static void ext4_unregister_li_request(struct super_block *sb);
81static void ext4_clear_request_list(void); 84static void ext4_clear_request_list(void);
82 85
86#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
87static struct file_system_type ext2_fs_type = {
88 .owner = THIS_MODULE,
89 .name = "ext2",
90 .mount = ext4_mount,
91 .kill_sb = kill_block_super,
92 .fs_flags = FS_REQUIRES_DEV,
93};
94#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
95#else
96#define IS_EXT2_SB(sb) (0)
97#endif
98
99
83#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) 100#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
84static struct file_system_type ext3_fs_type = { 101static struct file_system_type ext3_fs_type = {
85 .owner = THIS_MODULE, 102 .owner = THIS_MODULE,
@@ -806,6 +823,8 @@ static void ext4_put_super(struct super_block *sb)
806 invalidate_bdev(sbi->journal_bdev); 823 invalidate_bdev(sbi->journal_bdev);
807 ext4_blkdev_remove(sbi); 824 ext4_blkdev_remove(sbi);
808 } 825 }
826 if (sbi->s_mmp_tsk)
827 kthread_stop(sbi->s_mmp_tsk);
809 sb->s_fs_info = NULL; 828 sb->s_fs_info = NULL;
810 /* 829 /*
811 * Now that we are completely done shutting down the 830 * Now that we are completely done shutting down the
@@ -1096,7 +1115,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1096 1115
1097 if (!test_opt(sb, INIT_INODE_TABLE)) 1116 if (!test_opt(sb, INIT_INODE_TABLE))
1098 seq_puts(seq, ",noinit_inode_table"); 1117 seq_puts(seq, ",noinit_inode_table");
1099 else if (sbi->s_li_wait_mult) 1118 else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
1100 seq_printf(seq, ",init_inode_table=%u", 1119 seq_printf(seq, ",init_inode_table=%u",
1101 (unsigned) sbi->s_li_wait_mult); 1120 (unsigned) sbi->s_li_wait_mult);
1102 1121
@@ -1187,9 +1206,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
1187 const char *data, size_t len, loff_t off); 1206 const char *data, size_t len, loff_t off);
1188 1207
1189static const struct dquot_operations ext4_quota_operations = { 1208static const struct dquot_operations ext4_quota_operations = {
1190#ifdef CONFIG_QUOTA
1191 .get_reserved_space = ext4_get_reserved_space, 1209 .get_reserved_space = ext4_get_reserved_space,
1192#endif
1193 .write_dquot = ext4_write_dquot, 1210 .write_dquot = ext4_write_dquot,
1194 .acquire_dquot = ext4_acquire_dquot, 1211 .acquire_dquot = ext4_acquire_dquot,
1195 .release_dquot = ext4_release_dquot, 1212 .release_dquot = ext4_release_dquot,
@@ -1900,7 +1917,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1900 ext4_msg(sb, KERN_WARNING, 1917 ext4_msg(sb, KERN_WARNING,
1901 "warning: mounting fs with errors, " 1918 "warning: mounting fs with errors, "
1902 "running e2fsck is recommended"); 1919 "running e2fsck is recommended");
1903 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 && 1920 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
1904 le16_to_cpu(es->s_mnt_count) >= 1921 le16_to_cpu(es->s_mnt_count) >=
1905 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) 1922 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
1906 ext4_msg(sb, KERN_WARNING, 1923 ext4_msg(sb, KERN_WARNING,
@@ -1932,6 +1949,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1932 EXT4_INODES_PER_GROUP(sb), 1949 EXT4_INODES_PER_GROUP(sb),
1933 sbi->s_mount_opt, sbi->s_mount_opt2); 1950 sbi->s_mount_opt, sbi->s_mount_opt2);
1934 1951
1952 cleancache_init_fs(sb);
1935 return res; 1953 return res;
1936} 1954}
1937 1955
@@ -2425,6 +2443,18 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a,
2425 EXT4_SB(sb)->s_sectors_written_start) >> 1))); 2443 EXT4_SB(sb)->s_sectors_written_start) >> 1)));
2426} 2444}
2427 2445
2446static ssize_t extent_cache_hits_show(struct ext4_attr *a,
2447 struct ext4_sb_info *sbi, char *buf)
2448{
2449 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits);
2450}
2451
2452static ssize_t extent_cache_misses_show(struct ext4_attr *a,
2453 struct ext4_sb_info *sbi, char *buf)
2454{
2455 return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses);
2456}
2457
2428static ssize_t inode_readahead_blks_store(struct ext4_attr *a, 2458static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
2429 struct ext4_sb_info *sbi, 2459 struct ext4_sb_info *sbi,
2430 const char *buf, size_t count) 2460 const char *buf, size_t count)
@@ -2482,6 +2512,8 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
2482EXT4_RO_ATTR(delayed_allocation_blocks); 2512EXT4_RO_ATTR(delayed_allocation_blocks);
2483EXT4_RO_ATTR(session_write_kbytes); 2513EXT4_RO_ATTR(session_write_kbytes);
2484EXT4_RO_ATTR(lifetime_write_kbytes); 2514EXT4_RO_ATTR(lifetime_write_kbytes);
2515EXT4_RO_ATTR(extent_cache_hits);
2516EXT4_RO_ATTR(extent_cache_misses);
2485EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show, 2517EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
2486 inode_readahead_blks_store, s_inode_readahead_blks); 2518 inode_readahead_blks_store, s_inode_readahead_blks);
2487EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); 2519EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
@@ -2497,6 +2529,8 @@ static struct attribute *ext4_attrs[] = {
2497 ATTR_LIST(delayed_allocation_blocks), 2529 ATTR_LIST(delayed_allocation_blocks),
2498 ATTR_LIST(session_write_kbytes), 2530 ATTR_LIST(session_write_kbytes),
2499 ATTR_LIST(lifetime_write_kbytes), 2531 ATTR_LIST(lifetime_write_kbytes),
2532 ATTR_LIST(extent_cache_hits),
2533 ATTR_LIST(extent_cache_misses),
2500 ATTR_LIST(inode_readahead_blks), 2534 ATTR_LIST(inode_readahead_blks),
2501 ATTR_LIST(inode_goal), 2535 ATTR_LIST(inode_goal),
2502 ATTR_LIST(mb_stats), 2536 ATTR_LIST(mb_stats),
@@ -2659,12 +2693,6 @@ static void print_daily_error_info(unsigned long arg)
2659 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ 2693 mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
2660} 2694}
2661 2695
2662static void ext4_lazyinode_timeout(unsigned long data)
2663{
2664 struct task_struct *p = (struct task_struct *)data;
2665 wake_up_process(p);
2666}
2667
2668/* Find next suitable group and run ext4_init_inode_table */ 2696/* Find next suitable group and run ext4_init_inode_table */
2669static int ext4_run_li_request(struct ext4_li_request *elr) 2697static int ext4_run_li_request(struct ext4_li_request *elr)
2670{ 2698{
@@ -2696,11 +2724,8 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
2696 ret = ext4_init_inode_table(sb, group, 2724 ret = ext4_init_inode_table(sb, group,
2697 elr->lr_timeout ? 0 : 1); 2725 elr->lr_timeout ? 0 : 1);
2698 if (elr->lr_timeout == 0) { 2726 if (elr->lr_timeout == 0) {
2699 timeout = jiffies - timeout; 2727 timeout = (jiffies - timeout) *
2700 if (elr->lr_sbi->s_li_wait_mult) 2728 elr->lr_sbi->s_li_wait_mult;
2701 timeout *= elr->lr_sbi->s_li_wait_mult;
2702 else
2703 timeout *= 20;
2704 elr->lr_timeout = timeout; 2729 elr->lr_timeout = timeout;
2705 } 2730 }
2706 elr->lr_next_sched = jiffies + elr->lr_timeout; 2731 elr->lr_next_sched = jiffies + elr->lr_timeout;
@@ -2712,7 +2737,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
2712 2737
2713/* 2738/*
2714 * Remove lr_request from the list_request and free the 2739 * Remove lr_request from the list_request and free the
2715 * request tructure. Should be called with li_list_mtx held 2740 * request structure. Should be called with li_list_mtx held
2716 */ 2741 */
2717static void ext4_remove_li_request(struct ext4_li_request *elr) 2742static void ext4_remove_li_request(struct ext4_li_request *elr)
2718{ 2743{
@@ -2730,14 +2755,16 @@ static void ext4_remove_li_request(struct ext4_li_request *elr)
2730 2755
2731static void ext4_unregister_li_request(struct super_block *sb) 2756static void ext4_unregister_li_request(struct super_block *sb)
2732{ 2757{
2733 struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request; 2758 mutex_lock(&ext4_li_mtx);
2734 2759 if (!ext4_li_info) {
2735 if (!ext4_li_info) 2760 mutex_unlock(&ext4_li_mtx);
2736 return; 2761 return;
2762 }
2737 2763
2738 mutex_lock(&ext4_li_info->li_list_mtx); 2764 mutex_lock(&ext4_li_info->li_list_mtx);
2739 ext4_remove_li_request(elr); 2765 ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
2740 mutex_unlock(&ext4_li_info->li_list_mtx); 2766 mutex_unlock(&ext4_li_info->li_list_mtx);
2767 mutex_unlock(&ext4_li_mtx);
2741} 2768}
2742 2769
2743static struct task_struct *ext4_lazyinit_task; 2770static struct task_struct *ext4_lazyinit_task;
@@ -2756,17 +2783,10 @@ static int ext4_lazyinit_thread(void *arg)
2756 struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; 2783 struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
2757 struct list_head *pos, *n; 2784 struct list_head *pos, *n;
2758 struct ext4_li_request *elr; 2785 struct ext4_li_request *elr;
2759 unsigned long next_wakeup; 2786 unsigned long next_wakeup, cur;
2760 DEFINE_WAIT(wait);
2761 2787
2762 BUG_ON(NULL == eli); 2788 BUG_ON(NULL == eli);
2763 2789
2764 eli->li_timer.data = (unsigned long)current;
2765 eli->li_timer.function = ext4_lazyinode_timeout;
2766
2767 eli->li_task = current;
2768 wake_up(&eli->li_wait_task);
2769
2770cont_thread: 2790cont_thread:
2771 while (true) { 2791 while (true) {
2772 next_wakeup = MAX_JIFFY_OFFSET; 2792 next_wakeup = MAX_JIFFY_OFFSET;
@@ -2797,19 +2817,15 @@ cont_thread:
2797 if (freezing(current)) 2817 if (freezing(current))
2798 refrigerator(); 2818 refrigerator();
2799 2819
2800 if ((time_after_eq(jiffies, next_wakeup)) || 2820 cur = jiffies;
2821 if ((time_after_eq(cur, next_wakeup)) ||
2801 (MAX_JIFFY_OFFSET == next_wakeup)) { 2822 (MAX_JIFFY_OFFSET == next_wakeup)) {
2802 cond_resched(); 2823 cond_resched();
2803 continue; 2824 continue;
2804 } 2825 }
2805 2826
2806 eli->li_timer.expires = next_wakeup; 2827 schedule_timeout_interruptible(next_wakeup - cur);
2807 add_timer(&eli->li_timer); 2828
2808 prepare_to_wait(&eli->li_wait_daemon, &wait,
2809 TASK_INTERRUPTIBLE);
2810 if (time_before(jiffies, next_wakeup))
2811 schedule();
2812 finish_wait(&eli->li_wait_daemon, &wait);
2813 if (kthread_should_stop()) { 2829 if (kthread_should_stop()) {
2814 ext4_clear_request_list(); 2830 ext4_clear_request_list();
2815 goto exit_thread; 2831 goto exit_thread;
@@ -2833,12 +2849,7 @@ exit_thread:
2833 goto cont_thread; 2849 goto cont_thread;
2834 } 2850 }
2835 mutex_unlock(&eli->li_list_mtx); 2851 mutex_unlock(&eli->li_list_mtx);
2836 del_timer_sync(&ext4_li_info->li_timer);
2837 eli->li_task = NULL;
2838 wake_up(&eli->li_wait_task);
2839
2840 kfree(ext4_li_info); 2852 kfree(ext4_li_info);
2841 ext4_lazyinit_task = NULL;
2842 ext4_li_info = NULL; 2853 ext4_li_info = NULL;
2843 mutex_unlock(&ext4_li_mtx); 2854 mutex_unlock(&ext4_li_mtx);
2844 2855
@@ -2866,7 +2877,6 @@ static int ext4_run_lazyinit_thread(void)
2866 if (IS_ERR(ext4_lazyinit_task)) { 2877 if (IS_ERR(ext4_lazyinit_task)) {
2867 int err = PTR_ERR(ext4_lazyinit_task); 2878 int err = PTR_ERR(ext4_lazyinit_task);
2868 ext4_clear_request_list(); 2879 ext4_clear_request_list();
2869 del_timer_sync(&ext4_li_info->li_timer);
2870 kfree(ext4_li_info); 2880 kfree(ext4_li_info);
2871 ext4_li_info = NULL; 2881 ext4_li_info = NULL;
2872 printk(KERN_CRIT "EXT4: error %d creating inode table " 2882 printk(KERN_CRIT "EXT4: error %d creating inode table "
@@ -2875,8 +2885,6 @@ static int ext4_run_lazyinit_thread(void)
2875 return err; 2885 return err;
2876 } 2886 }
2877 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; 2887 ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
2878
2879 wait_event(ext4_li_info->li_wait_task, ext4_li_info->li_task != NULL);
2880 return 0; 2888 return 0;
2881} 2889}
2882 2890
@@ -2911,13 +2919,9 @@ static int ext4_li_info_new(void)
2911 if (!eli) 2919 if (!eli)
2912 return -ENOMEM; 2920 return -ENOMEM;
2913 2921
2914 eli->li_task = NULL;
2915 INIT_LIST_HEAD(&eli->li_request_list); 2922 INIT_LIST_HEAD(&eli->li_request_list);
2916 mutex_init(&eli->li_list_mtx); 2923 mutex_init(&eli->li_list_mtx);
2917 2924
2918 init_waitqueue_head(&eli->li_wait_daemon);
2919 init_waitqueue_head(&eli->li_wait_task);
2920 init_timer(&eli->li_timer);
2921 eli->li_state |= EXT4_LAZYINIT_QUIT; 2925 eli->li_state |= EXT4_LAZYINIT_QUIT;
2922 2926
2923 ext4_li_info = eli; 2927 ext4_li_info = eli;
@@ -2960,20 +2964,19 @@ static int ext4_register_li_request(struct super_block *sb,
2960 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 2964 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
2961 int ret = 0; 2965 int ret = 0;
2962 2966
2963 if (sbi->s_li_request != NULL) 2967 if (sbi->s_li_request != NULL) {
2968 /*
2969 * Reset timeout so it can be computed again, because
2970 * s_li_wait_mult might have changed.
2971 */
2972 sbi->s_li_request->lr_timeout = 0;
2964 return 0; 2973 return 0;
2974 }
2965 2975
2966 if (first_not_zeroed == ngroups || 2976 if (first_not_zeroed == ngroups ||
2967 (sb->s_flags & MS_RDONLY) || 2977 (sb->s_flags & MS_RDONLY) ||
2968 !test_opt(sb, INIT_INODE_TABLE)) { 2978 !test_opt(sb, INIT_INODE_TABLE))
2969 sbi->s_li_request = NULL;
2970 return 0; 2979 return 0;
2971 }
2972
2973 if (first_not_zeroed == ngroups) {
2974 sbi->s_li_request = NULL;
2975 return 0;
2976 }
2977 2980
2978 elr = ext4_li_request_new(sb, first_not_zeroed); 2981 elr = ext4_li_request_new(sb, first_not_zeroed);
2979 if (!elr) 2982 if (!elr)
@@ -3166,6 +3169,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3166 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) 3169 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
3167 set_opt(sb, DELALLOC); 3170 set_opt(sb, DELALLOC);
3168 3171
3172 /*
3173 * set default s_li_wait_mult for lazyinit, for the case there is
3174 * no mount option specified.
3175 */
3176 sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
3177
3169 if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, 3178 if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
3170 &journal_devnum, &journal_ioprio, NULL, 0)) { 3179 &journal_devnum, &journal_ioprio, NULL, 0)) {
3171 ext4_msg(sb, KERN_WARNING, 3180 ext4_msg(sb, KERN_WARNING,
@@ -3187,6 +3196,28 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3187 "feature flags set on rev 0 fs, " 3196 "feature flags set on rev 0 fs, "
3188 "running e2fsck is recommended"); 3197 "running e2fsck is recommended");
3189 3198
3199 if (IS_EXT2_SB(sb)) {
3200 if (ext2_feature_set_ok(sb))
3201 ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
3202 "using the ext4 subsystem");
3203 else {
3204 ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
3205 "to feature incompatibilities");
3206 goto failed_mount;
3207 }
3208 }
3209
3210 if (IS_EXT3_SB(sb)) {
3211 if (ext3_feature_set_ok(sb))
3212 ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
3213 "using the ext4 subsystem");
3214 else {
3215 ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
3216 "to feature incompatibilities");
3217 goto failed_mount;
3218 }
3219 }
3220
3190 /* 3221 /*
3191 * Check feature flags regardless of the revision level, since we 3222 * Check feature flags regardless of the revision level, since we
3192 * previously didn't change the revision level when setting the flags, 3223 * previously didn't change the revision level when setting the flags,
@@ -3459,6 +3490,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3459 EXT4_HAS_INCOMPAT_FEATURE(sb, 3490 EXT4_HAS_INCOMPAT_FEATURE(sb,
3460 EXT4_FEATURE_INCOMPAT_RECOVER)); 3491 EXT4_FEATURE_INCOMPAT_RECOVER));
3461 3492
3493 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
3494 !(sb->s_flags & MS_RDONLY))
3495 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
3496 goto failed_mount3;
3497
3462 /* 3498 /*
3463 * The first inode we look at is the journal inode. Don't try 3499 * The first inode we look at is the journal inode. Don't try
3464 * root first: it may be modified in the journal! 3500 * root first: it may be modified in the journal!
@@ -3474,7 +3510,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3474 goto failed_mount_wq; 3510 goto failed_mount_wq;
3475 } else { 3511 } else {
3476 clear_opt(sb, DATA_FLAGS); 3512 clear_opt(sb, DATA_FLAGS);
3477 set_opt(sb, WRITEBACK_DATA);
3478 sbi->s_journal = NULL; 3513 sbi->s_journal = NULL;
3479 needs_recovery = 0; 3514 needs_recovery = 0;
3480 goto no_journal; 3515 goto no_journal;
@@ -3707,6 +3742,8 @@ failed_mount3:
3707 percpu_counter_destroy(&sbi->s_freeinodes_counter); 3742 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3708 percpu_counter_destroy(&sbi->s_dirs_counter); 3743 percpu_counter_destroy(&sbi->s_dirs_counter);
3709 percpu_counter_destroy(&sbi->s_dirtyblocks_counter); 3744 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3745 if (sbi->s_mmp_tsk)
3746 kthread_stop(sbi->s_mmp_tsk);
3710failed_mount2: 3747failed_mount2:
3711 for (i = 0; i < db_count; i++) 3748 for (i = 0; i < db_count; i++)
3712 brelse(sbi->s_group_desc[i]); 3749 brelse(sbi->s_group_desc[i]);
@@ -4242,7 +4279,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4242 int enable_quota = 0; 4279 int enable_quota = 0;
4243 ext4_group_t g; 4280 ext4_group_t g;
4244 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 4281 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
4245 int err; 4282 int err = 0;
4246#ifdef CONFIG_QUOTA 4283#ifdef CONFIG_QUOTA
4247 int i; 4284 int i;
4248#endif 4285#endif
@@ -4368,6 +4405,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4368 goto restore_opts; 4405 goto restore_opts;
4369 if (!ext4_setup_super(sb, es, 0)) 4406 if (!ext4_setup_super(sb, es, 0))
4370 sb->s_flags &= ~MS_RDONLY; 4407 sb->s_flags &= ~MS_RDONLY;
4408 if (EXT4_HAS_INCOMPAT_FEATURE(sb,
4409 EXT4_FEATURE_INCOMPAT_MMP))
4410 if (ext4_multi_mount_protect(sb,
4411 le64_to_cpu(es->s_mmp_block))) {
4412 err = -EROFS;
4413 goto restore_opts;
4414 }
4371 enable_quota = 1; 4415 enable_quota = 1;
4372 } 4416 }
4373 } 4417 }
@@ -4432,6 +4476,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
4432 struct ext4_sb_info *sbi = EXT4_SB(sb); 4476 struct ext4_sb_info *sbi = EXT4_SB(sb);
4433 struct ext4_super_block *es = sbi->s_es; 4477 struct ext4_super_block *es = sbi->s_es;
4434 u64 fsid; 4478 u64 fsid;
4479 s64 bfree;
4435 4480
4436 if (test_opt(sb, MINIX_DF)) { 4481 if (test_opt(sb, MINIX_DF)) {
4437 sbi->s_overhead_last = 0; 4482 sbi->s_overhead_last = 0;
@@ -4475,8 +4520,10 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
4475 buf->f_type = EXT4_SUPER_MAGIC; 4520 buf->f_type = EXT4_SUPER_MAGIC;
4476 buf->f_bsize = sb->s_blocksize; 4521 buf->f_bsize = sb->s_blocksize;
4477 buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last; 4522 buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
4478 buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) - 4523 bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
4479 percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter); 4524 percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
4525 /* prevent underflow in case that few free space is available */
4526 buf->f_bfree = max_t(s64, bfree, 0);
4480 buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es); 4527 buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
4481 if (buf->f_bfree < ext4_r_blocks_count(es)) 4528 if (buf->f_bfree < ext4_r_blocks_count(es))
4482 buf->f_bavail = 0; 4529 buf->f_bavail = 0;
@@ -4652,6 +4699,9 @@ static int ext4_quota_off(struct super_block *sb, int type)
4652 if (test_opt(sb, DELALLOC)) 4699 if (test_opt(sb, DELALLOC))
4653 sync_filesystem(sb); 4700 sync_filesystem(sb);
4654 4701
4702 if (!inode)
4703 goto out;
4704
4655 /* Update modification times of quota files when userspace can 4705 /* Update modification times of quota files when userspace can
4656 * start looking at them */ 4706 * start looking at them */
4657 handle = ext4_journal_start(inode, 1); 4707 handle = ext4_journal_start(inode, 1);
@@ -4772,14 +4822,6 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
4772} 4822}
4773 4823
4774#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) 4824#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
4775static struct file_system_type ext2_fs_type = {
4776 .owner = THIS_MODULE,
4777 .name = "ext2",
4778 .mount = ext4_mount,
4779 .kill_sb = kill_block_super,
4780 .fs_flags = FS_REQUIRES_DEV,
4781};
4782
4783static inline void register_as_ext2(void) 4825static inline void register_as_ext2(void)
4784{ 4826{
4785 int err = register_filesystem(&ext2_fs_type); 4827 int err = register_filesystem(&ext2_fs_type);
@@ -4792,10 +4834,22 @@ static inline void unregister_as_ext2(void)
4792{ 4834{
4793 unregister_filesystem(&ext2_fs_type); 4835 unregister_filesystem(&ext2_fs_type);
4794} 4836}
4837
4838static inline int ext2_feature_set_ok(struct super_block *sb)
4839{
4840 if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP))
4841 return 0;
4842 if (sb->s_flags & MS_RDONLY)
4843 return 1;
4844 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))
4845 return 0;
4846 return 1;
4847}
4795MODULE_ALIAS("ext2"); 4848MODULE_ALIAS("ext2");
4796#else 4849#else
4797static inline void register_as_ext2(void) { } 4850static inline void register_as_ext2(void) { }
4798static inline void unregister_as_ext2(void) { } 4851static inline void unregister_as_ext2(void) { }
4852static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
4799#endif 4853#endif
4800 4854
4801#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) 4855#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
@@ -4811,10 +4865,24 @@ static inline void unregister_as_ext3(void)
4811{ 4865{
4812 unregister_filesystem(&ext3_fs_type); 4866 unregister_filesystem(&ext3_fs_type);
4813} 4867}
4868
4869static inline int ext3_feature_set_ok(struct super_block *sb)
4870{
4871 if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP))
4872 return 0;
4873 if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
4874 return 0;
4875 if (sb->s_flags & MS_RDONLY)
4876 return 1;
4877 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))
4878 return 0;
4879 return 1;
4880}
4814MODULE_ALIAS("ext3"); 4881MODULE_ALIAS("ext3");
4815#else 4882#else
4816static inline void register_as_ext3(void) { } 4883static inline void register_as_ext3(void) { }
4817static inline void unregister_as_ext3(void) { } 4884static inline void unregister_as_ext3(void) { }
4885static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; }
4818#endif 4886#endif
4819 4887
4820static struct file_system_type ext4_fs_type = { 4888static struct file_system_type ext4_fs_type = {
@@ -4898,8 +4966,8 @@ static int __init ext4_init_fs(void)
4898 err = init_inodecache(); 4966 err = init_inodecache();
4899 if (err) 4967 if (err)
4900 goto out1; 4968 goto out1;
4901 register_as_ext2();
4902 register_as_ext3(); 4969 register_as_ext3();
4970 register_as_ext2();
4903 err = register_filesystem(&ext4_fs_type); 4971 err = register_filesystem(&ext4_fs_type);
4904 if (err) 4972 if (err)
4905 goto out; 4973 goto out;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index b545ca1c459c..c757adc97250 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -820,8 +820,8 @@ inserted:
820 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 820 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
821 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; 821 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
822 822
823 block = ext4_new_meta_blocks(handle, inode, 823 block = ext4_new_meta_blocks(handle, inode, goal, 0,
824 goal, NULL, &error); 824 NULL, &error);
825 if (error) 825 if (error)
826 goto cleanup; 826 goto cleanup;
827 827
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 3b222dafd15b..be15437c272e 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -326,6 +326,8 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
326 struct fat_slot_info sinfo; 326 struct fat_slot_info sinfo;
327 int err; 327 int err;
328 328
329 dentry_unhash(dentry);
330
329 lock_super(sb); 331 lock_super(sb);
330 /* 332 /*
331 * Check whether the directory is not in use, then check 333 * Check whether the directory is not in use, then check
@@ -457,6 +459,9 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
457 old_inode = old_dentry->d_inode; 459 old_inode = old_dentry->d_inode;
458 new_inode = new_dentry->d_inode; 460 new_inode = new_dentry->d_inode;
459 461
462 if (new_inode && S_ISDIR(new_inode->i_mode))
463 dentry_unhash(new_dentry);
464
460 err = fat_scan(old_dir, old_name, &old_sinfo); 465 err = fat_scan(old_dir, old_name, &old_sinfo);
461 if (err) { 466 if (err) {
462 err = -EIO; 467 err = -EIO;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 20b4ea53fdc4..c61a6789f36c 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -824,6 +824,8 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
824 struct fat_slot_info sinfo; 824 struct fat_slot_info sinfo;
825 int err; 825 int err;
826 826
827 dentry_unhash(dentry);
828
827 lock_super(sb); 829 lock_super(sb);
828 830
829 err = fat_dir_empty(inode); 831 err = fat_dir_empty(inode);
@@ -931,6 +933,9 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
931 int err, is_dir, update_dotdot, corrupt = 0; 933 int err, is_dir, update_dotdot, corrupt = 0;
932 struct super_block *sb = old_dir->i_sb; 934 struct super_block *sb = old_dir->i_sb;
933 935
936 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
937 dentry_unhash(new_dentry);
938
934 old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; 939 old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
935 old_inode = old_dentry->d_inode; 940 old_inode = old_dentry->d_inode;
936 new_inode = new_dentry->d_inode; 941 new_inode = new_dentry->d_inode;
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 48a18f184d50..30afdfa7aec7 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -33,8 +33,6 @@ void fscache_enqueue_operation(struct fscache_operation *op)
33 _enter("{OBJ%x OP%x,%u}", 33 _enter("{OBJ%x OP%x,%u}",
34 op->object->debug_id, op->debug_id, atomic_read(&op->usage)); 34 op->object->debug_id, op->debug_id, atomic_read(&op->usage));
35 35
36 fscache_set_op_state(op, "EnQ");
37
38 ASSERT(list_empty(&op->pend_link)); 36 ASSERT(list_empty(&op->pend_link));
39 ASSERT(op->processor != NULL); 37 ASSERT(op->processor != NULL);
40 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); 38 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
@@ -66,8 +64,6 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
66static void fscache_run_op(struct fscache_object *object, 64static void fscache_run_op(struct fscache_object *object,
67 struct fscache_operation *op) 65 struct fscache_operation *op)
68{ 66{
69 fscache_set_op_state(op, "Run");
70
71 object->n_in_progress++; 67 object->n_in_progress++;
72 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) 68 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
73 wake_up_bit(&op->flags, FSCACHE_OP_WAITING); 69 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -88,8 +84,6 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
88 84
89 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); 85 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
90 86
91 fscache_set_op_state(op, "SubmitX");
92
93 spin_lock(&object->lock); 87 spin_lock(&object->lock);
94 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 88 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
95 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 89 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
@@ -194,8 +188,6 @@ int fscache_submit_op(struct fscache_object *object,
194 188
195 ASSERTCMP(atomic_read(&op->usage), >, 0); 189 ASSERTCMP(atomic_read(&op->usage), >, 0);
196 190
197 fscache_set_op_state(op, "Submit");
198
199 spin_lock(&object->lock); 191 spin_lock(&object->lock);
200 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 192 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
201 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 193 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
@@ -335,8 +327,6 @@ void fscache_put_operation(struct fscache_operation *op)
335 if (!atomic_dec_and_test(&op->usage)) 327 if (!atomic_dec_and_test(&op->usage))
336 return; 328 return;
337 329
338 fscache_set_op_state(op, "Put");
339
340 _debug("PUT OP"); 330 _debug("PUT OP");
341 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) 331 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
342 BUG(); 332 BUG();
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 41c441c2058d..a2a5d19ece6a 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -155,11 +155,9 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
155 fscache_stat(&fscache_n_attr_changed_calls); 155 fscache_stat(&fscache_n_attr_changed_calls);
156 156
157 if (fscache_object_is_active(object)) { 157 if (fscache_object_is_active(object)) {
158 fscache_set_op_state(op, "CallFS");
159 fscache_stat(&fscache_n_cop_attr_changed); 158 fscache_stat(&fscache_n_cop_attr_changed);
160 ret = object->cache->ops->attr_changed(object); 159 ret = object->cache->ops->attr_changed(object);
161 fscache_stat_d(&fscache_n_cop_attr_changed); 160 fscache_stat_d(&fscache_n_cop_attr_changed);
162 fscache_set_op_state(op, "Done");
163 if (ret < 0) 161 if (ret < 0)
164 fscache_abort_object(object); 162 fscache_abort_object(object);
165 } 163 }
@@ -190,7 +188,6 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
190 188
191 fscache_operation_init(op, fscache_attr_changed_op, NULL); 189 fscache_operation_init(op, fscache_attr_changed_op, NULL);
192 op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE); 190 op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
193 fscache_set_op_name(op, "Attr");
194 191
195 spin_lock(&cookie->lock); 192 spin_lock(&cookie->lock);
196 193
@@ -257,7 +254,6 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
257 op->context = context; 254 op->context = context;
258 op->start_time = jiffies; 255 op->start_time = jiffies;
259 INIT_LIST_HEAD(&op->to_do); 256 INIT_LIST_HEAD(&op->to_do);
260 fscache_set_op_name(&op->op, "Retr");
261 return op; 257 return op;
262} 258}
263 259
@@ -368,7 +364,6 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
368 _leave(" = -ENOMEM"); 364 _leave(" = -ENOMEM");
369 return -ENOMEM; 365 return -ENOMEM;
370 } 366 }
371 fscache_set_op_name(&op->op, "RetrRA1");
372 367
373 spin_lock(&cookie->lock); 368 spin_lock(&cookie->lock);
374 369
@@ -487,7 +482,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
487 op = fscache_alloc_retrieval(mapping, end_io_func, context); 482 op = fscache_alloc_retrieval(mapping, end_io_func, context);
488 if (!op) 483 if (!op)
489 return -ENOMEM; 484 return -ENOMEM;
490 fscache_set_op_name(&op->op, "RetrRAN");
491 485
492 spin_lock(&cookie->lock); 486 spin_lock(&cookie->lock);
493 487
@@ -589,7 +583,6 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
589 op = fscache_alloc_retrieval(page->mapping, NULL, NULL); 583 op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
590 if (!op) 584 if (!op)
591 return -ENOMEM; 585 return -ENOMEM;
592 fscache_set_op_name(&op->op, "RetrAL1");
593 586
594 spin_lock(&cookie->lock); 587 spin_lock(&cookie->lock);
595 588
@@ -662,8 +655,6 @@ static void fscache_write_op(struct fscache_operation *_op)
662 655
663 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); 656 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
664 657
665 fscache_set_op_state(&op->op, "GetPage");
666
667 spin_lock(&object->lock); 658 spin_lock(&object->lock);
668 cookie = object->cookie; 659 cookie = object->cookie;
669 660
@@ -698,15 +689,12 @@ static void fscache_write_op(struct fscache_operation *_op)
698 spin_unlock(&cookie->stores_lock); 689 spin_unlock(&cookie->stores_lock);
699 spin_unlock(&object->lock); 690 spin_unlock(&object->lock);
700 691
701 fscache_set_op_state(&op->op, "Store");
702 fscache_stat(&fscache_n_store_pages); 692 fscache_stat(&fscache_n_store_pages);
703 fscache_stat(&fscache_n_cop_write_page); 693 fscache_stat(&fscache_n_cop_write_page);
704 ret = object->cache->ops->write_page(op, page); 694 ret = object->cache->ops->write_page(op, page);
705 fscache_stat_d(&fscache_n_cop_write_page); 695 fscache_stat_d(&fscache_n_cop_write_page);
706 fscache_set_op_state(&op->op, "EndWrite");
707 fscache_end_page_write(object, page); 696 fscache_end_page_write(object, page);
708 if (ret < 0) { 697 if (ret < 0) {
709 fscache_set_op_state(&op->op, "Abort");
710 fscache_abort_object(object); 698 fscache_abort_object(object);
711 } else { 699 } else {
712 fscache_enqueue_operation(&op->op); 700 fscache_enqueue_operation(&op->op);
@@ -778,7 +766,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
778 fscache_operation_init(&op->op, fscache_write_op, 766 fscache_operation_init(&op->op, fscache_write_op,
779 fscache_release_write_op); 767 fscache_release_write_op);
780 op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING); 768 op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
781 fscache_set_op_name(&op->op, "Write1");
782 769
783 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); 770 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
784 if (ret < 0) 771 if (ret < 0)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b32eb29a4e6f..0d0e3faddcfa 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -667,6 +667,8 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
667 if (IS_ERR(req)) 667 if (IS_ERR(req))
668 return PTR_ERR(req); 668 return PTR_ERR(req);
669 669
670 dentry_unhash(entry);
671
670 req->in.h.opcode = FUSE_RMDIR; 672 req->in.h.opcode = FUSE_RMDIR;
671 req->in.h.nodeid = get_node_id(dir); 673 req->in.h.nodeid = get_node_id(dir);
672 req->in.numargs = 1; 674 req->in.numargs = 1;
@@ -691,6 +693,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
691 struct fuse_rename_in inarg; 693 struct fuse_rename_in inarg;
692 struct fuse_conn *fc = get_fuse_conn(olddir); 694 struct fuse_conn *fc = get_fuse_conn(olddir);
693 struct fuse_req *req = fuse_get_req(fc); 695 struct fuse_req *req = fuse_get_req(fc);
696
697 if (newent->d_inode && S_ISDIR(newent->d_inode->i_mode))
698 dentry_unhash(newent);
699
694 if (IS_ERR(req)) 700 if (IS_ERR(req))
695 return PTR_ERR(req); 701 return PTR_ERR(req);
696 702
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a2a6abbccc07..2792a790e50b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1346,11 +1346,14 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1346} 1346}
1347 1347
1348 1348
1349static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1349static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1350 struct shrink_control *sc)
1350{ 1351{
1351 struct gfs2_glock *gl; 1352 struct gfs2_glock *gl;
1352 int may_demote; 1353 int may_demote;
1353 int nr_skipped = 0; 1354 int nr_skipped = 0;
1355 int nr = sc->nr_to_scan;
1356 gfp_t gfp_mask = sc->gfp_mask;
1354 LIST_HEAD(skipped); 1357 LIST_HEAD(skipped);
1355 1358
1356 if (nr == 0) 1359 if (nr == 0)
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index cfa327d33194..c2b34cd2abe0 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -146,7 +146,7 @@ static int __init init_gfs2_fs(void)
146 146
147 gfs2_register_debugfs(); 147 gfs2_register_debugfs();
148 148
149 printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__); 149 printk("GFS2 installed\n");
150 150
151 return 0; 151 return 0;
152 152
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index e23d9864c418..42e8d23bc047 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/mm.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
42#include <linux/completion.h> 43#include <linux/completion.h>
43#include <linux/buffer_head.h> 44#include <linux/buffer_head.h>
@@ -77,19 +78,20 @@ static LIST_HEAD(qd_lru_list);
77static atomic_t qd_lru_count = ATOMIC_INIT(0); 78static atomic_t qd_lru_count = ATOMIC_INIT(0);
78static DEFINE_SPINLOCK(qd_lru_lock); 79static DEFINE_SPINLOCK(qd_lru_lock);
79 80
80int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 81int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
81{ 82{
82 struct gfs2_quota_data *qd; 83 struct gfs2_quota_data *qd;
83 struct gfs2_sbd *sdp; 84 struct gfs2_sbd *sdp;
85 int nr_to_scan = sc->nr_to_scan;
84 86
85 if (nr == 0) 87 if (nr_to_scan == 0)
86 goto out; 88 goto out;
87 89
88 if (!(gfp_mask & __GFP_FS)) 90 if (!(sc->gfp_mask & __GFP_FS))
89 return -1; 91 return -1;
90 92
91 spin_lock(&qd_lru_lock); 93 spin_lock(&qd_lru_lock);
92 while (nr && !list_empty(&qd_lru_list)) { 94 while (nr_to_scan && !list_empty(&qd_lru_list)) {
93 qd = list_entry(qd_lru_list.next, 95 qd = list_entry(qd_lru_list.next,
94 struct gfs2_quota_data, qd_reclaim); 96 struct gfs2_quota_data, qd_reclaim);
95 sdp = qd->qd_gl->gl_sbd; 97 sdp = qd->qd_gl->gl_sbd;
@@ -110,7 +112,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
110 spin_unlock(&qd_lru_lock); 112 spin_unlock(&qd_lru_lock);
111 kmem_cache_free(gfs2_quotad_cachep, qd); 113 kmem_cache_free(gfs2_quotad_cachep, qd);
112 spin_lock(&qd_lru_lock); 114 spin_lock(&qd_lru_lock);
113 nr--; 115 nr_to_scan--;
114 } 116 }
115 spin_unlock(&qd_lru_lock); 117 spin_unlock(&qd_lru_lock);
116 118
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index e7d236ca48bd..90bf1c302a98 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -12,6 +12,7 @@
12 12
13struct gfs2_inode; 13struct gfs2_inode;
14struct gfs2_sbd; 14struct gfs2_sbd;
15struct shrink_control;
15 16
16#define NO_QUOTA_CHANGE ((u32)-1) 17#define NO_QUOTA_CHANGE ((u32)-1)
17 18
@@ -51,7 +52,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
51 return ret; 52 return ret;
52} 53}
53 54
54extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask); 55extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
56 struct shrink_control *sc);
55extern const struct quotactl_ops gfs2_quotactl_ops; 57extern const struct quotactl_ops gfs2_quotactl_ops;
56 58
57#endif /* __QUOTA_DOT_H__ */ 59#endif /* __QUOTA_DOT_H__ */
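Note: the gfs2 hunks above (and the fs/inode.c and fs/mbcache.c hunks further down) all convert shrinker callbacks from the old (shrinker, nr, gfp_mask) form to the struct shrink_control interface. The fragment below is only a sketch of a shrinker written against that interface, not code from this diff; the demo_* names are hypothetical, while the callback signature, the sc->nr_to_scan / sc->gfp_mask fields and the "return -1 when !__GFP_FS" convention are taken from the hunks themselves.

    #include <linux/mm.h>          /* struct shrinker, struct shrink_control */

    static unsigned long demo_cache_objects;   /* hypothetical cached-object count */

    static void demo_cache_prune(unsigned long nr)
    {
            /* drop up to nr objects from the (hypothetical) cache */
    }

    static int demo_shrink(struct shrinker *shrink, struct shrink_control *sc)
    {
            if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
                    return -1;                 /* must not recurse into the FS */
            if (sc->nr_to_scan)
                    demo_cache_prune(sc->nr_to_scan);
            return demo_cache_objects;         /* objects still cached */
    }

    static struct shrinker demo_shrinker = {
            .shrink = demo_shrink,
            .seeks  = DEFAULT_SEEKS,
    };
    /* register_shrinker(&demo_shrinker) at init, unregister_shrinker() at exit */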
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index b4d70b13be92..1cb70cdba2c1 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -253,6 +253,9 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
253 struct inode *inode = dentry->d_inode; 253 struct inode *inode = dentry->d_inode;
254 int res; 254 int res;
255 255
256 if (S_ISDIR(inode->i_mode))
257 dentry_unhash(dentry);
258
256 if (S_ISDIR(inode->i_mode) && inode->i_size != 2) 259 if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
257 return -ENOTEMPTY; 260 return -ENOTEMPTY;
258 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 261 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
@@ -283,6 +286,9 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
283 286
284 /* Unlink destination if it already exists */ 287 /* Unlink destination if it already exists */
285 if (new_dentry->d_inode) { 288 if (new_dentry->d_inode) {
289 if (S_ISDIR(new_dentry->d_inode->i_mode))
290 dentry_unhash(new_dentry);
291
286 res = hfs_remove(new_dir, new_dentry); 292 res = hfs_remove(new_dir, new_dentry);
287 if (res) 293 if (res)
288 return res; 294 return res;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 4df5059c25da..b28835091dd0 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -370,6 +370,8 @@ static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
370 struct inode *inode = dentry->d_inode; 370 struct inode *inode = dentry->d_inode;
371 int res; 371 int res;
372 372
373 dentry_unhash(dentry);
374
373 if (inode->i_size != 2) 375 if (inode->i_size != 2)
374 return -ENOTEMPTY; 376 return -ENOTEMPTY;
375 377
@@ -467,10 +469,12 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
467 469
468 /* Unlink destination if it already exists */ 470 /* Unlink destination if it already exists */
469 if (new_dentry->d_inode) { 471 if (new_dentry->d_inode) {
470 if (S_ISDIR(new_dentry->d_inode->i_mode)) 472 if (S_ISDIR(new_dentry->d_inode->i_mode)) {
473 dentry_unhash(new_dentry);
471 res = hfsplus_rmdir(new_dir, new_dentry); 474 res = hfsplus_rmdir(new_dir, new_dentry);
472 else 475 } else {
473 res = hfsplus_unlink(new_dir, new_dentry); 476 res = hfsplus_unlink(new_dir, new_dentry);
477 }
474 if (res) 478 if (res)
475 return res; 479 return res;
476 } 480 }
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2638c834ed28..e6816b9e6903 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -683,6 +683,8 @@ int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
683 char *file; 683 char *file;
684 int err; 684 int err;
685 685
686 dentry_unhash(dentry);
687
686 if ((file = dentry_name(dentry)) == NULL) 688 if ((file = dentry_name(dentry)) == NULL)
687 return -ENOMEM; 689 return -ENOMEM;
688 err = do_rmdir(file); 690 err = do_rmdir(file);
@@ -736,6 +738,9 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from,
736 char *from_name, *to_name; 738 char *from_name, *to_name;
737 int err; 739 int err;
738 740
741 if (to->d_inode && S_ISDIR(to->d_inode->i_mode))
742 dentry_unhash(to);
743
739 if ((from_name = dentry_name(from)) == NULL) 744 if ((from_name = dentry_name(from)) == NULL)
740 return -ENOMEM; 745 return -ENOMEM;
741 if ((to_name = dentry_name(to)) == NULL) { 746 if ((to_name = dentry_name(to)) == NULL) {
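Note: the fat, fuse, hfs, hfsplus and hostfs hunks above (and the hpfs, jffs2, jfs, logfs and minix hunks that follow) all add the same calling convention: ->rmdir unhashes the victim dentry itself, and ->rename unhashes the target dentry only when it is a directory. A condensed sketch of that placement is below; the demo_* helpers are hypothetical stand-ins for each filesystem's own removal/rename code, and only the dentry_unhash() placement mirrors the hunks.

    #include <linux/fs.h>
    #include <linux/dcache.h>

    /* hypothetical per-filesystem helpers, declared only to complete the sketch */
    static int demo_dir_empty(struct inode *inode);
    static int demo_remove_entry(struct inode *dir, struct dentry *dentry);
    static int demo_do_rename(struct inode *old_dir, struct dentry *old_dentry,
                              struct inode *new_dir, struct dentry *new_dentry);

    static int demo_rmdir(struct inode *dir, struct dentry *dentry)
    {
            dentry_unhash(dentry);          /* done by the filesystem, not the VFS */

            if (!demo_dir_empty(dentry->d_inode))
                    return -ENOTEMPTY;
            return demo_remove_entry(dir, dentry);
    }

    static int demo_rename(struct inode *old_dir, struct dentry *old_dentry,
                           struct inode *new_dir, struct dentry *new_dentry)
    {
            /* unhash only when an existing directory is being replaced */
            if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
                    dentry_unhash(new_dentry);

            return demo_do_rename(old_dir, old_dentry, new_dir, new_dentry);
    }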
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 1f05839c27a7..ff0ce21c0867 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -395,7 +395,6 @@ again:
395 395
396 dentry_unhash(dentry); 396 dentry_unhash(dentry);
397 if (!d_unhashed(dentry)) { 397 if (!d_unhashed(dentry)) {
398 dput(dentry);
399 hpfs_unlock(dir->i_sb); 398 hpfs_unlock(dir->i_sb);
400 return -ENOSPC; 399 return -ENOSPC;
401 } 400 }
@@ -403,7 +402,6 @@ again:
403 !S_ISREG(inode->i_mode) || 402 !S_ISREG(inode->i_mode) ||
404 get_write_access(inode)) { 403 get_write_access(inode)) {
405 d_rehash(dentry); 404 d_rehash(dentry);
406 dput(dentry);
407 } else { 405 } else {
408 struct iattr newattrs; 406 struct iattr newattrs;
409 /*printk("HPFS: truncating file before delete.\n");*/ 407 /*printk("HPFS: truncating file before delete.\n");*/
@@ -411,7 +409,6 @@ again:
411 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; 409 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
412 err = notify_change(dentry, &newattrs); 410 err = notify_change(dentry, &newattrs);
413 put_write_access(inode); 411 put_write_access(inode);
414 dput(dentry);
415 if (!err) 412 if (!err)
416 goto again; 413 goto again;
417 } 414 }
@@ -442,6 +439,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
442 int err; 439 int err;
443 int r; 440 int r;
444 441
442 dentry_unhash(dentry);
443
445 hpfs_adjust_length(name, &len); 444 hpfs_adjust_length(name, &len);
446 hpfs_lock(dir->i_sb); 445 hpfs_lock(dir->i_sb);
447 err = -ENOENT; 446 err = -ENOENT;
@@ -535,6 +534,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
535 struct buffer_head *bh; 534 struct buffer_head *bh;
536 struct fnode *fnode; 535 struct fnode *fnode;
537 int err; 536 int err;
537
538 if (new_inode && S_ISDIR(new_inode->i_mode))
539 dentry_unhash(new_dentry);
540
538 if ((err = hpfs_chk_name(new_name, &new_len))) return err; 541 if ((err = hpfs_chk_name(new_name, &new_len))) return err;
539 err = 0; 542 err = 0;
540 hpfs_adjust_length(old_name, &old_len); 543 hpfs_adjust_length(old_name, &old_len);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b9eeb1cd03ff..7aafeb8fa300 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -412,10 +412,10 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
412 pgoff = offset >> PAGE_SHIFT; 412 pgoff = offset >> PAGE_SHIFT;
413 413
414 i_size_write(inode, offset); 414 i_size_write(inode, offset);
415 spin_lock(&mapping->i_mmap_lock); 415 mutex_lock(&mapping->i_mmap_mutex);
416 if (!prio_tree_empty(&mapping->i_mmap)) 416 if (!prio_tree_empty(&mapping->i_mmap))
417 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); 417 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
418 spin_unlock(&mapping->i_mmap_lock); 418 mutex_unlock(&mapping->i_mmap_mutex);
419 truncate_hugepages(inode, offset); 419 truncate_hugepages(inode, offset);
420 return 0; 420 return 0;
421} 421}
@@ -921,7 +921,8 @@ static int can_do_hugetlb_shm(void)
921 return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); 921 return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
922} 922}
923 923
924struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, 924struct file *hugetlb_file_setup(const char *name, size_t size,
925 vm_flags_t acctflag,
925 struct user_struct **user, int creat_flags) 926 struct user_struct **user, int creat_flags)
926{ 927{
927 int error = -ENOMEM; 928 int error = -ENOMEM;
diff --git a/fs/inode.c b/fs/inode.c
index 05f4fa521325..990d284877a1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -326,12 +326,11 @@ void address_space_init_once(struct address_space *mapping)
326 memset(mapping, 0, sizeof(*mapping)); 326 memset(mapping, 0, sizeof(*mapping));
327 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); 327 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
328 spin_lock_init(&mapping->tree_lock); 328 spin_lock_init(&mapping->tree_lock);
329 spin_lock_init(&mapping->i_mmap_lock); 329 mutex_init(&mapping->i_mmap_mutex);
330 INIT_LIST_HEAD(&mapping->private_list); 330 INIT_LIST_HEAD(&mapping->private_list);
331 spin_lock_init(&mapping->private_lock); 331 spin_lock_init(&mapping->private_lock);
332 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); 332 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
333 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); 333 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
334 mutex_init(&mapping->unmap_mutex);
335} 334}
336EXPORT_SYMBOL(address_space_init_once); 335EXPORT_SYMBOL(address_space_init_once);
337 336
@@ -752,8 +751,12 @@ static void prune_icache(int nr_to_scan)
752 * This function is passed the number of inodes to scan, and it returns the 751 * This function is passed the number of inodes to scan, and it returns the
753 * total number of remaining possibly-reclaimable inodes. 752 * total number of remaining possibly-reclaimable inodes.
754 */ 753 */
755static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 754static int shrink_icache_memory(struct shrinker *shrink,
755 struct shrink_control *sc)
756{ 756{
757 int nr = sc->nr_to_scan;
758 gfp_t gfp_mask = sc->gfp_mask;
759
757 if (nr) { 760 if (nr) {
758 /* 761 /*
759 * Nasty deadlock avoidance. We may hold various FS locks, 762 * Nasty deadlock avoidance. We may hold various FS locks,
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 29148a81c783..7f21cf3aaf92 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -219,7 +219,6 @@ static int journal_submit_data_buffers(journal_t *journal,
219 ret = err; 219 ret = err;
220 spin_lock(&journal->j_list_lock); 220 spin_lock(&journal->j_list_lock);
221 J_ASSERT(jinode->i_transaction == commit_transaction); 221 J_ASSERT(jinode->i_transaction == commit_transaction);
222 commit_transaction->t_flushed_data_blocks = 1;
223 clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags); 222 clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
224 smp_mb__after_clear_bit(); 223 smp_mb__after_clear_bit();
225 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); 224 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
@@ -672,12 +671,16 @@ start_journal_io:
672 err = 0; 671 err = 0;
673 } 672 }
674 673
674 write_lock(&journal->j_state_lock);
675 J_ASSERT(commit_transaction->t_state == T_COMMIT);
676 commit_transaction->t_state = T_COMMIT_DFLUSH;
677 write_unlock(&journal->j_state_lock);
675 /* 678 /*
676 * If the journal is not located on the file system device, 679 * If the journal is not located on the file system device,
677 * then we must flush the file system device before we issue 680 * then we must flush the file system device before we issue
678 * the commit record 681 * the commit record
679 */ 682 */
680 if (commit_transaction->t_flushed_data_blocks && 683 if (commit_transaction->t_need_data_flush &&
681 (journal->j_fs_dev != journal->j_dev) && 684 (journal->j_fs_dev != journal->j_dev) &&
682 (journal->j_flags & JBD2_BARRIER)) 685 (journal->j_flags & JBD2_BARRIER))
683 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); 686 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
@@ -754,8 +757,13 @@ wait_for_iobuf:
754 required. */ 757 required. */
755 JBUFFER_TRACE(jh, "file as BJ_Forget"); 758 JBUFFER_TRACE(jh, "file as BJ_Forget");
756 jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget); 759 jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
757 /* Wake up any transactions which were waiting for this 760 /*
758 IO to complete */ 761 * Wake up any transactions which were waiting for this IO to
762 * complete. The barrier must be here so that changes by
763 * jbd2_journal_file_buffer() take effect before wake_up_bit()
764 * does the waitqueue check.
765 */
766 smp_mb();
759 wake_up_bit(&bh->b_state, BH_Unshadow); 767 wake_up_bit(&bh->b_state, BH_Unshadow);
760 JBUFFER_TRACE(jh, "brelse shadowed buffer"); 768 JBUFFER_TRACE(jh, "brelse shadowed buffer");
761 __brelse(bh); 769 __brelse(bh);
@@ -794,6 +802,10 @@ wait_for_iobuf:
794 jbd2_journal_abort(journal, err); 802 jbd2_journal_abort(journal, err);
795 803
796 jbd_debug(3, "JBD: commit phase 5\n"); 804 jbd_debug(3, "JBD: commit phase 5\n");
805 write_lock(&journal->j_state_lock);
806 J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
807 commit_transaction->t_state = T_COMMIT_JFLUSH;
808 write_unlock(&journal->j_state_lock);
797 809
798 if (!JBD2_HAS_INCOMPAT_FEATURE(journal, 810 if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
799 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { 811 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
@@ -949,7 +961,7 @@ restart_loop:
949 961
950 jbd_debug(3, "JBD: commit phase 7\n"); 962 jbd_debug(3, "JBD: commit phase 7\n");
951 963
952 J_ASSERT(commit_transaction->t_state == T_COMMIT); 964 J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
953 965
954 commit_transaction->t_start = jiffies; 966 commit_transaction->t_start = jiffies;
955 stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging, 967 stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e0ec3db1c395..9a7826990304 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -479,9 +479,12 @@ int __jbd2_log_space_left(journal_t *journal)
479int __jbd2_log_start_commit(journal_t *journal, tid_t target) 479int __jbd2_log_start_commit(journal_t *journal, tid_t target)
480{ 480{
481 /* 481 /*
482 * Are we already doing a recent enough commit? 482 * The only transaction we can possibly wait upon is the
483 * currently running transaction (if it exists). Otherwise,
484 * the target tid must be an old one.
483 */ 485 */
484 if (!tid_geq(journal->j_commit_request, target)) { 486 if (journal->j_running_transaction &&
487 journal->j_running_transaction->t_tid == target) {
485 /* 488 /*
486 * We want a new commit: OK, mark the request and wakeup the 489 * We want a new commit: OK, mark the request and wakeup the
487 * commit thread. We do _not_ do the commit ourselves. 490 * commit thread. We do _not_ do the commit ourselves.
@@ -493,7 +496,15 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
493 journal->j_commit_sequence); 496 journal->j_commit_sequence);
494 wake_up(&journal->j_wait_commit); 497 wake_up(&journal->j_wait_commit);
495 return 1; 498 return 1;
496 } 499 } else if (!tid_geq(journal->j_commit_request, target))
500 /* This should never happen, but if it does, preserve
501 the evidence before kjournald goes into a loop and
502 increments j_commit_sequence beyond all recognition. */
503 WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
504 journal->j_commit_request,
505 journal->j_commit_sequence,
506 target, journal->j_running_transaction ?
507 journal->j_running_transaction->t_tid : 0);
497 return 0; 508 return 0;
498} 509}
499 510
@@ -577,6 +588,47 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
577} 588}
578 589
579/* 590/*
591 * Return 1 if a given transaction has not yet sent barrier request
592 * connected with a transaction commit. If 0 is returned, transaction
593 * may or may not have sent the barrier. Used to avoid sending barrier
594 * twice in common cases.
595 */
596int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
597{
598 int ret = 0;
599 transaction_t *commit_trans;
600
601 if (!(journal->j_flags & JBD2_BARRIER))
602 return 0;
603 read_lock(&journal->j_state_lock);
604 /* Transaction already committed? */
605 if (tid_geq(journal->j_commit_sequence, tid))
606 goto out;
607 commit_trans = journal->j_committing_transaction;
608 if (!commit_trans || commit_trans->t_tid != tid) {
609 ret = 1;
610 goto out;
611 }
612 /*
613 * Transaction is being committed and we already proceeded to
614 * submitting a flush to fs partition?
615 */
616 if (journal->j_fs_dev != journal->j_dev) {
617 if (!commit_trans->t_need_data_flush ||
618 commit_trans->t_state >= T_COMMIT_DFLUSH)
619 goto out;
620 } else {
621 if (commit_trans->t_state >= T_COMMIT_JFLUSH)
622 goto out;
623 }
624 ret = 1;
625out:
626 read_unlock(&journal->j_state_lock);
627 return ret;
628}
629EXPORT_SYMBOL(jbd2_trans_will_send_data_barrier);
630
631/*
580 * Wait for a specified commit to complete. 632 * Wait for a specified commit to complete.
581 * The caller may not hold the journal lock. 633 * The caller may not hold the journal lock.
582 */ 634 */
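Note: jbd2_trans_will_send_data_barrier(), added above, lets a filesystem skip its own cache flush when the commit of a given transaction will already issue one. The fragment below is only a sketch of such a caller, not code from this diff: demo_fsync_flush() and its parameters are hypothetical, jbd2_log_wait_commit() is the existing jbd2 wait primitive, and the three-argument blkdev_issue_flush() is the form visible in the fs/jbd2/commit.c hunk.

    #include <linux/jbd2.h>
    #include <linux/blkdev.h>

    static int demo_fsync_flush(journal_t *journal, tid_t commit_tid,
                                struct block_device *bdev)
    {
            /* decide before waiting: will the commit itself flush the disk cache? */
            int needs_flush = !jbd2_trans_will_send_data_barrier(journal, commit_tid);
            int ret;

            ret = jbd2_log_wait_commit(journal, commit_tid);
            if (needs_flush)
                    blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
            return ret;
    }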
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 05fa77a23711..3eec82d32fd4 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -82,7 +82,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
82 */ 82 */
83 83
84/* 84/*
85 * Update transiaction's maximum wait time, if debugging is enabled. 85 * Update transaction's maximum wait time, if debugging is enabled.
86 * 86 *
87 * In order for t_max_wait to be reliable, it must be protected by a 87 * In order for t_max_wait to be reliable, it must be protected by a
88 * lock. But doing so will mean that start_this_handle() can not be 88 * lock. But doing so will mean that start_this_handle() can not be
@@ -91,11 +91,10 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
91 * means that maximum wait time reported by the jbd2_run_stats 91 * means that maximum wait time reported by the jbd2_run_stats
92 * tracepoint will always be zero. 92 * tracepoint will always be zero.
93 */ 93 */
94static inline void update_t_max_wait(transaction_t *transaction) 94static inline void update_t_max_wait(transaction_t *transaction,
95 unsigned long ts)
95{ 96{
96#ifdef CONFIG_JBD2_DEBUG 97#ifdef CONFIG_JBD2_DEBUG
97 unsigned long ts = jiffies;
98
99 if (jbd2_journal_enable_debug && 98 if (jbd2_journal_enable_debug &&
100 time_after(transaction->t_start, ts)) { 99 time_after(transaction->t_start, ts)) {
101 ts = jbd2_time_diff(ts, transaction->t_start); 100 ts = jbd2_time_diff(ts, transaction->t_start);
@@ -121,6 +120,7 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
121 tid_t tid; 120 tid_t tid;
122 int needed, need_to_start; 121 int needed, need_to_start;
123 int nblocks = handle->h_buffer_credits; 122 int nblocks = handle->h_buffer_credits;
123 unsigned long ts = jiffies;
124 124
125 if (nblocks > journal->j_max_transaction_buffers) { 125 if (nblocks > journal->j_max_transaction_buffers) {
126 printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", 126 printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
@@ -271,7 +271,7 @@ repeat:
271 /* OK, account for the buffers that this operation expects to 271 /* OK, account for the buffers that this operation expects to
272 * use and add the handle to the running transaction. 272 * use and add the handle to the running transaction.
273 */ 273 */
274 update_t_max_wait(transaction); 274 update_t_max_wait(transaction, ts);
275 handle->h_transaction = transaction; 275 handle->h_transaction = transaction;
276 atomic_inc(&transaction->t_updates); 276 atomic_inc(&transaction->t_updates);
277 atomic_inc(&transaction->t_handle_count); 277 atomic_inc(&transaction->t_handle_count);
@@ -316,7 +316,8 @@ static handle_t *new_handle(int nblocks)
316 * This function is visible to journal users (like ext3fs), so is not 316 * This function is visible to journal users (like ext3fs), so is not
317 * called with the journal already locked. 317 * called with the journal already locked.
318 * 318 *
319 * Return a pointer to a newly allocated handle, or NULL on failure 319 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
320 * on failure.
320 */ 321 */
321handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask) 322handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
322{ 323{
@@ -921,8 +922,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
921 */ 922 */
922 JBUFFER_TRACE(jh, "cancelling revoke"); 923 JBUFFER_TRACE(jh, "cancelling revoke");
923 jbd2_journal_cancel_revoke(handle, jh); 924 jbd2_journal_cancel_revoke(handle, jh);
924 jbd2_journal_put_journal_head(jh);
925out: 925out:
926 jbd2_journal_put_journal_head(jh);
926 return err; 927 return err;
927} 928}
928 929
@@ -2147,6 +2148,13 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
2147 jinode->i_next_transaction == transaction) 2148 jinode->i_next_transaction == transaction)
2148 goto done; 2149 goto done;
2149 2150
2151 /*
2152 * We only ever set this variable to 1 so the test is safe. Since
2153 * t_need_data_flush is likely to be set, we do the test to save some
2154 * cacheline bouncing
2155 */
2156 if (!transaction->t_need_data_flush)
2157 transaction->t_need_data_flush = 1;
2150 /* On some different transaction's list - should be 2158 /* On some different transaction's list - should be
2151 * the committing one */ 2159 * the committing one */
2152 if (jinode->i_transaction) { 2160 if (jinode->i_transaction) {
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 82faddd1f321..05f73328b28b 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -609,6 +609,8 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
609 int ret; 609 int ret;
610 uint32_t now = get_seconds(); 610 uint32_t now = get_seconds();
611 611
612 dentry_unhash(dentry);
613
612 for (fd = f->dents ; fd; fd = fd->next) { 614 for (fd = f->dents ; fd; fd = fd->next) {
613 if (fd->ino) 615 if (fd->ino)
614 return -ENOTEMPTY; 616 return -ENOTEMPTY;
@@ -784,6 +786,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
784 uint8_t type; 786 uint8_t type;
785 uint32_t now; 787 uint32_t now;
786 788
789 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
790 dentry_unhash(new_dentry);
791
787 /* The VFS will check for us and prevent trying to rename a 792 /* The VFS will check for us and prevent trying to rename a
788 * file over a directory and vice versa, but if it's a directory, 793 * file over a directory and vice versa, but if it's a directory,
789 * the VFS can't check whether the victim is empty. The filesystem 794 * the VFS can't check whether the victim is empty. The filesystem
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index eaaf2b511e89..865df16a6cf3 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -360,6 +360,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
360 360
361 jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); 361 jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
362 362
363 dentry_unhash(dentry);
364
363 /* Init inode for quota operations. */ 365 /* Init inode for quota operations. */
364 dquot_initialize(dip); 366 dquot_initialize(dip);
365 dquot_initialize(ip); 367 dquot_initialize(ip);
@@ -1095,6 +1097,9 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1095 jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, 1097 jfs_info("jfs_rename: %s %s", old_dentry->d_name.name,
1096 new_dentry->d_name.name); 1098 new_dentry->d_name.name);
1097 1099
1100 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
1101 dentry_unhash(new_dentry);
1102
1098 dquot_initialize(old_dir); 1103 dquot_initialize(old_dir);
1099 dquot_initialize(new_dir); 1104 dquot_initialize(new_dir);
1100 1105
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9ed89d1663f8..f34c9cde9e94 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -273,6 +273,8 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
273{ 273{
274 struct inode *inode = dentry->d_inode; 274 struct inode *inode = dentry->d_inode;
275 275
276 dentry_unhash(dentry);
277
276 if (!logfs_empty_dir(inode)) 278 if (!logfs_empty_dir(inode))
277 return -ENOTEMPTY; 279 return -ENOTEMPTY;
278 280
@@ -622,6 +624,9 @@ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
622 loff_t pos; 624 loff_t pos;
623 int err; 625 int err;
624 626
627 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
628 dentry_unhash(new_dentry);
629
625 /* 1. locate source dd */ 630 /* 1. locate source dd */
626 err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); 631 err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
627 if (err) 632 if (err)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2f174be06555..8c32ef3ba88e 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -90,7 +90,8 @@ static DEFINE_SPINLOCK(mb_cache_spinlock);
90 * What the mbcache registers as to get shrunk dynamically. 90 * What the mbcache registers as to get shrunk dynamically.
91 */ 91 */
92 92
93static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); 93static int mb_cache_shrink_fn(struct shrinker *shrink,
94 struct shrink_control *sc);
94 95
95static struct shrinker mb_cache_shrinker = { 96static struct shrinker mb_cache_shrinker = {
96 .shrink = mb_cache_shrink_fn, 97 .shrink = mb_cache_shrink_fn,
@@ -156,18 +157,19 @@ forget:
156 * gets low. 157 * gets low.
157 * 158 *
158 * @shrink: (ignored) 159 * @shrink: (ignored)
159 * @nr_to_scan: Number of objects to scan 160 * @sc: shrink_control passed from reclaim
160 * @gfp_mask: (ignored)
161 * 161 *
162 * Returns the number of objects which are present in the cache. 162 * Returns the number of objects which are present in the cache.
163 */ 163 */
164static int 164static int
165mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 165mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
166{ 166{
167 LIST_HEAD(free_list); 167 LIST_HEAD(free_list);
168 struct mb_cache *cache; 168 struct mb_cache *cache;
169 struct mb_cache_entry *entry, *tmp; 169 struct mb_cache_entry *entry, *tmp;
170 int count = 0; 170 int count = 0;
171 int nr_to_scan = sc->nr_to_scan;
172 gfp_t gfp_mask = sc->gfp_mask;
171 173
172 mb_debug("trying to free %d entries", nr_to_scan); 174 mb_debug("trying to free %d entries", nr_to_scan);
173 spin_lock(&mb_cache_spinlock); 175 spin_lock(&mb_cache_spinlock);
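The mbcache hunk (and the nfs one further down) switches the shrinker to the consolidated callback signature, where the scan parameters arrive packed in a struct shrink_control. A minimal sketch of a shrinker written against that signature, assuming only the fields the hunk itself uses (nr_to_scan and gfp_mask) and an illustrative object counter rather than a real cache:

#include <linux/mm.h>           /* struct shrinker, struct shrink_control */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_nr_cached;      /* illustrative object count */

static int demo_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        int nr_to_scan = sc->nr_to_scan;

        if (!(sc->gfp_mask & __GFP_FS))
                return -1;      /* cannot recurse into filesystem code */

        spin_lock(&demo_lock);
        while (nr_to_scan-- > 0 && demo_nr_cached > 0)
                demo_nr_cached--;       /* "free" one illustrative entry */
        spin_unlock(&demo_lock);

        return demo_nr_cached;  /* objects still cached */
}

static struct shrinker demo_shrinker = {
        .shrink = demo_shrink,
        .seeks  = DEFAULT_SEEKS,
};

Registration itself is unchanged; register_shrinker(&demo_shrinker) still wires the callback into memory reclaim.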
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 6e6777f1b4b2..f60aed8db9c4 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -168,6 +168,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
168 struct inode * inode = dentry->d_inode; 168 struct inode * inode = dentry->d_inode;
169 int err = -ENOTEMPTY; 169 int err = -ENOTEMPTY;
170 170
171 dentry_unhash(dentry);
172
171 if (minix_empty_dir(inode)) { 173 if (minix_empty_dir(inode)) {
172 err = minix_unlink(dir, dentry); 174 err = minix_unlink(dir, dentry);
173 if (!err) { 175 if (!err) {
@@ -190,6 +192,9 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
190 struct minix_dir_entry * old_de; 192 struct minix_dir_entry * old_de;
191 int err = -ENOENT; 193 int err = -ENOENT;
192 194
195 if (new_inode && S_ISDIR(new_inode->i_mode))
196 dentry_unhash(new_dentry);
197
193 old_de = minix_find_entry(old_dentry, &old_page); 198 old_de = minix_find_entry(old_dentry, &old_page);
194 if (!old_de) 199 if (!old_de)
195 goto out; 200 goto out;
diff --git a/fs/mpage.c b/fs/mpage.c
index 0afc809e46e0..fdfae9fa98cd 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -27,6 +27,7 @@
27#include <linux/writeback.h> 27#include <linux/writeback.h>
28#include <linux/backing-dev.h> 28#include <linux/backing-dev.h>
29#include <linux/pagevec.h> 29#include <linux/pagevec.h>
30#include <linux/cleancache.h>
30 31
31/* 32/*
32 * I/O completion handler for multipage BIOs. 33 * I/O completion handler for multipage BIOs.
@@ -271,6 +272,12 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
271 SetPageMappedToDisk(page); 272 SetPageMappedToDisk(page);
272 } 273 }
273 274
275 if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
276 cleancache_get_page(page) == 0) {
277 SetPageUptodate(page);
278 goto confused;
279 }
280
274 /* 281 /*
275 * This page will go to BIO. Do we need to send this BIO off first? 282 * This page will go to BIO. Do we need to send this BIO off first?
276 */ 283 */
diff --git a/fs/namei.c b/fs/namei.c
index 6ff858c049c0..2358b326b221 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -391,79 +391,28 @@ void path_put(struct path *path)
391} 391}
392EXPORT_SYMBOL(path_put); 392EXPORT_SYMBOL(path_put);
393 393
394/** 394/*
395 * nameidata_drop_rcu - drop this nameidata out of rcu-walk
396 * @nd: nameidata pathwalk data to drop
397 * Returns: 0 on success, -ECHILD on failure
398 *
399 * Path walking has 2 modes, rcu-walk and ref-walk (see 395 * Path walking has 2 modes, rcu-walk and ref-walk (see
400 * Documentation/filesystems/path-lookup.txt). __drop_rcu* functions attempt 396 * Documentation/filesystems/path-lookup.txt). In situations when we can't
401 * to drop out of rcu-walk mode and take normal reference counts on dentries 397 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
402 * and vfsmounts to transition to rcu-walk mode. __drop_rcu* functions take 398 * normal reference counts on dentries and vfsmounts to transition to rcu-walk
403 * refcounts at the last known good point before rcu-walk got stuck, so 399 * mode. Refcounts are grabbed at the last known good point before rcu-walk
404 * ref-walk may continue from there. If this is not successful (eg. a seqcount 400 * got stuck, so ref-walk may continue from there. If this is not successful
405 * has changed), then failure is returned and path walk restarts from the 401 * (eg. a seqcount has changed), then failure is returned and it's up to caller
406 * beginning in ref-walk mode. 402 * to restart the path walk from the beginning in ref-walk mode.
407 *
408 * nameidata_drop_rcu attempts to drop the current nd->path and nd->root into
409 * ref-walk. Must be called from rcu-walk context.
410 */ 403 */
411static int nameidata_drop_rcu(struct nameidata *nd)
412{
413 struct fs_struct *fs = current->fs;
414 struct dentry *dentry = nd->path.dentry;
415 int want_root = 0;
416
417 BUG_ON(!(nd->flags & LOOKUP_RCU));
418 if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
419 want_root = 1;
420 spin_lock(&fs->lock);
421 if (nd->root.mnt != fs->root.mnt ||
422 nd->root.dentry != fs->root.dentry)
423 goto err_root;
424 }
425 spin_lock(&dentry->d_lock);
426 if (!__d_rcu_to_refcount(dentry, nd->seq))
427 goto err;
428 BUG_ON(nd->inode != dentry->d_inode);
429 spin_unlock(&dentry->d_lock);
430 if (want_root) {
431 path_get(&nd->root);
432 spin_unlock(&fs->lock);
433 }
434 mntget(nd->path.mnt);
435
436 rcu_read_unlock();
437 br_read_unlock(vfsmount_lock);
438 nd->flags &= ~LOOKUP_RCU;
439 return 0;
440err:
441 spin_unlock(&dentry->d_lock);
442err_root:
443 if (want_root)
444 spin_unlock(&fs->lock);
445 return -ECHILD;
446}
447
448/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */
449static inline int nameidata_drop_rcu_maybe(struct nameidata *nd)
450{
451 if (nd->flags & LOOKUP_RCU)
452 return nameidata_drop_rcu(nd);
453 return 0;
454}
455 404
456/** 405/**
457 * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk 406 * unlazy_walk - try to switch to ref-walk mode.
458 * @nd: nameidata pathwalk data to drop 407 * @nd: nameidata pathwalk data
459 * @dentry: dentry to drop 408 * @dentry: child of nd->path.dentry or NULL
460 * Returns: 0 on success, -ECHILD on failure 409 * Returns: 0 on success, -ECHILD on failure
461 * 410 *
462 * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root, 411 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
463 * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on 412 * for ref-walk mode. @dentry must be a path found by a do_lookup call on
464 * @nd. Must be called from rcu-walk context. 413 * @nd or NULL. Must be called from rcu-walk context.
465 */ 414 */
466static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry) 415static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
467{ 416{
468 struct fs_struct *fs = current->fs; 417 struct fs_struct *fs = current->fs;
469 struct dentry *parent = nd->path.dentry; 418 struct dentry *parent = nd->path.dentry;
@@ -478,18 +427,25 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
478 goto err_root; 427 goto err_root;
479 } 428 }
480 spin_lock(&parent->d_lock); 429 spin_lock(&parent->d_lock);
481 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 430 if (!dentry) {
482 if (!__d_rcu_to_refcount(dentry, nd->seq)) 431 if (!__d_rcu_to_refcount(parent, nd->seq))
483 goto err; 432 goto err_parent;
484 /* 433 BUG_ON(nd->inode != parent->d_inode);
485 * If the sequence check on the child dentry passed, then the child has 434 } else {
486 * not been removed from its parent. This means the parent dentry must 435 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
487 * be valid and able to take a reference at this point. 436 if (!__d_rcu_to_refcount(dentry, nd->seq))
488 */ 437 goto err_child;
489 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); 438 /*
490 BUG_ON(!parent->d_count); 439 * If the sequence check on the child dentry passed, then
491 parent->d_count++; 440 * the child has not been removed from its parent. This
492 spin_unlock(&dentry->d_lock); 441 * means the parent dentry must be valid and able to take
442 * a reference at this point.
443 */
444 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
445 BUG_ON(!parent->d_count);
446 parent->d_count++;
447 spin_unlock(&dentry->d_lock);
448 }
493 spin_unlock(&parent->d_lock); 449 spin_unlock(&parent->d_lock);
494 if (want_root) { 450 if (want_root) {
495 path_get(&nd->root); 451 path_get(&nd->root);
@@ -501,8 +457,10 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
501 br_read_unlock(vfsmount_lock); 457 br_read_unlock(vfsmount_lock);
502 nd->flags &= ~LOOKUP_RCU; 458 nd->flags &= ~LOOKUP_RCU;
503 return 0; 459 return 0;
504err: 460
461err_child:
505 spin_unlock(&dentry->d_lock); 462 spin_unlock(&dentry->d_lock);
463err_parent:
506 spin_unlock(&parent->d_lock); 464 spin_unlock(&parent->d_lock);
507err_root: 465err_root:
508 if (want_root) 466 if (want_root)
@@ -510,59 +468,6 @@ err_root:
510 return -ECHILD; 468 return -ECHILD;
511} 469}
512 470
513/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */
514static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry)
515{
516 if (nd->flags & LOOKUP_RCU) {
517 if (unlikely(nameidata_dentry_drop_rcu(nd, dentry))) {
518 nd->flags &= ~LOOKUP_RCU;
519 if (!(nd->flags & LOOKUP_ROOT))
520 nd->root.mnt = NULL;
521 rcu_read_unlock();
522 br_read_unlock(vfsmount_lock);
523 return -ECHILD;
524 }
525 }
526 return 0;
527}
528
529/**
530 * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk
531 * @nd: nameidata pathwalk data to drop
532 * Returns: 0 on success, -ECHILD on failure
533 *
534 * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk.
535 * nd->path should be the final element of the lookup, so nd->root is discarded.
536 * Must be called from rcu-walk context.
537 */
538static int nameidata_drop_rcu_last(struct nameidata *nd)
539{
540 struct dentry *dentry = nd->path.dentry;
541
542 BUG_ON(!(nd->flags & LOOKUP_RCU));
543 nd->flags &= ~LOOKUP_RCU;
544 if (!(nd->flags & LOOKUP_ROOT))
545 nd->root.mnt = NULL;
546 spin_lock(&dentry->d_lock);
547 if (!__d_rcu_to_refcount(dentry, nd->seq))
548 goto err_unlock;
549 BUG_ON(nd->inode != dentry->d_inode);
550 spin_unlock(&dentry->d_lock);
551
552 mntget(nd->path.mnt);
553
554 rcu_read_unlock();
555 br_read_unlock(vfsmount_lock);
556
557 return 0;
558
559err_unlock:
560 spin_unlock(&dentry->d_lock);
561 rcu_read_unlock();
562 br_read_unlock(vfsmount_lock);
563 return -ECHILD;
564}
565
566/** 471/**
567 * release_open_intent - free up open intent resources 472 * release_open_intent - free up open intent resources
568 * @nd: pointer to nameidata 473 * @nd: pointer to nameidata
@@ -606,26 +511,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
606 return dentry; 511 return dentry;
607} 512}
608 513
609/* 514/**
610 * handle_reval_path - force revalidation of a dentry 515 * complete_walk - successful completion of path walk
 611 *                                                           516 * @nd: pointer to nameidata
612 * In some situations the path walking code will trust dentries without
613 * revalidating them. This causes problems for filesystems that depend on
614 * d_revalidate to handle file opens (e.g. NFSv4). When FS_REVAL_DOT is set
615 * (which indicates that it's possible for the dentry to go stale), force
616 * a d_revalidate call before proceeding.
617 * 517 *
618 * Returns 0 if the revalidation was successful. If the revalidation fails, 518 * If we had been in RCU mode, drop out of it and legitimize nd->path.
619 * either return the error returned by d_revalidate or -ESTALE if the 519 * Revalidate the final result, unless we'd already done that during
620 * revalidation it just returned 0. If d_revalidate returns 0, we attempt to 520 * the path walk or the filesystem doesn't ask for it. Return 0 on
621 * invalidate the dentry. It's up to the caller to handle putting references 521 * success, -error on failure. In case of failure caller does not
622 * to the path if necessary. 522 * need to drop nd->path.
623 */ 523 */
624static inline int handle_reval_path(struct nameidata *nd) 524static int complete_walk(struct nameidata *nd)
625{ 525{
626 struct dentry *dentry = nd->path.dentry; 526 struct dentry *dentry = nd->path.dentry;
627 int status; 527 int status;
628 528
529 if (nd->flags & LOOKUP_RCU) {
530 nd->flags &= ~LOOKUP_RCU;
531 if (!(nd->flags & LOOKUP_ROOT))
532 nd->root.mnt = NULL;
533 spin_lock(&dentry->d_lock);
534 if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
535 spin_unlock(&dentry->d_lock);
536 rcu_read_unlock();
537 br_read_unlock(vfsmount_lock);
538 return -ECHILD;
539 }
540 BUG_ON(nd->inode != dentry->d_inode);
541 spin_unlock(&dentry->d_lock);
542 mntget(nd->path.mnt);
543 rcu_read_unlock();
544 br_read_unlock(vfsmount_lock);
545 }
546
629 if (likely(!(nd->flags & LOOKUP_JUMPED))) 547 if (likely(!(nd->flags & LOOKUP_JUMPED)))
630 return 0; 548 return 0;
631 549
@@ -643,6 +561,7 @@ static inline int handle_reval_path(struct nameidata *nd)
643 if (!status) 561 if (!status)
644 status = -ESTALE; 562 status = -ESTALE;
645 563
564 path_put(&nd->path);
646 return status; 565 return status;
647} 566}
648 567
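With complete_walk() in place, a successful path walk ends with a single call that leaves RCU mode, legitimizes nd->path, and runs the FS_REVAL_DOT revalidation; on failure the helper has already dropped nd->path. A hedged sketch of the resulting calling convention (illustrative wrapper, not a function in the patch):

static int demo_finish_walk(struct nameidata *nd)
{
        int err = complete_walk(nd);    /* exits rcu-walk, revalidates */

        if (err)
                return err;     /* nd->path was already released */

        /* nd->path now holds normal references and is safe to use. */
        return 0;
}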
@@ -1241,13 +1160,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
1241 if (likely(__follow_mount_rcu(nd, path, inode, false))) 1160 if (likely(__follow_mount_rcu(nd, path, inode, false)))
1242 return 0; 1161 return 0;
1243unlazy: 1162unlazy:
1244 if (dentry) { 1163 if (unlazy_walk(nd, dentry))
1245 if (nameidata_dentry_drop_rcu(nd, dentry)) 1164 return -ECHILD;
1246 return -ECHILD;
1247 } else {
1248 if (nameidata_drop_rcu(nd))
1249 return -ECHILD;
1250 }
1251 } else { 1165 } else {
1252 dentry = __d_lookup(parent, name); 1166 dentry = __d_lookup(parent, name);
1253 } 1167 }
@@ -1303,7 +1217,7 @@ static inline int may_lookup(struct nameidata *nd)
1303 int err = exec_permission(nd->inode, IPERM_FLAG_RCU); 1217 int err = exec_permission(nd->inode, IPERM_FLAG_RCU);
1304 if (err != -ECHILD) 1218 if (err != -ECHILD)
1305 return err; 1219 return err;
1306 if (nameidata_drop_rcu(nd)) 1220 if (unlazy_walk(nd, NULL))
1307 return -ECHILD; 1221 return -ECHILD;
1308 } 1222 }
1309 return exec_permission(nd->inode, 0); 1223 return exec_permission(nd->inode, 0);
@@ -1357,8 +1271,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
1357 return -ENOENT; 1271 return -ENOENT;
1358 } 1272 }
1359 if (unlikely(inode->i_op->follow_link) && follow) { 1273 if (unlikely(inode->i_op->follow_link) && follow) {
1360 if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) 1274 if (nd->flags & LOOKUP_RCU) {
1361 return -ECHILD; 1275 if (unlikely(unlazy_walk(nd, path->dentry))) {
1276 terminate_walk(nd);
1277 return -ECHILD;
1278 }
1279 }
1362 BUG_ON(inode != path->dentry->d_inode); 1280 BUG_ON(inode != path->dentry->d_inode);
1363 return 1; 1281 return 1;
1364 } 1282 }
@@ -1657,18 +1575,8 @@ static int path_lookupat(int dfd, const char *name,
1657 } 1575 }
1658 } 1576 }
1659 1577
1660 if (nd->flags & LOOKUP_RCU) { 1578 if (!err)
1661 /* went all way through without dropping RCU */ 1579 err = complete_walk(nd);
1662 BUG_ON(err);
1663 if (nameidata_drop_rcu_last(nd))
1664 err = -ECHILD;
1665 }
1666
1667 if (!err) {
1668 err = handle_reval_path(nd);
1669 if (err)
1670 path_put(&nd->path);
1671 }
1672 1580
1673 if (!err && nd->flags & LOOKUP_DIRECTORY) { 1581 if (!err && nd->flags & LOOKUP_DIRECTORY) {
1674 if (!nd->inode->i_op->lookup) { 1582 if (!nd->inode->i_op->lookup) {
@@ -2134,13 +2042,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2134 return ERR_PTR(error); 2042 return ERR_PTR(error);
2135 /* fallthrough */ 2043 /* fallthrough */
2136 case LAST_ROOT: 2044 case LAST_ROOT:
2137 if (nd->flags & LOOKUP_RCU) { 2045 error = complete_walk(nd);
2138 if (nameidata_drop_rcu_last(nd))
2139 return ERR_PTR(-ECHILD);
2140 }
2141 error = handle_reval_path(nd);
2142 if (error) 2046 if (error)
2143 goto exit; 2047 return ERR_PTR(error);
2144 audit_inode(pathname, nd->path.dentry); 2048 audit_inode(pathname, nd->path.dentry);
2145 if (open_flag & O_CREAT) { 2049 if (open_flag & O_CREAT) {
2146 error = -EISDIR; 2050 error = -EISDIR;
@@ -2148,10 +2052,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2148 } 2052 }
2149 goto ok; 2053 goto ok;
2150 case LAST_BIND: 2054 case LAST_BIND:
2151 /* can't be RCU mode here */ 2055 error = complete_walk(nd);
2152 error = handle_reval_path(nd);
2153 if (error) 2056 if (error)
2154 goto exit; 2057 return ERR_PTR(error);
2155 audit_inode(pathname, dir); 2058 audit_inode(pathname, dir);
2156 goto ok; 2059 goto ok;
2157 } 2060 }
@@ -2170,10 +2073,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2170 if (error) /* symlink */ 2073 if (error) /* symlink */
2171 return NULL; 2074 return NULL;
2172 /* sayonara */ 2075 /* sayonara */
2173 if (nd->flags & LOOKUP_RCU) { 2076 error = complete_walk(nd);
2174 if (nameidata_drop_rcu_last(nd)) 2077 if (error)
2175 return ERR_PTR(-ECHILD); 2078 return ERR_PTR(-ECHILD);
2176 }
2177 2079
2178 error = -ENOTDIR; 2080 error = -ENOTDIR;
2179 if (nd->flags & LOOKUP_DIRECTORY) { 2081 if (nd->flags & LOOKUP_DIRECTORY) {
@@ -2185,11 +2087,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2185 } 2087 }
2186 2088
2187 /* create side of things */ 2089 /* create side of things */
2188 2090 error = complete_walk(nd);
2189 if (nd->flags & LOOKUP_RCU) { 2091 if (error)
2190 if (nameidata_drop_rcu_last(nd)) 2092 return ERR_PTR(error);
2191 return ERR_PTR(-ECHILD);
2192 }
2193 2093
2194 audit_inode(pathname, dir); 2094 audit_inode(pathname, dir);
2195 error = -EISDIR; 2095 error = -EISDIR;
@@ -2629,10 +2529,10 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
2629} 2529}
2630 2530
2631/* 2531/*
2632 * We try to drop the dentry early: we should have 2532 * The dentry_unhash() helper will try to drop the dentry early: we
2633 * a usage count of 2 if we're the only user of this 2533 * should have a usage count of 2 if we're the only user of this
2634 * dentry, and if that is true (possibly after pruning 2534 * dentry, and if that is true (possibly after pruning the dcache),
2635 * the dcache), then we drop the dentry now. 2535 * then we drop the dentry now.
2636 * 2536 *
2637 * A low-level filesystem can, if it chooses, legally       2537 * A low-level filesystem can, if it chooses, legally
2638 * do a 2538 * do a
@@ -2645,10 +2545,9 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
2645 */ 2545 */
2646void dentry_unhash(struct dentry *dentry) 2546void dentry_unhash(struct dentry *dentry)
2647{ 2547{
2648 dget(dentry);
2649 shrink_dcache_parent(dentry); 2548 shrink_dcache_parent(dentry);
2650 spin_lock(&dentry->d_lock); 2549 spin_lock(&dentry->d_lock);
2651 if (dentry->d_count == 2) 2550 if (dentry->d_count == 1)
2652 __d_drop(dentry); 2551 __d_drop(dentry);
2653 spin_unlock(&dentry->d_lock); 2552 spin_unlock(&dentry->d_lock);
2654} 2553}
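dentry_unhash() no longer takes its own reference (the dget() is gone and the expected count drops from 2 to 1), and the VFS no longer calls it on behalf of every filesystem; the per-filesystem hunks in this diff add the call back only where the filesystem cannot handle open-but-unlinked directories. A sketch of an rmdir method under the new convention, with demo_dir_is_empty() and demo_remove_entry() as placeholders rather than real helpers:

static bool demo_dir_is_empty(struct inode *inode);                     /* placeholder */
static int demo_remove_entry(struct inode *dir, struct dentry *dentry); /* placeholder */

static int demo_rmdir(struct inode *dir, struct dentry *dentry)
{
        dentry_unhash(dentry);  /* the filesystem opts in explicitly now */

        if (!demo_dir_is_empty(dentry->d_inode))
                return -ENOTEMPTY;

        return demo_remove_entry(dir, dentry);
}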
@@ -2664,25 +2563,26 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
2664 return -EPERM; 2563 return -EPERM;
2665 2564
2666 mutex_lock(&dentry->d_inode->i_mutex); 2565 mutex_lock(&dentry->d_inode->i_mutex);
2667 dentry_unhash(dentry); 2566
2567 error = -EBUSY;
2668 if (d_mountpoint(dentry)) 2568 if (d_mountpoint(dentry))
2669 error = -EBUSY; 2569 goto out;
2670 else { 2570
2671 error = security_inode_rmdir(dir, dentry); 2571 error = security_inode_rmdir(dir, dentry);
2672 if (!error) { 2572 if (error)
2673 error = dir->i_op->rmdir(dir, dentry); 2573 goto out;
2674 if (!error) { 2574
2675 dentry->d_inode->i_flags |= S_DEAD; 2575 error = dir->i_op->rmdir(dir, dentry);
2676 dont_mount(dentry); 2576 if (error)
2677 } 2577 goto out;
2678 } 2578
2679 } 2579 dentry->d_inode->i_flags |= S_DEAD;
2580 dont_mount(dentry);
2581
2582out:
2680 mutex_unlock(&dentry->d_inode->i_mutex); 2583 mutex_unlock(&dentry->d_inode->i_mutex);
2681 if (!error) { 2584 if (!error)
2682 d_delete(dentry); 2585 d_delete(dentry);
2683 }
2684 dput(dentry);
2685
2686 return error; 2586 return error;
2687} 2587}
2688 2588
@@ -3053,12 +2953,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
3053 * HOWEVER, it relies on the assumption that any object with ->lookup() 2953 * HOWEVER, it relies on the assumption that any object with ->lookup()
3054 * has no more than 1 dentry. If "hybrid" objects will ever appear, 2954 * has no more than 1 dentry. If "hybrid" objects will ever appear,
3055 * we'd better make sure that there's no link(2) for them. 2955 * we'd better make sure that there's no link(2) for them.
3056 * d) some filesystems don't support opened-but-unlinked directories, 2956 * d) conversion from fhandle to dentry may come in the wrong moment - when
3057 * either because of layout or because they are not ready to deal with
3058 * all cases correctly. The latter will be fixed (taking this sort of
3059 * stuff into VFS), but the former is not going away. Solution: the same
3060 * trick as in rmdir().
3061 * e) conversion from fhandle to dentry may come in the wrong moment - when
3062 * we are removing the target. Solution: we will have to grab ->i_mutex 2957 * we are removing the target. Solution: we will have to grab ->i_mutex
3063 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on 2958 * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
3064 * ->i_mutex on parents, which works but leads to some truly excessive 2959 * ->i_mutex on parents, which works but leads to some truly excessive
@@ -3068,7 +2963,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3068 struct inode *new_dir, struct dentry *new_dentry) 2963 struct inode *new_dir, struct dentry *new_dentry)
3069{ 2964{
3070 int error = 0; 2965 int error = 0;
3071 struct inode *target; 2966 struct inode *target = new_dentry->d_inode;
3072 2967
3073 /* 2968 /*
3074 * If we are going to change the parent - check write permissions, 2969 * If we are going to change the parent - check write permissions,
@@ -3084,26 +2979,24 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3084 if (error) 2979 if (error)
3085 return error; 2980 return error;
3086 2981
3087 target = new_dentry->d_inode;
3088 if (target) 2982 if (target)
3089 mutex_lock(&target->i_mutex); 2983 mutex_lock(&target->i_mutex);
3090 if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) 2984
3091 error = -EBUSY; 2985 error = -EBUSY;
3092 else { 2986 if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
3093 if (target) 2987 goto out;
3094 dentry_unhash(new_dentry); 2988
3095 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); 2989 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
3096 } 2990 if (error)
2991 goto out;
2992
3097 if (target) { 2993 if (target) {
3098 if (!error) { 2994 target->i_flags |= S_DEAD;
3099 target->i_flags |= S_DEAD; 2995 dont_mount(new_dentry);
3100 dont_mount(new_dentry);
3101 }
3102 mutex_unlock(&target->i_mutex);
3103 if (d_unhashed(new_dentry))
3104 d_rehash(new_dentry);
3105 dput(new_dentry);
3106 } 2996 }
2997out:
2998 if (target)
2999 mutex_unlock(&target->i_mutex);
3107 if (!error) 3000 if (!error)
3108 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 3001 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
3109 d_move(old_dentry,new_dentry); 3002 d_move(old_dentry,new_dentry);
@@ -3113,7 +3006,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
3113static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, 3006static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
3114 struct inode *new_dir, struct dentry *new_dentry) 3007 struct inode *new_dir, struct dentry *new_dentry)
3115{ 3008{
3116 struct inode *target; 3009 struct inode *target = new_dentry->d_inode;
3117 int error; 3010 int error;
3118 3011
3119 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); 3012 error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
@@ -3121,19 +3014,22 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
3121 return error; 3014 return error;
3122 3015
3123 dget(new_dentry); 3016 dget(new_dentry);
3124 target = new_dentry->d_inode;
3125 if (target) 3017 if (target)
3126 mutex_lock(&target->i_mutex); 3018 mutex_lock(&target->i_mutex);
3019
3020 error = -EBUSY;
3127 if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) 3021 if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
3128 error = -EBUSY; 3022 goto out;
3129 else 3023
3130 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); 3024 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
3131 if (!error) { 3025 if (error)
3132 if (target) 3026 goto out;
3133 dont_mount(new_dentry); 3027
3134 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 3028 if (target)
3135 d_move(old_dentry, new_dentry); 3029 dont_mount(new_dentry);
3136 } 3030 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
3031 d_move(old_dentry, new_dentry);
3032out:
3137 if (target) 3033 if (target)
3138 mutex_unlock(&target->i_mutex); 3034 mutex_unlock(&target->i_mutex);
3139 dput(new_dentry); 3035 dput(new_dentry);
diff --git a/fs/namespace.c b/fs/namespace.c
index d99bcf59e4c2..fe59bd145d21 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1695,7 +1695,7 @@ static int graft_tree(struct vfsmount *mnt, struct path *path)
1695 1695
1696static int flags_to_propagation_type(int flags) 1696static int flags_to_propagation_type(int flags)
1697{ 1697{
1698 int type = flags & ~MS_REC; 1698 int type = flags & ~(MS_REC | MS_SILENT);
1699 1699
1700 /* Fail if any non-propagation flags are set */ 1700 /* Fail if any non-propagation flags are set */
1701 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) 1701 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
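flags_to_propagation_type() now strips MS_SILENT along with MS_REC, since both are modifiers rather than propagation types and either may legitimately accompany MS_SHARED and friends. A minimal sketch of the check as it stands after the change:

static int demo_propagation_type(int flags)
{
        int type = flags & ~(MS_REC | MS_SILENT);       /* drop the modifiers */

        /* Only the pure propagation bits may remain at this point. */
        if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
                return 0;
        return type;
}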
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index f6946bb5cb55..e3e646b06404 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1033,6 +1033,8 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
1033 DPRINTK("ncp_rmdir: removing %s/%s\n", 1033 DPRINTK("ncp_rmdir: removing %s/%s\n",
1034 dentry->d_parent->d_name.name, dentry->d_name.name); 1034 dentry->d_parent->d_name.name, dentry->d_name.name);
1035 1035
1036 dentry_unhash(dentry);
1037
1036 error = -EBUSY; 1038 error = -EBUSY;
1037 if (!d_unhashed(dentry)) 1039 if (!d_unhashed(dentry))
1038 goto out; 1040 goto out;
@@ -1139,6 +1141,9 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
1139 old_dentry->d_parent->d_name.name, old_dentry->d_name.name, 1141 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
1140 new_dentry->d_parent->d_name.name, new_dentry->d_name.name); 1142 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
1141 1143
1144 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
1145 dentry_unhash(new_dentry);
1146
1142 ncp_age_dentry(server, old_dentry); 1147 ncp_age_dentry(server, old_dentry);
1143 ncp_age_dentry(server, new_dentry); 1148 ncp_age_dentry(server, new_dentry);
1144 1149
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 0250e4ce4893..202f370526a7 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -461,7 +461,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
461#endif 461#endif
462 struct ncp_entry_info finfo; 462 struct ncp_entry_info finfo;
463 463
464 data.wdog_pid = NULL; 464 memset(&data, 0, sizeof(data));
465 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL); 465 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
466 if (!server) 466 if (!server)
467 return -ENOMEM; 467 return -ENOMEM;
@@ -496,7 +496,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
496 struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data; 496 struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;
497 497
498 data.flags = md->flags; 498 data.flags = md->flags;
499 data.int_flags = 0;
500 data.mounted_uid = md->mounted_uid; 499 data.mounted_uid = md->mounted_uid;
501 data.wdog_pid = find_get_pid(md->wdog_pid); 500 data.wdog_pid = find_get_pid(md->wdog_pid);
502 data.ncp_fd = md->ncp_fd; 501 data.ncp_fd = md->ncp_fd;
@@ -507,7 +506,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
507 data.file_mode = md->file_mode; 506 data.file_mode = md->file_mode;
508 data.dir_mode = md->dir_mode; 507 data.dir_mode = md->dir_mode;
509 data.info_fd = -1; 508 data.info_fd = -1;
510 data.mounted_vol[0] = 0;
511 } 509 }
512 break; 510 break;
513 default: 511 default:
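ncp_fill_super() now zeroes the whole on-stack mount-data structure up front, replacing the scattered per-field resets (int_flags, mounted_vol[0]) and ensuring that any field added later also starts out cleared. A small sketch of the idea with an illustrative structure, not the real ncp mount data:

#include <linux/string.h>

struct demo_mount_data {
        int flags;
        int info_fd;
        char mounted_vol[20];
        /* ... */
};

static void demo_init_mount_data(struct demo_mount_data *data)
{
        memset(data, 0, sizeof(*data)); /* nothing left uninitialized */
        data->info_fd = -1;             /* then apply non-zero defaults */
}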
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index a7c07b44b100..e5d71b27a5b0 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -16,6 +16,7 @@
16#include <linux/mman.h> 16#include <linux/mman.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/fcntl.h> 18#include <linux/fcntl.h>
19#include <linux/memcontrol.h>
19 20
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21#include <asm/system.h> 22#include <asm/system.h>
@@ -92,6 +93,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
92 * -- wli 93 * -- wli
93 */ 94 */
94 count_vm_event(PGMAJFAULT); 95 count_vm_event(PGMAJFAULT);
96 mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
95 return VM_FAULT_MAJOR; 97 return VM_FAULT_MAJOR;
96} 98}
97 99
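The ncpfs fault handler now charges a major fault to the faulting task's memory cgroup as well as to the global counters. A sketch of the accounting pair as a fault handler would use it (illustrative wrapper, not the real ncp_file_mmap_fault()):

#include <linux/mm.h>
#include <linux/memcontrol.h>

static int demo_account_major_fault(struct vm_area_struct *vma)
{
        count_vm_event(PGMAJFAULT);                             /* global counter */
        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);      /* per-memcg counter */
        return VM_FAULT_MAJOR;
}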
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7237672216c8..424e47773a84 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2042,11 +2042,14 @@ static void nfs_access_free_list(struct list_head *head)
2042 } 2042 }
2043} 2043}
2044 2044
2045int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 2045int nfs_access_cache_shrinker(struct shrinker *shrink,
2046 struct shrink_control *sc)
2046{ 2047{
2047 LIST_HEAD(head); 2048 LIST_HEAD(head);
2048 struct nfs_inode *nfsi, *next; 2049 struct nfs_inode *nfsi, *next;
2049 struct nfs_access_entry *cache; 2050 struct nfs_access_entry *cache;
2051 int nr_to_scan = sc->nr_to_scan;
2052 gfp_t gfp_mask = sc->gfp_mask;
2050 2053
2051 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) 2054 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
2052 return (nr_to_scan == 0) ? 0 : -1; 2055 return (nr_to_scan == 0) ? 0 : -1;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ce118ce885dd..2df6ca7b5898 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -234,7 +234,7 @@ extern int nfs_init_client(struct nfs_client *clp,
234 234
235/* dir.c */ 235/* dir.c */
236extern int nfs_access_cache_shrinker(struct shrinker *shrink, 236extern int nfs_access_cache_shrinker(struct shrinker *shrink,
237 int nr_to_scan, gfp_t gfp_mask); 237 struct shrink_control *sc);
238 238
239/* inode.c */ 239/* inode.c */
240extern struct workqueue_struct *nfsiod_workqueue; 240extern struct workqueue_struct *nfsiod_workqueue;
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 546849b3e88f..1102a5fbb744 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -334,6 +334,8 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
334 struct nilfs_transaction_info ti; 334 struct nilfs_transaction_info ti;
335 int err; 335 int err;
336 336
337 dentry_unhash(dentry);
338
337 err = nilfs_transaction_begin(dir->i_sb, &ti, 0); 339 err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
338 if (err) 340 if (err)
339 return err; 341 return err;
@@ -369,6 +371,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
369 struct nilfs_transaction_info ti; 371 struct nilfs_transaction_info ti;
370 int err; 372 int err;
371 373
374 if (new_inode && S_ISDIR(new_inode->i_mode))
375 dentry_unhash(new_dentry);
376
372 err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1); 377 err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
373 if (unlikely(err)) 378 if (unlikely(err))
374 return err; 379 return err;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 823bc35334e0..cdbaf5e97308 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -41,6 +41,7 @@
41#include <linux/mount.h> 41#include <linux/mount.h>
42#include <linux/seq_file.h> 42#include <linux/seq_file.h>
43#include <linux/quotaops.h> 43#include <linux/quotaops.h>
44#include <linux/cleancache.h>
44 45
45#define CREATE_TRACE_POINTS 46#define CREATE_TRACE_POINTS
46#include "ocfs2_trace.h" 47#include "ocfs2_trace.h"
@@ -2352,6 +2353,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
2352 mlog_errno(status); 2353 mlog_errno(status);
2353 goto bail; 2354 goto bail;
2354 } 2355 }
2356 cleancache_init_shared_fs((char *)&uuid_net_key, sb);
2355 2357
2356bail: 2358bail:
2357 return status; 2359 return status;
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index de4ff29f1e05..c368360c35a1 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -240,8 +240,12 @@ static int omfs_remove(struct inode *dir, struct dentry *dentry)
240 struct inode *inode = dentry->d_inode; 240 struct inode *inode = dentry->d_inode;
241 int ret; 241 int ret;
242 242
243 if (S_ISDIR(inode->i_mode) && !omfs_dir_is_empty(inode)) 243
244 return -ENOTEMPTY; 244 if (S_ISDIR(inode->i_mode)) {
245 dentry_unhash(dentry);
246 if (!omfs_dir_is_empty(inode))
247 return -ENOTEMPTY;
248 }
245 249
246 ret = omfs_delete_entry(dentry); 250 ret = omfs_delete_entry(dentry);
247 if (ret) 251 if (ret)
@@ -378,6 +382,9 @@ static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
378 int err; 382 int err;
379 383
380 if (new_inode) { 384 if (new_inode) {
385 if (S_ISDIR(new_inode->i_mode))
386 dentry_unhash(new_dentry);
387
381 /* overwriting existing file/dir */ 388 /* overwriting existing file/dir */
382 err = omfs_remove(new_dir, new_dentry); 389 err = omfs_remove(new_dir, new_dentry);
383 if (err) 390 if (err)
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index d545e97d99c3..8ed4d3433199 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -255,7 +255,11 @@ ssize_t part_discard_alignment_show(struct device *dev,
255 struct device_attribute *attr, char *buf) 255 struct device_attribute *attr, char *buf)
256{ 256{
257 struct hd_struct *p = dev_to_part(dev); 257 struct hd_struct *p = dev_to_part(dev);
258 return sprintf(buf, "%u\n", p->discard_alignment); 258 struct gendisk *disk = dev_to_disk(dev);
259
260 return sprintf(buf, "%u\n",
261 queue_limit_discard_alignment(&disk->queue->limits,
262 p->start_sect));
259} 263}
260 264
261ssize_t part_stat_show(struct device *dev, 265ssize_t part_stat_show(struct device *dev,
@@ -449,8 +453,6 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
449 p->start_sect = start; 453 p->start_sect = start;
450 p->alignment_offset = 454 p->alignment_offset =
451 queue_limit_alignment_offset(&disk->queue->limits, start); 455 queue_limit_alignment_offset(&disk->queue->limits, start);
452 p->discard_alignment =
453 queue_limit_discard_alignment(&disk->queue->limits, start);
454 p->nr_sects = len; 456 p->nr_sects = len;
455 p->partno = partno; 457 p->partno = partno;
456 p->policy = get_disk_ro(disk); 458 p->policy = get_disk_ro(disk);
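add_partition() no longer caches a discard_alignment value in the hd_struct; the sysfs show method derives it from the live queue limits on every read, so changes to the limits after partition creation are reflected automatically. A hedged sketch of the on-demand computation (the helper name is illustrative):

static unsigned int demo_part_discard_alignment(struct gendisk *disk,
                                                sector_t start_sect)
{
        return queue_limit_discard_alignment(&disk->queue->limits,
                                             start_sect);
}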
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 19d6750d1d6c..6296b403c67a 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -310,6 +310,15 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
310 goto fail; 310 goto fail;
311 } 311 }
312 312
313 /* Check the GUID Partition Table header size */
314 if (le32_to_cpu((*gpt)->header_size) >
315 bdev_logical_block_size(state->bdev)) {
316 pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
317 le32_to_cpu((*gpt)->header_size),
318 bdev_logical_block_size(state->bdev));
319 goto fail;
320 }
321
313 /* Check the GUID Partition Table CRC */ 322 /* Check the GUID Partition Table CRC */
314 origcrc = le32_to_cpu((*gpt)->header_crc32); 323 origcrc = le32_to_cpu((*gpt)->header_crc32);
315 (*gpt)->header_crc32 = 0; 324 (*gpt)->header_crc32 = 0;
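The new EFI check rejects a GPT whose advertised header_size exceeds one logical block, since such a header could not have been read in full and its CRC would be computed over the wrong range. An illustrative version of the test, with demo_gpt_header standing in for the real gpt_header layout:

#include <linux/blkdev.h>

struct demo_gpt_header {
        __le32 header_size;
        /* ... remaining GPT header fields ... */
};

static bool demo_gpt_header_size_ok(const struct demo_gpt_header *gpt,
                                    struct block_device *bdev)
{
        return le32_to_cpu(gpt->header_size) <= bdev_logical_block_size(bdev);
}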
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index df434c5f28fb..c1c729335924 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -20,6 +20,7 @@ proc-y += stat.o
20proc-y += uptime.o 20proc-y += uptime.o
21proc-y += version.o 21proc-y += version.o
22proc-y += softirqs.o 22proc-y += softirqs.o
23proc-y += namespaces.o
23proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o 24proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o
24proc-$(CONFIG_NET) += proc_net.o 25proc-$(CONFIG_NET) += proc_net.o
25proc-$(CONFIG_PROC_KCORE) += kcore.o 26proc-$(CONFIG_PROC_KCORE) += kcore.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 5e4f776b0917..9b45ee84fbcc 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -131,7 +131,7 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
131 * you can test for combinations of others with 131 * you can test for combinations of others with
132 * simple bit tests. 132 * simple bit tests.
133 */ 133 */
134static const char *task_state_array[] = { 134static const char * const task_state_array[] = {
135 "R (running)", /* 0 */ 135 "R (running)", /* 0 */
136 "S (sleeping)", /* 1 */ 136 "S (sleeping)", /* 1 */
137 "D (disk sleep)", /* 2 */ 137 "D (disk sleep)", /* 2 */
@@ -147,7 +147,7 @@ static const char *task_state_array[] = {
147static inline const char *get_task_state(struct task_struct *tsk) 147static inline const char *get_task_state(struct task_struct *tsk)
148{ 148{
149 unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; 149 unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
150 const char **p = &task_state_array[0]; 150 const char * const *p = &task_state_array[0];
151 151
152 BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); 152 BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
153 153
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dfa532730e55..4ede550517a6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -600,7 +600,7 @@ static int proc_fd_access_allowed(struct inode *inode)
600 return allowed; 600 return allowed;
601} 601}
602 602
603static int proc_setattr(struct dentry *dentry, struct iattr *attr) 603int proc_setattr(struct dentry *dentry, struct iattr *attr)
604{ 604{
605 int error; 605 int error;
606 struct inode *inode = dentry->d_inode; 606 struct inode *inode = dentry->d_inode;
@@ -894,20 +894,20 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
894 if (!task) 894 if (!task)
895 goto out_no_task; 895 goto out_no_task;
896 896
897 copied = -ENOMEM;
898 page = (char *)__get_free_page(GFP_TEMPORARY);
899 if (!page)
900 goto out_task;
901
897 mm = check_mem_permission(task); 902 mm = check_mem_permission(task);
898 copied = PTR_ERR(mm); 903 copied = PTR_ERR(mm);
899 if (IS_ERR(mm)) 904 if (IS_ERR(mm))
900 goto out_task; 905 goto out_free;
901 906
902 copied = -EIO; 907 copied = -EIO;
903 if (file->private_data != (void *)((long)current->self_exec_id)) 908 if (file->private_data != (void *)((long)current->self_exec_id))
904 goto out_mm; 909 goto out_mm;
905 910
906 copied = -ENOMEM;
907 page = (char *)__get_free_page(GFP_TEMPORARY);
908 if (!page)
909 goto out_mm;
910
911 copied = 0; 911 copied = 0;
912 while (count > 0) { 912 while (count > 0) {
913 int this_len, retval; 913 int this_len, retval;
@@ -929,9 +929,11 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
929 count -= retval; 929 count -= retval;
930 } 930 }
931 *ppos = dst; 931 *ppos = dst;
932 free_page((unsigned long) page); 932
933out_mm: 933out_mm:
934 mmput(mm); 934 mmput(mm);
935out_free:
936 free_page((unsigned long) page);
935out_task: 937out_task:
936 put_task_struct(task); 938 put_task_struct(task);
937out_no_task: 939out_no_task:
@@ -1059,7 +1061,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
1059{ 1061{
1060 struct task_struct *task; 1062 struct task_struct *task;
1061 char buffer[PROC_NUMBUF]; 1063 char buffer[PROC_NUMBUF];
1062 long oom_adjust; 1064 int oom_adjust;
1063 unsigned long flags; 1065 unsigned long flags;
1064 int err; 1066 int err;
1065 1067
@@ -1071,7 +1073,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
1071 goto out; 1073 goto out;
1072 } 1074 }
1073 1075
1074 err = strict_strtol(strstrip(buffer), 0, &oom_adjust); 1076 err = kstrtoint(strstrip(buffer), 0, &oom_adjust);
1075 if (err) 1077 if (err)
1076 goto out; 1078 goto out;
1077 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && 1079 if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
@@ -1168,7 +1170,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
1168 struct task_struct *task; 1170 struct task_struct *task;
1169 char buffer[PROC_NUMBUF]; 1171 char buffer[PROC_NUMBUF];
1170 unsigned long flags; 1172 unsigned long flags;
1171 long oom_score_adj; 1173 int oom_score_adj;
1172 int err; 1174 int err;
1173 1175
1174 memset(buffer, 0, sizeof(buffer)); 1176 memset(buffer, 0, sizeof(buffer));
@@ -1179,7 +1181,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
1179 goto out; 1181 goto out;
1180 } 1182 }
1181 1183
1182 err = strict_strtol(strstrip(buffer), 0, &oom_score_adj); 1184 err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
1183 if (err) 1185 if (err)
1184 goto out; 1186 goto out;
1185 if (oom_score_adj < OOM_SCORE_ADJ_MIN || 1187 if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
@@ -1468,7 +1470,7 @@ sched_autogroup_write(struct file *file, const char __user *buf,
1468 struct inode *inode = file->f_path.dentry->d_inode; 1470 struct inode *inode = file->f_path.dentry->d_inode;
1469 struct task_struct *p; 1471 struct task_struct *p;
1470 char buffer[PROC_NUMBUF]; 1472 char buffer[PROC_NUMBUF];
1471 long nice; 1473 int nice;
1472 int err; 1474 int err;
1473 1475
1474 memset(buffer, 0, sizeof(buffer)); 1476 memset(buffer, 0, sizeof(buffer));
@@ -1477,9 +1479,9 @@ sched_autogroup_write(struct file *file, const char __user *buf,
1477 if (copy_from_user(buffer, buf, count)) 1479 if (copy_from_user(buffer, buf, count))
1478 return -EFAULT; 1480 return -EFAULT;
1479 1481
1480 err = strict_strtol(strstrip(buffer), 0, &nice); 1482 err = kstrtoint(strstrip(buffer), 0, &nice);
1481 if (err) 1483 if (err < 0)
1482 return -EINVAL; 1484 return err;
1483 1485
1484 p = get_proc_task(inode); 1486 p = get_proc_task(inode);
1485 if (!p) 1487 if (!p)
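The procfs write handlers above replace strict_strtol() with kstrtoint(): the parsed values are plain ints now, values that do not fit are rejected by the parser itself, and (in the sched_autogroup case) the parser's own error code is returned instead of a blanket -EINVAL. A sketch of the resulting parsing pattern, with the range limits as parameters of an illustrative helper:

#include <linux/kernel.h>       /* kstrtoint() */
#include <linux/string.h>       /* strstrip() */

static int demo_parse_proc_int(char *buffer, int min, int max, int *out)
{
        int val, err;

        err = kstrtoint(strstrip(buffer), 0, &val);     /* base 0: auto-detect */
        if (err < 0)
                return err;             /* propagate -EINVAL/-ERANGE as-is */
        if (val < min || val > max)
                return -EINVAL;
        *out = val;
        return 0;
}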
@@ -1576,57 +1578,6 @@ static const struct file_operations proc_pid_set_comm_operations = {
1576 .release = single_release, 1578 .release = single_release,
1577}; 1579};
1578 1580
1579/*
1580 * We added or removed a vma mapping the executable. The vmas are only mapped
1581 * during exec and are not mapped with the mmap system call.
1582 * Callers must hold down_write() on the mm's mmap_sem for these
1583 */
1584void added_exe_file_vma(struct mm_struct *mm)
1585{
1586 mm->num_exe_file_vmas++;
1587}
1588
1589void removed_exe_file_vma(struct mm_struct *mm)
1590{
1591 mm->num_exe_file_vmas--;
1592 if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
1593 fput(mm->exe_file);
1594 mm->exe_file = NULL;
1595 }
1596
1597}
1598
1599void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1600{
1601 if (new_exe_file)
1602 get_file(new_exe_file);
1603 if (mm->exe_file)
1604 fput(mm->exe_file);
1605 mm->exe_file = new_exe_file;
1606 mm->num_exe_file_vmas = 0;
1607}
1608
1609struct file *get_mm_exe_file(struct mm_struct *mm)
1610{
1611 struct file *exe_file;
1612
1613 /* We need mmap_sem to protect against races with removal of
1614 * VM_EXECUTABLE vmas */
1615 down_read(&mm->mmap_sem);
1616 exe_file = mm->exe_file;
1617 if (exe_file)
1618 get_file(exe_file);
1619 up_read(&mm->mmap_sem);
1620 return exe_file;
1621}
1622
1623void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
1624{
1625 /* It's safe to write the exe_file pointer without exe_file_lock because
1626 * this is called during fork when the task is not yet in /proc */
1627 newmm->exe_file = get_mm_exe_file(oldmm);
1628}
1629
1630static int proc_exe_link(struct inode *inode, struct path *exe_path) 1581static int proc_exe_link(struct inode *inode, struct path *exe_path)
1631{ 1582{
1632 struct task_struct *task; 1583 struct task_struct *task;
@@ -1736,8 +1687,7 @@ static int task_dumpable(struct task_struct *task)
1736 return 0; 1687 return 0;
1737} 1688}
1738 1689
1739 1690struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
1740static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
1741{ 1691{
1742 struct inode * inode; 1692 struct inode * inode;
1743 struct proc_inode *ei; 1693 struct proc_inode *ei;
@@ -1779,7 +1729,7 @@ out_unlock:
1779 return NULL; 1729 return NULL;
1780} 1730}
1781 1731
1782static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 1732int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1783{ 1733{
1784 struct inode *inode = dentry->d_inode; 1734 struct inode *inode = dentry->d_inode;
1785 struct task_struct *task; 1735 struct task_struct *task;
@@ -1820,7 +1770,7 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
1820 * made this apply to all per process world readable and executable 1770 * made this apply to all per process world readable and executable
1821 * directories. 1771 * directories.
1822 */ 1772 */
1823static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) 1773int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
1824{ 1774{
1825 struct inode *inode; 1775 struct inode *inode;
1826 struct task_struct *task; 1776 struct task_struct *task;
@@ -1862,7 +1812,7 @@ static int pid_delete_dentry(const struct dentry * dentry)
1862 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first; 1812 return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
1863} 1813}
1864 1814
1865static const struct dentry_operations pid_dentry_operations = 1815const struct dentry_operations pid_dentry_operations =
1866{ 1816{
1867 .d_revalidate = pid_revalidate, 1817 .d_revalidate = pid_revalidate,
1868 .d_delete = pid_delete_dentry, 1818 .d_delete = pid_delete_dentry,
@@ -1870,9 +1820,6 @@ static const struct dentry_operations pid_dentry_operations =
1870 1820
1871/* Lookups */ 1821/* Lookups */
1872 1822
1873typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
1874 struct task_struct *, const void *);
1875
1876/* 1823/*
1877 * Fill a directory entry. 1824 * Fill a directory entry.
1878 * 1825 *
@@ -1885,8 +1832,8 @@ typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
1885 * reported by readdir in sync with the inode numbers reported 1832 * reported by readdir in sync with the inode numbers reported
1886 * by stat. 1833 * by stat.
1887 */ 1834 */
1888static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir, 1835int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
1889 char *name, int len, 1836 const char *name, int len,
1890 instantiate_t instantiate, struct task_struct *task, const void *ptr) 1837 instantiate_t instantiate, struct task_struct *task, const void *ptr)
1891{ 1838{
1892 struct dentry *child, *dir = filp->f_path.dentry; 1839 struct dentry *child, *dir = filp->f_path.dentry;
@@ -2820,6 +2767,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2820 DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations), 2767 DIR("task", S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
2821 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), 2768 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
2822 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), 2769 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
2770 DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
2823#ifdef CONFIG_NET 2771#ifdef CONFIG_NET
2824 DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), 2772 DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
2825#endif 2773#endif
@@ -3168,6 +3116,7 @@ out_no_task:
3168static const struct pid_entry tid_base_stuff[] = { 3116static const struct pid_entry tid_base_stuff[] = {
3169 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), 3117 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
3170 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), 3118 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
3119 DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
3171 REG("environ", S_IRUSR, proc_environ_operations), 3120 REG("environ", S_IRUSR, proc_environ_operations),
3172 INF("auxv", S_IRUSR, proc_pid_auxv), 3121 INF("auxv", S_IRUSR, proc_pid_auxv),
3173 ONE("status", S_IRUGO, proc_pid_status), 3122 ONE("status", S_IRUGO, proc_pid_status),
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index f1281339b6fa..f1637f17c37c 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -674,6 +674,7 @@ struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
674 } 674 }
675 return ent; 675 return ent;
676} 676}
677EXPORT_SYMBOL(proc_mkdir_mode);
677 678
678struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, 679struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
679 struct proc_dir_entry *parent) 680 struct proc_dir_entry *parent)
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d15aa1b1cc8f..74b48cfa1bb2 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -28,6 +28,7 @@ static void proc_evict_inode(struct inode *inode)
28{ 28{
29 struct proc_dir_entry *de; 29 struct proc_dir_entry *de;
30 struct ctl_table_header *head; 30 struct ctl_table_header *head;
31 const struct proc_ns_operations *ns_ops;
31 32
32 truncate_inode_pages(&inode->i_data, 0); 33 truncate_inode_pages(&inode->i_data, 0);
33 end_writeback(inode); 34 end_writeback(inode);
@@ -44,6 +45,10 @@ static void proc_evict_inode(struct inode *inode)
44 rcu_assign_pointer(PROC_I(inode)->sysctl, NULL); 45 rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
45 sysctl_head_put(head); 46 sysctl_head_put(head);
46 } 47 }
48 /* Release any associated namespace */
49 ns_ops = PROC_I(inode)->ns_ops;
50 if (ns_ops && ns_ops->put)
51 ns_ops->put(PROC_I(inode)->ns);
47} 52}
48 53
49static struct kmem_cache * proc_inode_cachep; 54static struct kmem_cache * proc_inode_cachep;
@@ -62,6 +67,8 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
62 ei->pde = NULL; 67 ei->pde = NULL;
63 ei->sysctl = NULL; 68 ei->sysctl = NULL;
64 ei->sysctl_entry = NULL; 69 ei->sysctl_entry = NULL;
70 ei->ns = NULL;
71 ei->ns_ops = NULL;
65 inode = &ei->vfs_inode; 72 inode = &ei->vfs_inode;
66 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 73 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
67 return inode; 74 return inode;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index c03e8d3a3a5b..7838e5cfec14 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -61,6 +61,14 @@ extern const struct file_operations proc_pagemap_operations;
61extern const struct file_operations proc_net_operations; 61extern const struct file_operations proc_net_operations;
62extern const struct inode_operations proc_net_inode_operations; 62extern const struct inode_operations proc_net_inode_operations;
63 63
64struct proc_maps_private {
65 struct pid *pid;
66 struct task_struct *task;
67#ifdef CONFIG_MMU
68 struct vm_area_struct *tail_vma;
69#endif
70};
71
64void proc_init_inodecache(void); 72void proc_init_inodecache(void);
65 73
66static inline struct pid *proc_pid(struct inode *inode) 74static inline struct pid *proc_pid(struct inode *inode)
@@ -119,3 +127,21 @@ struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
119 */ 127 */
120int proc_readdir(struct file *, void *, filldir_t); 128int proc_readdir(struct file *, void *, filldir_t);
121struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); 129struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
130
131
132
133/* Lookups */
134typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
135 struct task_struct *, const void *);
136int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
137 const char *name, int len,
138 instantiate_t instantiate, struct task_struct *task, const void *ptr);
139int pid_revalidate(struct dentry *dentry, struct nameidata *nd);
140struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task);
141extern const struct dentry_operations pid_dentry_operations;
142int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
143int proc_setattr(struct dentry *dentry, struct iattr *attr);
144
145extern const struct inode_operations proc_ns_dir_inode_operations;
146extern const struct file_operations proc_ns_dir_operations;
147
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
new file mode 100644
index 000000000000..781dec5bd682
--- /dev/null
+++ b/fs/proc/namespaces.c
@@ -0,0 +1,198 @@
1#include <linux/proc_fs.h>
2#include <linux/nsproxy.h>
3#include <linux/sched.h>
4#include <linux/ptrace.h>
5#include <linux/fs_struct.h>
6#include <linux/mount.h>
7#include <linux/path.h>
8#include <linux/namei.h>
9#include <linux/file.h>
10#include <linux/utsname.h>
11#include <net/net_namespace.h>
12#include <linux/mnt_namespace.h>
13#include <linux/ipc_namespace.h>
14#include <linux/pid_namespace.h>
15#include "internal.h"
16
17
18static const struct proc_ns_operations *ns_entries[] = {
19#ifdef CONFIG_NET_NS
20 &netns_operations,
21#endif
22#ifdef CONFIG_UTS_NS
23 &utsns_operations,
24#endif
25#ifdef CONFIG_IPC_NS
26 &ipcns_operations,
27#endif
28};
29
30static const struct file_operations ns_file_operations = {
31 .llseek = no_llseek,
32};
33
34static struct dentry *proc_ns_instantiate(struct inode *dir,
35 struct dentry *dentry, struct task_struct *task, const void *ptr)
36{
37 const struct proc_ns_operations *ns_ops = ptr;
38 struct inode *inode;
39 struct proc_inode *ei;
40 struct dentry *error = ERR_PTR(-ENOENT);
41
42 inode = proc_pid_make_inode(dir->i_sb, task);
43 if (!inode)
44 goto out;
45
46 ei = PROC_I(inode);
47 inode->i_mode = S_IFREG|S_IRUSR;
48 inode->i_fop = &ns_file_operations;
49 ei->ns_ops = ns_ops;
50 ei->ns = ns_ops->get(task);
51 if (!ei->ns)
52 goto out_iput;
53
54 dentry->d_op = &pid_dentry_operations;
55 d_add(dentry, inode);
56 /* Close the race of the process dying before we return the dentry */
57 if (pid_revalidate(dentry, NULL))
58 error = NULL;
59out:
60 return error;
61out_iput:
62 iput(inode);
63 goto out;
64}
65
66static int proc_ns_fill_cache(struct file *filp, void *dirent,
67 filldir_t filldir, struct task_struct *task,
68 const struct proc_ns_operations *ops)
69{
70 return proc_fill_cache(filp, dirent, filldir,
71 ops->name, strlen(ops->name),
72 proc_ns_instantiate, task, ops);
73}
74
75static int proc_ns_dir_readdir(struct file *filp, void *dirent,
76 filldir_t filldir)
77{
78 int i;
79 struct dentry *dentry = filp->f_path.dentry;
80 struct inode *inode = dentry->d_inode;
81 struct task_struct *task = get_proc_task(inode);
82 const struct proc_ns_operations **entry, **last;
83 ino_t ino;
84 int ret;
85
86 ret = -ENOENT;
87 if (!task)
88 goto out_no_task;
89
90 ret = -EPERM;
91 if (!ptrace_may_access(task, PTRACE_MODE_READ))
92 goto out;
93
94 ret = 0;
95 i = filp->f_pos;
96 switch (i) {
97 case 0:
98 ino = inode->i_ino;
99 if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
100 goto out;
101 i++;
102 filp->f_pos++;
103 /* fall through */
104 case 1:
105 ino = parent_ino(dentry);
106 if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
107 goto out;
108 i++;
109 filp->f_pos++;
110 /* fall through */
111 default:
112 i -= 2;
113 if (i >= ARRAY_SIZE(ns_entries)) {
114 ret = 1;
115 goto out;
116 }
117 entry = ns_entries + i;
118 last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
119 while (entry <= last) {
120 if (proc_ns_fill_cache(filp, dirent, filldir,
121 task, *entry) < 0)
122 goto out;
123 filp->f_pos++;
124 entry++;
125 }
126 }
127
128 ret = 1;
129out:
130 put_task_struct(task);
131out_no_task:
132 return ret;
133}
134
135const struct file_operations proc_ns_dir_operations = {
136 .read = generic_read_dir,
137 .readdir = proc_ns_dir_readdir,
138};
139
140static struct dentry *proc_ns_dir_lookup(struct inode *dir,
141 struct dentry *dentry, struct nameidata *nd)
142{
143 struct dentry *error;
144 struct task_struct *task = get_proc_task(dir);
145 const struct proc_ns_operations **entry, **last;
146 unsigned int len = dentry->d_name.len;
147
148 error = ERR_PTR(-ENOENT);
149
150 if (!task)
151 goto out_no_task;
152
153 error = ERR_PTR(-EPERM);
154 if (!ptrace_may_access(task, PTRACE_MODE_READ))
155 goto out;
156
157 last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
158 for (entry = ns_entries; entry <= last; entry++) {
159 if (strlen((*entry)->name) != len)
160 continue;
161 if (!memcmp(dentry->d_name.name, (*entry)->name, len))
162 break;
163 }
164 error = ERR_PTR(-ENOENT);
165 if (entry > last)
166 goto out;
167
168 error = proc_ns_instantiate(dir, dentry, task, *entry);
169out:
170 put_task_struct(task);
171out_no_task:
172 return error;
173}
174
175const struct inode_operations proc_ns_dir_inode_operations = {
176 .lookup = proc_ns_dir_lookup,
177 .getattr = pid_getattr,
178 .setattr = proc_setattr,
179};
180
181struct file *proc_ns_fget(int fd)
182{
183 struct file *file;
184
185 file = fget(fd);
186 if (!file)
187 return ERR_PTR(-EBADF);
188
189 if (file->f_op != &ns_file_operations)
190 goto out_invalid;
191
192 return file;
193
194out_invalid:
195 fput(file);
196 return ERR_PTR(-EINVAL);
197}
198
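
The new /proc/<pid>/ns directory above exposes one regular file per namespace type built into the kernel, named after each proc_ns_operations' ->name (typically "net", "uts", "ipc"); the proc inode behind an entry holds a namespace reference taken via ns_ops->get() at lookup time, and proc_ns_fget() later validates descriptors opened on these files. A minimal userspace sketch, using only standard readdir(3)/open(2) and assuming a kernel with this directory present, that lists the entries for the current task and opens one of them:

/*
 * Sketch (userspace): enumerate /proc/self/ns and open one entry.
 * Entry names come from the ns_entries[] table above and depend on
 * kernel configuration.
 */
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	DIR *dir = opendir("/proc/self/ns");
	struct dirent *d;
	int fd;

	if (!dir) {
		perror("opendir /proc/self/ns");
		return 1;
	}
	while ((d = readdir(dir)) != NULL) {
		if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
			continue;
		printf("namespace entry: %s\n", d->d_name);
	}
	closedir(dir);

	/* the inode behind this file pins the namespace (ns_ops->get()) */
	fd = open("/proc/self/ns/net", O_RDONLY);
	if (fd < 0)
		perror("open /proc/self/ns/net");
	else
		close(fd);
	return 0;
}
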
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 1cffa2b8a2fc..9758b654a1bc 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -138,9 +138,9 @@ static int stat_open(struct inode *inode, struct file *file)
138 struct seq_file *m; 138 struct seq_file *m;
139 int res; 139 int res;
140 140
141 /* don't ask for more than the kmalloc() max size, currently 128 KB */ 141 /* don't ask for more than the kmalloc() max size */
142 if (size > 128 * 1024) 142 if (size > KMALLOC_MAX_SIZE)
143 size = 128 * 1024; 143 size = KMALLOC_MAX_SIZE;
144 buf = kmalloc(size, GFP_KERNEL); 144 buf = kmalloc(size, GFP_KERNEL);
145 if (!buf) 145 if (!buf)
146 return -ENOMEM; 146 return -ENOMEM;
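
The hunk above replaces a hard-coded 128 KB cap with the allocator's actual limit. A minimal sketch of the same pattern (the helper name is illustrative): wherever a user-influenced size feeds kmalloc(), clamp it first so the allocation cannot fail purely because of the request size.

/* Sketch: cap a caller-supplied size at what kmalloc() can actually serve. */
#include <linux/slab.h>

static void *alloc_capped(size_t size)
{
	if (size > KMALLOC_MAX_SIZE)
		size = KMALLOC_MAX_SIZE;
	return kmalloc(size, GFP_KERNEL);
}
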
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 318d8654989b..25b6a887adb9 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -211,7 +211,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
211{ 211{
212 struct mm_struct *mm = vma->vm_mm; 212 struct mm_struct *mm = vma->vm_mm;
213 struct file *file = vma->vm_file; 213 struct file *file = vma->vm_file;
214 int flags = vma->vm_flags; 214 vm_flags_t flags = vma->vm_flags;
215 unsigned long ino = 0; 215 unsigned long ino = 0;
216 unsigned long long pgoff = 0; 216 unsigned long long pgoff = 0;
217 unsigned long start, end; 217 unsigned long start, end;
@@ -536,15 +536,17 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
536 char buffer[PROC_NUMBUF]; 536 char buffer[PROC_NUMBUF];
537 struct mm_struct *mm; 537 struct mm_struct *mm;
538 struct vm_area_struct *vma; 538 struct vm_area_struct *vma;
539 long type; 539 int type;
540 int rv;
540 541
541 memset(buffer, 0, sizeof(buffer)); 542 memset(buffer, 0, sizeof(buffer));
542 if (count > sizeof(buffer) - 1) 543 if (count > sizeof(buffer) - 1)
543 count = sizeof(buffer) - 1; 544 count = sizeof(buffer) - 1;
544 if (copy_from_user(buffer, buf, count)) 545 if (copy_from_user(buffer, buf, count))
545 return -EFAULT; 546 return -EFAULT;
546 if (strict_strtol(strstrip(buffer), 10, &type)) 547 rv = kstrtoint(strstrip(buffer), 10, &type);
547 return -EINVAL; 548 if (rv < 0)
549 return rv;
548 if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) 550 if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
549 return -EINVAL; 551 return -EINVAL;
550 task = get_proc_task(file->f_path.dentry->d_inode); 552 task = get_proc_task(file->f_path.dentry->d_inode);
@@ -769,18 +771,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
769 if (!task) 771 if (!task)
770 goto out; 772 goto out;
771 773
772 mm = mm_for_maps(task);
773 ret = PTR_ERR(mm);
774 if (!mm || IS_ERR(mm))
775 goto out_task;
776
777 ret = -EINVAL; 774 ret = -EINVAL;
778 /* file position must be aligned */ 775 /* file position must be aligned */
779 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) 776 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
780 goto out_task; 777 goto out_task;
781 778
782 ret = 0; 779 ret = 0;
783
784 if (!count) 780 if (!count)
785 goto out_task; 781 goto out_task;
786 782
@@ -788,7 +784,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
788 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); 784 pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
789 ret = -ENOMEM; 785 ret = -ENOMEM;
790 if (!pm.buffer) 786 if (!pm.buffer)
791 goto out_mm; 787 goto out_task;
788
789 mm = mm_for_maps(task);
790 ret = PTR_ERR(mm);
791 if (!mm || IS_ERR(mm))
792 goto out_free;
792 793
793 pagemap_walk.pmd_entry = pagemap_pte_range; 794 pagemap_walk.pmd_entry = pagemap_pte_range;
794 pagemap_walk.pte_hole = pagemap_pte_hole; 795 pagemap_walk.pte_hole = pagemap_pte_hole;
@@ -831,7 +832,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
831 len = min(count, PM_ENTRY_BYTES * pm.pos); 832 len = min(count, PM_ENTRY_BYTES * pm.pos);
832 if (copy_to_user(buf, pm.buffer, len)) { 833 if (copy_to_user(buf, pm.buffer, len)) {
833 ret = -EFAULT; 834 ret = -EFAULT;
834 goto out_free; 835 goto out_mm;
835 } 836 }
836 copied += len; 837 copied += len;
837 buf += len; 838 buf += len;
@@ -841,10 +842,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
841 if (!ret || ret == PM_END_OF_BUFFER) 842 if (!ret || ret == PM_END_OF_BUFFER)
842 ret = copied; 843 ret = copied;
843 844
844out_free:
845 kfree(pm.buffer);
846out_mm: 845out_mm:
847 mmput(mm); 846 mmput(mm);
847out_free:
848 kfree(pm.buffer);
848out_task: 849out_task:
849 put_task_struct(task); 850 put_task_struct(task);
850out: 851out:
@@ -858,7 +859,192 @@ const struct file_operations proc_pagemap_operations = {
858#endif /* CONFIG_PROC_PAGE_MONITOR */ 859#endif /* CONFIG_PROC_PAGE_MONITOR */
859 860
860#ifdef CONFIG_NUMA 861#ifdef CONFIG_NUMA
861extern int show_numa_map(struct seq_file *m, void *v); 862
863struct numa_maps {
864 struct vm_area_struct *vma;
865 unsigned long pages;
866 unsigned long anon;
867 unsigned long active;
868 unsigned long writeback;
869 unsigned long mapcount_max;
870 unsigned long dirty;
871 unsigned long swapcache;
872 unsigned long node[MAX_NUMNODES];
873};
874
875struct numa_maps_private {
876 struct proc_maps_private proc_maps;
877 struct numa_maps md;
878};
879
880static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
881{
882 int count = page_mapcount(page);
883
884 md->pages++;
885 if (pte_dirty || PageDirty(page))
886 md->dirty++;
887
888 if (PageSwapCache(page))
889 md->swapcache++;
890
891 if (PageActive(page) || PageUnevictable(page))
892 md->active++;
893
894 if (PageWriteback(page))
895 md->writeback++;
896
897 if (PageAnon(page))
898 md->anon++;
899
900 if (count > md->mapcount_max)
901 md->mapcount_max = count;
902
903 md->node[page_to_nid(page)]++;
904}
905
906static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
907 unsigned long end, struct mm_walk *walk)
908{
909 struct numa_maps *md;
910 spinlock_t *ptl;
911 pte_t *orig_pte;
912 pte_t *pte;
913
914 md = walk->private;
915 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
916 do {
917 struct page *page;
918 int nid;
919
920 if (!pte_present(*pte))
921 continue;
922
923 page = vm_normal_page(md->vma, addr, *pte);
924 if (!page)
925 continue;
926
927 if (PageReserved(page))
928 continue;
929
930 nid = page_to_nid(page);
931 if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
932 continue;
933
934 gather_stats(page, md, pte_dirty(*pte));
935
936 } while (pte++, addr += PAGE_SIZE, addr != end);
937 pte_unmap_unlock(orig_pte, ptl);
938 return 0;
939}
940#ifdef CONFIG_HUGETLB_PAGE
941static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
942 unsigned long addr, unsigned long end, struct mm_walk *walk)
943{
944 struct numa_maps *md;
945 struct page *page;
946
947 if (pte_none(*pte))
948 return 0;
949
950 page = pte_page(*pte);
951 if (!page)
952 return 0;
953
954 md = walk->private;
955 gather_stats(page, md, pte_dirty(*pte));
956 return 0;
957}
958
959#else
960static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
961 unsigned long addr, unsigned long end, struct mm_walk *walk)
962{
963 return 0;
964}
965#endif
966
967/*
968 * Display pages allocated per node and memory policy via /proc.
969 */
970static int show_numa_map(struct seq_file *m, void *v)
971{
972 struct numa_maps_private *numa_priv = m->private;
973 struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
974 struct vm_area_struct *vma = v;
975 struct numa_maps *md = &numa_priv->md;
976 struct file *file = vma->vm_file;
977 struct mm_struct *mm = vma->vm_mm;
978 struct mm_walk walk = {};
979 struct mempolicy *pol;
980 int n;
981 char buffer[50];
982
983 if (!mm)
984 return 0;
985
986 /* Ensure we start with an empty set of numa_maps statistics. */
987 memset(md, 0, sizeof(*md));
988
989 md->vma = vma;
990
991 walk.hugetlb_entry = gather_hugetbl_stats;
992 walk.pmd_entry = gather_pte_stats;
993 walk.private = md;
994 walk.mm = mm;
995
996 pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
997 mpol_to_str(buffer, sizeof(buffer), pol, 0);
998 mpol_cond_put(pol);
999
1000 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1001
1002 if (file) {
1003 seq_printf(m, " file=");
1004 seq_path(m, &file->f_path, "\n\t= ");
1005 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1006 seq_printf(m, " heap");
1007 } else if (vma->vm_start <= mm->start_stack &&
1008 vma->vm_end >= mm->start_stack) {
1009 seq_printf(m, " stack");
1010 }
1011
1012 walk_page_range(vma->vm_start, vma->vm_end, &walk);
1013
1014 if (!md->pages)
1015 goto out;
1016
1017 if (md->anon)
1018 seq_printf(m, " anon=%lu", md->anon);
1019
1020 if (md->dirty)
1021 seq_printf(m, " dirty=%lu", md->dirty);
1022
1023 if (md->pages != md->anon && md->pages != md->dirty)
1024 seq_printf(m, " mapped=%lu", md->pages);
1025
1026 if (md->mapcount_max > 1)
1027 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1028
1029 if (md->swapcache)
1030 seq_printf(m, " swapcache=%lu", md->swapcache);
1031
1032 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1033 seq_printf(m, " active=%lu", md->active);
1034
1035 if (md->writeback)
1036 seq_printf(m, " writeback=%lu", md->writeback);
1037
1038 for_each_node_state(n, N_HIGH_MEMORY)
1039 if (md->node[n])
1040 seq_printf(m, " N%d=%lu", n, md->node[n]);
1041out:
1042 seq_putc(m, '\n');
1043
1044 if (m->count < m->size)
1045 m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1046 return 0;
1047}
862 1048
863static const struct seq_operations proc_pid_numa_maps_op = { 1049static const struct seq_operations proc_pid_numa_maps_op = {
864 .start = m_start, 1050 .start = m_start,
@@ -869,7 +1055,20 @@ static const struct seq_operations proc_pid_numa_maps_op = {
869 1055
870static int numa_maps_open(struct inode *inode, struct file *file) 1056static int numa_maps_open(struct inode *inode, struct file *file)
871{ 1057{
872 return do_maps_open(inode, file, &proc_pid_numa_maps_op); 1058 struct numa_maps_private *priv;
1059 int ret = -ENOMEM;
1060 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1061 if (priv) {
1062 priv->proc_maps.pid = proc_pid(inode);
1063 ret = seq_open(file, &proc_pid_numa_maps_op);
1064 if (!ret) {
1065 struct seq_file *m = file->private_data;
1066 m->private = priv;
1067 } else {
1068 kfree(priv);
1069 }
1070 }
1071 return ret;
873} 1072}
874 1073
875const struct file_operations proc_numa_maps_operations = { 1074const struct file_operations proc_numa_maps_operations = {
@@ -878,4 +1077,4 @@ const struct file_operations proc_numa_maps_operations = {
878 .llseek = seq_lseek, 1077 .llseek = seq_lseek,
879 .release = seq_release_private, 1078 .release = seq_release_private,
880}; 1079};
881#endif 1080#endif /* CONFIG_NUMA */
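
show_numa_map() now emits each line from the per-VMA counters gathered by the page walk above: the VMA start address and memory policy string come first, followed only by the non-zero fields (file=/heap/stack, anon=, dirty=, mapped=, mapmax=, swapcache=, active=, writeback=, and one N<node>=<pages> entry per populated node). A small userspace sketch, assuming nothing beyond stdio, that dumps this file for the current process:

/* Sketch (userspace): dump /proc/self/numa_maps, whose lines are produced
 * by show_numa_map() above.  The file exists only on CONFIG_NUMA kernels. */
#include <stdio.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/self/numa_maps", "r");

	if (!f) {
		perror("fopen /proc/self/numa_maps");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
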
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 74802bc5ded9..cd99bf557650 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -35,6 +35,46 @@ static u64 vmcore_size;
35 35
36static struct proc_dir_entry *proc_vmcore = NULL; 36static struct proc_dir_entry *proc_vmcore = NULL;
37 37
38/*
39 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
40 * The called function has to take care of module refcounting.
41 */
42static int (*oldmem_pfn_is_ram)(unsigned long pfn);
43
44int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
45{
46 if (oldmem_pfn_is_ram)
47 return -EBUSY;
48 oldmem_pfn_is_ram = fn;
49 return 0;
50}
51EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
52
53void unregister_oldmem_pfn_is_ram(void)
54{
55 oldmem_pfn_is_ram = NULL;
56 wmb();
57}
58EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
59
60static int pfn_is_ram(unsigned long pfn)
61{
62 int (*fn)(unsigned long pfn);
63 /* pfn is ram unless fn() checks pagetype */
64 int ret = 1;
65
66 /*
67 * Ask hypervisor if the pfn is really ram.
68 * A ballooned page contains no data and reading from such a page
69 * will cause high load in the hypervisor.
70 */
71 fn = oldmem_pfn_is_ram;
72 if (fn)
73 ret = fn(pfn);
74
75 return ret;
76}
77
38/* Reads a page from the oldmem device from given offset. */ 78/* Reads a page from the oldmem device from given offset. */
39static ssize_t read_from_oldmem(char *buf, size_t count, 79static ssize_t read_from_oldmem(char *buf, size_t count,
40 u64 *ppos, int userbuf) 80 u64 *ppos, int userbuf)
@@ -55,9 +95,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
55 else 95 else
56 nr_bytes = count; 96 nr_bytes = count;
57 97
58 tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf); 98 /* If pfn is not ram, return zeros for sparse dump files */
59 if (tmp < 0) 99 if (pfn_is_ram(pfn) == 0)
60 return tmp; 100 memset(buf, 0, nr_bytes);
101 else {
102 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
103 offset, userbuf);
104 if (tmp < 0)
105 return tmp;
106 }
61 *ppos += nr_bytes; 107 *ppos += nr_bytes;
62 count -= nr_bytes; 108 count -= nr_bytes;
63 buf += nr_bytes; 109 buf += nr_bytes;
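
The new hook lets a backend (for example a balloon driver under a hypervisor) tell the /proc/vmcore reader which old-memory pfns are not real RAM, so those pages are returned as zeros instead of being copied. A minimal sketch of a registration module, assuming the two exported helpers above are declared in <linux/crash_dump.h>; the is_backed_by_ram() predicate is a placeholder for a backend-specific check:

/* Sketch: register a pfn-is-ram predicate with /proc/vmcore. */
#include <linux/crash_dump.h>
#include <linux/module.h>

/* placeholder for a hypervisor/balloon-specific lookup */
static bool is_backed_by_ram(unsigned long pfn)
{
	return true;
}

static int my_oldmem_pfn_is_ram(unsigned long pfn)
{
	/* > 0 for RAM pages, 0 for non-RAM pages, < 0 on error */
	return is_backed_by_ram(pfn) ? 1 : 0;
}

static int __init my_init(void)
{
	/* returns -EBUSY if another backend is already registered */
	return register_oldmem_pfn_is_ram(my_oldmem_pfn_is_ram);
}

static void __exit my_exit(void)
{
	unregister_oldmem_pfn_is_ram();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
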
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d3c032f5fa0a..5b572c89e6c4 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -691,8 +691,11 @@ static void prune_dqcache(int count)
691 * This is called from kswapd when we think we need some 691 * This is called from kswapd when we think we need some
692 * more memory 692 * more memory
693 */ 693 */
694static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 694static int shrink_dqcache_memory(struct shrinker *shrink,
695 struct shrink_control *sc)
695{ 696{
697 int nr = sc->nr_to_scan;
698
696 if (nr) { 699 if (nr) {
697 spin_lock(&dq_list_lock); 700 spin_lock(&dq_list_lock);
698 prune_dqcache(nr); 701 prune_dqcache(nr);
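
This hunk is part of the tree-wide conversion of shrinker callbacks from a bare (nr, gfp_mask) pair to a struct shrink_control argument. A minimal sketch of a shrinker written against the new signature; the object cache it prunes (nr_objects, my_cache_prune()) is purely illustrative:

/* Sketch: a shrinker using the new shrink_control interface.
 * nr_objects and my_cache_prune() stand in for a real object cache. */
#include <linux/mm.h>

static unsigned long nr_objects;

static void my_cache_prune(unsigned long nr)
{
	nr_objects -= min(nr, nr_objects);
}

static int my_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		my_cache_prune(sc->nr_to_scan);

	/* report how many reclaimable objects remain */
	return nr_objects;
}

static struct shrinker my_shrinker = {
	.shrink	= my_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* in init code: register_shrinker(&my_shrinker);
 * in exit code: unregister_shrinker(&my_shrinker); */
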
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 118662690cdf..76c8164d5651 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -831,6 +831,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
831 INITIALIZE_PATH(path); 831 INITIALIZE_PATH(path);
832 struct reiserfs_dir_entry de; 832 struct reiserfs_dir_entry de;
833 833
834 dentry_unhash(dentry);
835
834 /* we will be doing 2 balancings and update 2 stat data, we change quotas 836 /* we will be doing 2 balancings and update 2 stat data, we change quotas
835 * of the owner of the directory and of the owner of the parent directory. 837 * of the owner of the directory and of the owner of the parent directory.
836 * The quota structure is possibly deleted only on last iput => outside 838 * The quota structure is possibly deleted only on last iput => outside
@@ -1225,6 +1227,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1225 unsigned long savelink = 1; 1227 unsigned long savelink = 1;
1226 struct timespec ctime; 1228 struct timespec ctime;
1227 1229
1230 if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
1231 dentry_unhash(new_dentry);
1232
1228 /* three balancings: (1) old name removal, (2) new name insertion 1233 /* three balancings: (1) old name removal, (2) new name insertion
1229 and (3) maybe "save" link insertion 1234 and (3) maybe "save" link insertion
1230 stat data updates: (1) old directory, 1235 stat data updates: (1) old directory,
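
reiserfs here, and sysv and ubifs further down, all receive the same change: the filesystem now calls dentry_unhash() itself at the top of rmdir, and in rename only when the target is an existing directory, apparently as part of pushing that unhashing out of common VFS code and into the filesystems that rely on it. A condensed sketch of the convention as applied in these hunks (the foofs_* names are placeholders):

/* Sketch of the rmdir/rename convention applied per-filesystem here. */
#include <linux/fs.h>

static int foofs_rmdir(struct inode *dir, struct dentry *dentry)
{
	dentry_unhash(dentry);		/* victim directory is going away */
	/* ... filesystem-specific removal ... */
	return 0;
}

static int foofs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	/* only an existing target directory needs to be unhashed */
	if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
		dentry_unhash(new_dentry);
	/* ... filesystem-specific rename ... */
	return 0;
}
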
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 47d2a4498b03..50f1abccd1cd 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -105,7 +105,6 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
105 mutex_unlock(&dentry->d_inode->i_mutex); 105 mutex_unlock(&dentry->d_inode->i_mutex);
106 if (!error) 106 if (!error)
107 d_delete(dentry); 107 d_delete(dentry);
108 dput(dentry);
109 108
110 return error; 109 return error;
111} 110}
diff --git a/fs/splice.c b/fs/splice.c
index 50a5d978da16..aa866d309695 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -162,6 +162,14 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
162 .get = generic_pipe_buf_get, 162 .get = generic_pipe_buf_get,
163}; 163};
164 164
165static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
166{
167 smp_mb();
168 if (waitqueue_active(&pipe->wait))
169 wake_up_interruptible(&pipe->wait);
170 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
171}
172
165/** 173/**
166 * splice_to_pipe - fill passed data into a pipe 174 * splice_to_pipe - fill passed data into a pipe
167 * @pipe: pipe to fill 175 * @pipe: pipe to fill
@@ -247,12 +255,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
247 255
248 pipe_unlock(pipe); 256 pipe_unlock(pipe);
249 257
250 if (do_wakeup) { 258 if (do_wakeup)
251 smp_mb(); 259 wakeup_pipe_readers(pipe);
252 if (waitqueue_active(&pipe->wait))
253 wake_up_interruptible(&pipe->wait);
254 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
255 }
256 260
257 while (page_nr < spd_pages) 261 while (page_nr < spd_pages)
258 spd->spd_release(spd, page_nr++); 262 spd->spd_release(spd, page_nr++);
@@ -1892,12 +1896,9 @@ retry:
1892 /* 1896 /*
1893 * If we put data in the output pipe, wakeup any potential readers. 1897 * If we put data in the output pipe, wakeup any potential readers.
1894 */ 1898 */
1895 if (ret > 0) { 1899 if (ret > 0)
1896 smp_mb(); 1900 wakeup_pipe_readers(opipe);
1897 if (waitqueue_active(&opipe->wait)) 1901
1898 wake_up_interruptible(&opipe->wait);
1899 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1900 }
1901 if (input_wakeup) 1902 if (input_wakeup)
1902 wakeup_pipe_writers(ipipe); 1903 wakeup_pipe_writers(ipipe);
1903 1904
@@ -1976,12 +1977,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
1976 /* 1977 /*
1977 * If we put data in the output pipe, wakeup any potential readers. 1978 * If we put data in the output pipe, wakeup any potential readers.
1978 */ 1979 */
1979 if (ret > 0) { 1980 if (ret > 0)
1980 smp_mb(); 1981 wakeup_pipe_readers(opipe);
1981 if (waitqueue_active(&opipe->wait))
1982 wake_up_interruptible(&opipe->wait);
1983 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1984 }
1985 1982
1986 return ret; 1983 return ret;
1987} 1984}
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 8ab48bc2fa7d..ed0eb2a921f4 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 4b5a3fbb1f1f..f744be98cd5a 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -393,19 +393,36 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
393/* 393/*
394 * Read a filesystem table (uncompressed sequence of bytes) from disk 394 * Read a filesystem table (uncompressed sequence of bytes) from disk
395 */ 395 */
396int squashfs_read_table(struct super_block *sb, void *buffer, u64 block, 396void *squashfs_read_table(struct super_block *sb, u64 block, int length)
397 int length)
398{ 397{
399 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 398 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
400 int i, res; 399 int i, res;
401 void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL); 400 void *table, *buffer, **data;
402 if (data == NULL) 401
403 return -ENOMEM; 402 table = buffer = kmalloc(length, GFP_KERNEL);
403 if (table == NULL)
404 return ERR_PTR(-ENOMEM);
405
406 data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
407 if (data == NULL) {
408 res = -ENOMEM;
409 goto failed;
410 }
404 411
405 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) 412 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
406 data[i] = buffer; 413 data[i] = buffer;
414
407 res = squashfs_read_data(sb, data, block, length | 415 res = squashfs_read_data(sb, data, block, length |
408 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); 416 SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
417
409 kfree(data); 418 kfree(data);
410 return res; 419
420 if (res < 0)
421 goto failed;
422
423 return table;
424
425failed:
426 kfree(table);
427 return ERR_PTR(res);
411} 428}
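
squashfs_read_table() changes calling convention here: instead of filling a caller-supplied buffer and returning an int, it allocates the buffer itself and returns either the table or an ERR_PTR-encoded error. A condensed sketch of the new caller pattern, mirroring the id/fragment/inode-lookup index readers later in this diff (read_index_table() is an illustrative wrapper, not a real squashfs function):

/* Sketch: a caller of the new-style squashfs_read_table(), mirroring the
 * id/fragment/inode-lookup index readers later in this diff. */
static __le64 *read_index_table(struct super_block *sb, u64 table_start,
				int length)
{
	__le64 *table = squashfs_read_table(sb, table_start, length);

	if (IS_ERR(table))
		return table;		/* propagate -ENOMEM, -EIO, ... */

	/*
	 * The first metadata block pointer must precede the index table
	 * itself; anything else indicates a corrupt image.
	 */
	if (table[0] >= table_start) {
		kfree(table);
		return ERR_PTR(-EINVAL);
	}
	return table;
}
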
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index e921bd213738..9f1b0bb96f13 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index 099745ad5691..8ba70cff09a6 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -4,7 +4,7 @@
4 * Squashfs - a compressed read only filesystem for Linux 4 * Squashfs - a compressed read only filesystem for Linux
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 3f79cd1d0c19..9dfe2ce0fb70 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 7f93d5a9ee05..730c56248c9b 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -121,30 +121,38 @@ static struct dentry *squashfs_get_parent(struct dentry *child)
121 * Read uncompressed inode lookup table indexes off disk into memory 121 * Read uncompressed inode lookup table indexes off disk into memory
122 */ 122 */
123__le64 *squashfs_read_inode_lookup_table(struct super_block *sb, 123__le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
124 u64 lookup_table_start, unsigned int inodes) 124 u64 lookup_table_start, u64 next_table, unsigned int inodes)
125{ 125{
126 unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); 126 unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
127 __le64 *inode_lookup_table; 127 __le64 *table;
128 int err;
129 128
130 TRACE("In read_inode_lookup_table, length %d\n", length); 129 TRACE("In read_inode_lookup_table, length %d\n", length);
131 130
132 /* Allocate inode lookup table indexes */ 131 /* Sanity check values */
133 inode_lookup_table = kmalloc(length, GFP_KERNEL); 132
134 if (inode_lookup_table == NULL) { 133 /* there should always be at least one inode */
135 ERROR("Failed to allocate inode lookup table\n"); 134 if (inodes == 0)
136 return ERR_PTR(-ENOMEM); 135 return ERR_PTR(-EINVAL);
137 } 136
137 /* length bytes should not extend into the next table - this check
138 * also traps instances where lookup_table_start is incorrectly larger
139 * than the next table start
140 */
141 if (lookup_table_start + length > next_table)
142 return ERR_PTR(-EINVAL);
143
144 table = squashfs_read_table(sb, lookup_table_start, length);
138 145
139 err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start, 146 /*
140 length); 147 * table[0] points to the first inode lookup table metadata block,
141 if (err < 0) { 148 * this should be less than lookup_table_start
142 ERROR("unable to read inode lookup table\n"); 149 */
143 kfree(inode_lookup_table); 150 if (!IS_ERR(table) && table[0] >= lookup_table_start) {
144 return ERR_PTR(err); 151 kfree(table);
152 return ERR_PTR(-EINVAL);
145 } 153 }
146 154
147 return inode_lookup_table; 155 return table;
148} 156}
149 157
150 158
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index a25c5060bdcb..38bb1c640559 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 7eef571443c6..1516a6490bfb 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -71,26 +71,29 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
71 * Read the uncompressed fragment lookup table indexes off disk into memory 71 * Read the uncompressed fragment lookup table indexes off disk into memory
72 */ 72 */
73__le64 *squashfs_read_fragment_index_table(struct super_block *sb, 73__le64 *squashfs_read_fragment_index_table(struct super_block *sb,
74 u64 fragment_table_start, unsigned int fragments) 74 u64 fragment_table_start, u64 next_table, unsigned int fragments)
75{ 75{
76 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments); 76 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
77 __le64 *fragment_index; 77 __le64 *table;
78 int err;
79 78
80 /* Allocate fragment lookup table indexes */ 79 /*
81 fragment_index = kmalloc(length, GFP_KERNEL); 80 * Sanity check, length bytes should not extend into the next table -
82 if (fragment_index == NULL) { 81 * this check also traps instances where fragment_table_start is
83 ERROR("Failed to allocate fragment index table\n"); 82 * incorrectly larger than the next table start
84 return ERR_PTR(-ENOMEM); 83 */
85 } 84 if (fragment_table_start + length > next_table)
85 return ERR_PTR(-EINVAL);
86
87 table = squashfs_read_table(sb, fragment_table_start, length);
86 88
87 err = squashfs_read_table(sb, fragment_index, fragment_table_start, 89 /*
88 length); 90 * table[0] points to the first fragment table metadata block, this
89 if (err < 0) { 91 * should be less than fragment_table_start
90 ERROR("unable to read fragment index table\n"); 92 */
91 kfree(fragment_index); 93 if (!IS_ERR(table) && table[0] >= fragment_table_start) {
92 return ERR_PTR(err); 94 kfree(table);
95 return ERR_PTR(-EINVAL);
93 } 96 }
94 97
95 return fragment_index; 98 return table;
96} 99}
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index d8f32452638e..a70858e0fb44 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -66,27 +66,37 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
66 * Read uncompressed id lookup table indexes from disk into memory 66 * Read uncompressed id lookup table indexes from disk into memory
67 */ 67 */
68__le64 *squashfs_read_id_index_table(struct super_block *sb, 68__le64 *squashfs_read_id_index_table(struct super_block *sb,
69 u64 id_table_start, unsigned short no_ids) 69 u64 id_table_start, u64 next_table, unsigned short no_ids)
70{ 70{
71 unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); 71 unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
72 __le64 *id_table; 72 __le64 *table;
73 int err;
74 73
75 TRACE("In read_id_index_table, length %d\n", length); 74 TRACE("In read_id_index_table, length %d\n", length);
76 75
77 /* Allocate id lookup table indexes */ 76 /* Sanity check values */
78 id_table = kmalloc(length, GFP_KERNEL); 77
79 if (id_table == NULL) { 78 /* there should always be at least one id */
80 ERROR("Failed to allocate id index table\n"); 79 if (no_ids == 0)
81 return ERR_PTR(-ENOMEM); 80 return ERR_PTR(-EINVAL);
82 } 81
82 /*
83 * length bytes should not extend into the next table - this check
84 * also traps instances where id_table_start is incorrectly larger
85 * than the next table start
86 */
87 if (id_table_start + length > next_table)
88 return ERR_PTR(-EINVAL);
89
90 table = squashfs_read_table(sb, id_table_start, length);
83 91
84 err = squashfs_read_table(sb, id_table, id_table_start, length); 92 /*
85 if (err < 0) { 93 * table[0] points to the first id lookup table metadata block, this
86 ERROR("unable to read id index table\n"); 94 * should be less than id_table_start
87 kfree(id_table); 95 */
88 return ERR_PTR(err); 96 if (!IS_ERR(table) && table[0] >= id_table_start) {
97 kfree(table);
98 return ERR_PTR(-EINVAL);
89 } 99 }
90 100
91 return id_table; 101 return table;
92} 102}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 62e63ad25075..04bebcaa2373 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 5d922a6701ab..4bc63ac64bc0 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 1f2e608b8785..e3be6a71cfa7 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -44,24 +44,24 @@ extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *,
44 u64, int); 44 u64, int);
45extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *, 45extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
46 u64, int); 46 u64, int);
47extern int squashfs_read_table(struct super_block *, void *, u64, int); 47extern void *squashfs_read_table(struct super_block *, u64, int);
48 48
49/* decompressor.c */ 49/* decompressor.c */
50extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int); 50extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
51extern void *squashfs_decompressor_init(struct super_block *, unsigned short); 51extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
52 52
53/* export.c */ 53/* export.c */
54extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, 54extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
55 unsigned int); 55 unsigned int);
56 56
57/* fragment.c */ 57/* fragment.c */
58extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *); 58extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
59extern __le64 *squashfs_read_fragment_index_table(struct super_block *, 59extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
60 u64, unsigned int); 60 u64, u64, unsigned int);
61 61
62/* id.c */ 62/* id.c */
63extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); 63extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
64extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, 64extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64,
65 unsigned short); 65 unsigned short);
66 66
67/* inode.c */ 67/* inode.c */
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 4582c568ef4d..b4a4e539a08c 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -4,7 +4,7 @@
4 * Squashfs 4 * Squashfs
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index 359baefc01fc..73588e7700ed 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -4,7 +4,7 @@
4 * Squashfs 4 * Squashfs
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index d9037a5215f0..651f0b31d296 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -4,7 +4,7 @@
4 * Squashfs 4 * Squashfs
5 * 5 *
6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 6 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
7 * Phillip Lougher <phillip@lougher.demon.co.uk> 7 * Phillip Lougher <phillip@squashfs.org.uk>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5c8184c061a4..6f26abee3597 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -83,7 +83,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
83 long long root_inode; 83 long long root_inode;
84 unsigned short flags; 84 unsigned short flags;
85 unsigned int fragments; 85 unsigned int fragments;
86 u64 lookup_table_start, xattr_id_table_start; 86 u64 lookup_table_start, xattr_id_table_start, next_table;
87 int err; 87 int err;
88 88
89 TRACE("Entered squashfs_fill_superblock\n"); 89 TRACE("Entered squashfs_fill_superblock\n");
@@ -95,12 +95,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
95 } 95 }
96 msblk = sb->s_fs_info; 96 msblk = sb->s_fs_info;
97 97
98 sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
99 if (sblk == NULL) {
100 ERROR("Failed to allocate squashfs_super_block\n");
101 goto failure;
102 }
103
104 msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE); 98 msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
105 msblk->devblksize_log2 = ffz(~msblk->devblksize); 99 msblk->devblksize_log2 = ffz(~msblk->devblksize);
106 100
@@ -114,10 +108,12 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
114 * of bytes_used) we need to set it to an initial sensible dummy value 108 * of bytes_used) we need to set it to an initial sensible dummy value
115 */ 109 */
116 msblk->bytes_used = sizeof(*sblk); 110 msblk->bytes_used = sizeof(*sblk);
117 err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk)); 111 sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));
118 112
119 if (err < 0) { 113 if (IS_ERR(sblk)) {
120 ERROR("unable to read squashfs_super_block\n"); 114 ERROR("unable to read squashfs_super_block\n");
115 err = PTR_ERR(sblk);
116 sblk = NULL;
121 goto failed_mount; 117 goto failed_mount;
122 } 118 }
123 119
@@ -218,18 +214,61 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
218 goto failed_mount; 214 goto failed_mount;
219 } 215 }
220 216
217 /* Handle xattrs */
218 sb->s_xattr = squashfs_xattr_handlers;
219 xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
220 if (xattr_id_table_start == SQUASHFS_INVALID_BLK) {
221 next_table = msblk->bytes_used;
222 goto allocate_id_index_table;
223 }
224
225 /* Allocate and read xattr id lookup table */
226 msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
227 xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
228 if (IS_ERR(msblk->xattr_id_table)) {
229 ERROR("unable to read xattr id index table\n");
230 err = PTR_ERR(msblk->xattr_id_table);
231 msblk->xattr_id_table = NULL;
232 if (err != -ENOTSUPP)
233 goto failed_mount;
234 }
235 next_table = msblk->xattr_table;
236
237allocate_id_index_table:
221 /* Allocate and read id index table */ 238 /* Allocate and read id index table */
222 msblk->id_table = squashfs_read_id_index_table(sb, 239 msblk->id_table = squashfs_read_id_index_table(sb,
223 le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids)); 240 le64_to_cpu(sblk->id_table_start), next_table,
241 le16_to_cpu(sblk->no_ids));
224 if (IS_ERR(msblk->id_table)) { 242 if (IS_ERR(msblk->id_table)) {
243 ERROR("unable to read id index table\n");
225 err = PTR_ERR(msblk->id_table); 244 err = PTR_ERR(msblk->id_table);
226 msblk->id_table = NULL; 245 msblk->id_table = NULL;
227 goto failed_mount; 246 goto failed_mount;
228 } 247 }
248 next_table = msblk->id_table[0];
249
250 /* Handle inode lookup table */
251 lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
252 if (lookup_table_start == SQUASHFS_INVALID_BLK)
253 goto handle_fragments;
254
255 /* Allocate and read inode lookup table */
256 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
257 lookup_table_start, next_table, msblk->inodes);
258 if (IS_ERR(msblk->inode_lookup_table)) {
259 ERROR("unable to read inode lookup table\n");
260 err = PTR_ERR(msblk->inode_lookup_table);
261 msblk->inode_lookup_table = NULL;
262 goto failed_mount;
263 }
264 next_table = msblk->inode_lookup_table[0];
229 265
266 sb->s_export_op = &squashfs_export_ops;
267
268handle_fragments:
230 fragments = le32_to_cpu(sblk->fragments); 269 fragments = le32_to_cpu(sblk->fragments);
231 if (fragments == 0) 270 if (fragments == 0)
232 goto allocate_lookup_table; 271 goto check_directory_table;
233 272
234 msblk->fragment_cache = squashfs_cache_init("fragment", 273 msblk->fragment_cache = squashfs_cache_init("fragment",
235 SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); 274 SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
@@ -240,45 +279,29 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
240 279
241 /* Allocate and read fragment index table */ 280 /* Allocate and read fragment index table */
242 msblk->fragment_index = squashfs_read_fragment_index_table(sb, 281 msblk->fragment_index = squashfs_read_fragment_index_table(sb,
243 le64_to_cpu(sblk->fragment_table_start), fragments); 282 le64_to_cpu(sblk->fragment_table_start), next_table, fragments);
244 if (IS_ERR(msblk->fragment_index)) { 283 if (IS_ERR(msblk->fragment_index)) {
284 ERROR("unable to read fragment index table\n");
245 err = PTR_ERR(msblk->fragment_index); 285 err = PTR_ERR(msblk->fragment_index);
246 msblk->fragment_index = NULL; 286 msblk->fragment_index = NULL;
247 goto failed_mount; 287 goto failed_mount;
248 } 288 }
289 next_table = msblk->fragment_index[0];
249 290
250allocate_lookup_table: 291check_directory_table:
251 lookup_table_start = le64_to_cpu(sblk->lookup_table_start); 292 /* Sanity check directory_table */
252 if (lookup_table_start == SQUASHFS_INVALID_BLK) 293 if (msblk->directory_table >= next_table) {
253 goto allocate_xattr_table; 294 err = -EINVAL;
254
255 /* Allocate and read inode lookup table */
256 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
257 lookup_table_start, msblk->inodes);
258 if (IS_ERR(msblk->inode_lookup_table)) {
259 err = PTR_ERR(msblk->inode_lookup_table);
260 msblk->inode_lookup_table = NULL;
261 goto failed_mount; 295 goto failed_mount;
262 } 296 }
263 297
264 sb->s_export_op = &squashfs_export_ops; 298 /* Sanity check inode_table */
265 299 if (msblk->inode_table >= msblk->directory_table) {
266allocate_xattr_table: 300 err = -EINVAL;
267 sb->s_xattr = squashfs_xattr_handlers; 301 goto failed_mount;
268 xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
269 if (xattr_id_table_start == SQUASHFS_INVALID_BLK)
270 goto allocate_root;
271
272 /* Allocate and read xattr id lookup table */
273 msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
274 xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
275 if (IS_ERR(msblk->xattr_id_table)) {
276 err = PTR_ERR(msblk->xattr_id_table);
277 msblk->xattr_id_table = NULL;
278 if (err != -ENOTSUPP)
279 goto failed_mount;
280 } 302 }
281allocate_root: 303
304 /* allocate root */
282 root = new_inode(sb); 305 root = new_inode(sb);
283 if (!root) { 306 if (!root) {
284 err = -ENOMEM; 307 err = -ENOMEM;
@@ -318,11 +341,6 @@ failed_mount:
318 sb->s_fs_info = NULL; 341 sb->s_fs_info = NULL;
319 kfree(sblk); 342 kfree(sblk);
320 return err; 343 return err;
321
322failure:
323 kfree(sb->s_fs_info);
324 sb->s_fs_info = NULL;
325 return -ENOMEM;
326} 344}
327 345
328 346
@@ -475,5 +493,5 @@ static const struct super_operations squashfs_super_ops = {
475module_init(init_squashfs_fs); 493module_init(init_squashfs_fs);
476module_exit(exit_squashfs_fs); 494module_exit(exit_squashfs_fs);
477MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem"); 495MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
478MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>"); 496MODULE_AUTHOR("Phillip Lougher <phillip@squashfs.org.uk>");
479MODULE_LICENSE("GPL"); 497MODULE_LICENSE("GPL");
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index ec86434921e1..1191817264cc 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index 3876c36699a1..92fcde7b4d61 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2010 4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
index b634efce4bde..c83f5d9ec125 100644
--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2010 4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -31,6 +31,7 @@ static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
31 u64 start, u64 *xattr_table_start, int *xattr_ids) 31 u64 start, u64 *xattr_table_start, int *xattr_ids)
32{ 32{
33 ERROR("Xattrs in filesystem, these will be ignored\n"); 33 ERROR("Xattrs in filesystem, these will be ignored\n");
34 *xattr_table_start = start;
34 return ERR_PTR(-ENOTSUPP); 35 return ERR_PTR(-ENOTSUPP);
35} 36}
36 37
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
index 05385dbe1465..c89607d690c4 100644
--- a/fs/squashfs/xattr_id.c
+++ b/fs/squashfs/xattr_id.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2010 4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -67,34 +67,29 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
67 u64 *xattr_table_start, int *xattr_ids) 67 u64 *xattr_table_start, int *xattr_ids)
68{ 68{
69 unsigned int len; 69 unsigned int len;
70 __le64 *xid_table; 70 struct squashfs_xattr_id_table *id_table;
71 struct squashfs_xattr_id_table id_table; 71
72 int err; 72 id_table = squashfs_read_table(sb, start, sizeof(*id_table));
73 if (IS_ERR(id_table))
74 return (__le64 *) id_table;
75
76 *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
77 *xattr_ids = le32_to_cpu(id_table->xattr_ids);
78 kfree(id_table);
79
80 /* Sanity check values */
81
82 /* there is always at least one xattr id */
83 if (*xattr_ids == 0)
84 return ERR_PTR(-EINVAL);
85
86 /* xattr_table should be less than start */
87 if (*xattr_table_start >= start)
88 return ERR_PTR(-EINVAL);
73 89
74 err = squashfs_read_table(sb, &id_table, start, sizeof(id_table));
75 if (err < 0) {
76 ERROR("unable to read xattr id table\n");
77 return ERR_PTR(err);
78 }
79 *xattr_table_start = le64_to_cpu(id_table.xattr_table_start);
80 *xattr_ids = le32_to_cpu(id_table.xattr_ids);
81 len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); 90 len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
82 91
83 TRACE("In read_xattr_index_table, length %d\n", len); 92 TRACE("In read_xattr_index_table, length %d\n", len);
84 93
85 /* Allocate xattr id lookup table indexes */ 94 return squashfs_read_table(sb, start + sizeof(*id_table), len);
86 xid_table = kmalloc(len, GFP_KERNEL);
87 if (xid_table == NULL) {
88 ERROR("Failed to allocate xattr id index table\n");
89 return ERR_PTR(-ENOMEM);
90 }
91
92 err = squashfs_read_table(sb, xid_table, start + sizeof(id_table), len);
93 if (err < 0) {
94 ERROR("unable to read xattr id index table\n");
95 kfree(xid_table);
96 return ERR_PTR(err);
97 }
98
99 return xid_table;
100} 95}
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index aa47a286d1f8..1760b7d108f6 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 517688b32ffa..55d918fd2d86 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -2,7 +2,7 @@
2 * Squashfs - a compressed read only filesystem for Linux 2 * Squashfs - a compressed read only filesystem for Linux
3 * 3 *
4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 4 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5 * Phillip Lougher <phillip@lougher.demon.co.uk> 5 * Phillip Lougher <phillip@squashfs.org.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
diff --git a/fs/super.c b/fs/super.c
index c04f7e0b7ed2..c75593953c52 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -31,6 +31,7 @@
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/backing-dev.h> 32#include <linux/backing-dev.h>
33#include <linux/rculist_bl.h> 33#include <linux/rculist_bl.h>
34#include <linux/cleancache.h>
34#include "internal.h" 35#include "internal.h"
35 36
36 37
@@ -112,6 +113,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
112 s->s_maxbytes = MAX_NON_LFS; 113 s->s_maxbytes = MAX_NON_LFS;
113 s->s_op = &default_op; 114 s->s_op = &default_op;
114 s->s_time_gran = 1000000000; 115 s->s_time_gran = 1000000000;
116 s->cleancache_poolid = -1;
115 } 117 }
116out: 118out:
117 return s; 119 return s;
@@ -177,6 +179,7 @@ void deactivate_locked_super(struct super_block *s)
177{ 179{
178 struct file_system_type *fs = s->s_type; 180 struct file_system_type *fs = s->s_type;
179 if (atomic_dec_and_test(&s->s_active)) { 181 if (atomic_dec_and_test(&s->s_active)) {
182 cleancache_flush_fs(s);
180 fs->kill_sb(s); 183 fs->kill_sb(s);
181 /* 184 /*
182 * We need to call rcu_barrier so all the delayed rcu free 185 * We need to call rcu_barrier so all the delayed rcu free
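
[Editor's illustrative sketch, not part of the commit: the two fs/super.c hunks above give every superblock a cleancache pool id initialised to -1 and flush the pool on the final deactivation, before kill_sb(). A minimal userspace model of that lifecycle, with stand-in types:]

#include <stdio.h>

struct sb { int active; int cleancache_poolid; };

static void cleancache_flush_fs(struct sb *s)
{
	if (s->cleancache_poolid >= 0) {
		printf("releasing cleancache pool %d\n", s->cleancache_poolid);
		s->cleancache_poolid = -1;
	}
}

static void deactivate(struct sb *s)
{
	if (--s->active == 0) {
		cleancache_flush_fs(s);  /* before fs->kill_sb() in the real code */
		/* fs->kill_sb(s); ... */
	}
}

int main(void)
{
	struct sb s = { .active = 1, .cleancache_poolid = -1 };

	deactivate(&s);
	return 0;
}
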
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index e474fbcf8bde..e2cc6756f3b1 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -196,6 +196,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
196 struct inode *inode = dentry->d_inode; 196 struct inode *inode = dentry->d_inode;
197 int err = -ENOTEMPTY; 197 int err = -ENOTEMPTY;
198 198
199 dentry_unhash(dentry);
200
199 if (sysv_empty_dir(inode)) { 201 if (sysv_empty_dir(inode)) {
200 err = sysv_unlink(dir, dentry); 202 err = sysv_unlink(dir, dentry);
201 if (!err) { 203 if (!err) {
@@ -222,6 +224,9 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
222 struct sysv_dir_entry * old_de; 224 struct sysv_dir_entry * old_de;
223 int err = -ENOENT; 225 int err = -ENOENT;
224 226
227 if (new_inode && S_ISDIR(new_inode->i_mode))
228 dentry_unhash(new_dentry);
229
225 old_de = sysv_find_entry(old_dentry, &old_page); 230 old_de = sysv_find_entry(old_dentry, &old_page);
226 if (!old_de) 231 if (!old_de)
227 goto out; 232 goto out;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ef5abd38f0bf..c2b80943560d 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -656,6 +656,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
656 struct ubifs_inode *dir_ui = ubifs_inode(dir); 656 struct ubifs_inode *dir_ui = ubifs_inode(dir);
657 struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 }; 657 struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
658 658
659 dentry_unhash(dentry);
660
659 /* 661 /*
660 * Budget request settings: deletion direntry, deletion inode and 662 * Budget request settings: deletion direntry, deletion inode and
661 * changing the parent inode. If budgeting fails, go ahead anyway 663 * changing the parent inode. If budgeting fails, go ahead anyway
@@ -976,6 +978,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
976 .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; 978 .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
977 struct timespec time; 979 struct timespec time;
978 980
981 if (new_inode && S_ISDIR(new_inode->i_mode))
982 dentry_unhash(new_dentry);
983
979 /* 984 /*
980 * Budget request settings: deletion direntry, new direntry, removing 985 * Budget request settings: deletion direntry, new direntry, removing
981 * the old inode, and changing old and new parent directory inodes. 986 * the old inode, and changing old and new parent directory inodes.
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index f1dce848ef96..4d76594c2a8f 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -783,6 +783,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
783 struct fileIdentDesc *fi, cfi; 783 struct fileIdentDesc *fi, cfi;
784 struct kernel_lb_addr tloc; 784 struct kernel_lb_addr tloc;
785 785
786 dentry_unhash(dentry);
787
786 retval = -ENOENT; 788 retval = -ENOENT;
787 fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); 789 fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
788 if (!fi) 790 if (!fi)
@@ -1081,6 +1083,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1081 struct kernel_lb_addr tloc; 1083 struct kernel_lb_addr tloc;
1082 struct udf_inode_info *old_iinfo = UDF_I(old_inode); 1084 struct udf_inode_info *old_iinfo = UDF_I(old_inode);
1083 1085
1086 if (new_inode && S_ISDIR(new_inode->i_mode))
1087 dentry_unhash(new_dentry);
1088
1084 ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); 1089 ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
1085 if (ofi) { 1090 if (ofi) {
1086 if (ofibh.sbh != ofibh.ebh) 1091 if (ofibh.sbh != ofibh.ebh)
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 46f7a807bbc1..42694e11c23d 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -424,8 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
424 ufs_cpu_to_data_ptr(sb, p, result); 424 ufs_cpu_to_data_ptr(sb, p, result);
425 *err = 0; 425 *err = 0;
426 UFS_I(inode)->i_lastfrag = 426 UFS_I(inode)->i_lastfrag =
427 max_t(u32, UFS_I(inode)->i_lastfrag, 427 max(UFS_I(inode)->i_lastfrag, fragment + count);
428 fragment + count);
429 ufs_clear_frags(inode, result + oldcount, 428 ufs_clear_frags(inode, result + oldcount,
430 newcount - oldcount, locked_page != NULL); 429 newcount - oldcount, locked_page != NULL);
431 } 430 }
@@ -440,7 +439,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
440 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err); 439 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
441 if (result) { 440 if (result) {
442 *err = 0; 441 *err = 0;
443 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 442 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
443 fragment + count);
444 ufs_clear_frags(inode, result + oldcount, newcount - oldcount, 444 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
445 locked_page != NULL); 445 locked_page != NULL);
446 unlock_super(sb); 446 unlock_super(sb);
@@ -479,7 +479,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
479 uspi->s_sbbase + result, locked_page); 479 uspi->s_sbbase + result, locked_page);
480 ufs_cpu_to_data_ptr(sb, p, result); 480 ufs_cpu_to_data_ptr(sb, p, result);
481 *err = 0; 481 *err = 0;
482 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 482 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
483 fragment + count);
483 unlock_super(sb); 484 unlock_super(sb);
484 if (newcount < request) 485 if (newcount < request)
485 ufs_free_fragments (inode, result + newcount, request - newcount); 486 ufs_free_fragments (inode, result + newcount, request - newcount);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 29309e25417f..953ebdfc5bf7 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -258,6 +258,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
258 struct inode * inode = dentry->d_inode; 258 struct inode * inode = dentry->d_inode;
259 int err= -ENOTEMPTY; 259 int err= -ENOTEMPTY;
260 260
261 dentry_unhash(dentry);
262
261 lock_ufs(dir->i_sb); 263 lock_ufs(dir->i_sb);
262 if (ufs_empty_dir (inode)) { 264 if (ufs_empty_dir (inode)) {
263 err = ufs_unlink(dir, dentry); 265 err = ufs_unlink(dir, dentry);
@@ -282,6 +284,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
282 struct ufs_dir_entry *old_de; 284 struct ufs_dir_entry *old_de;
283 int err = -ENOENT; 285 int err = -ENOENT;
284 286
287 if (new_inode && S_ISDIR(new_inode->i_mode))
288 dentry_unhash(new_dentry);
289
285 old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); 290 old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
286 if (!old_de) 291 if (!old_de)
287 goto out; 292 goto out;
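
[Editor's illustrative sketch, not part of the commit: the sysv, ubifs, udf and ufs hunks above all follow the same convention now that the VFS no longer unhashes for them — rmdir unconditionally unhashes its victim, while rename unhashes the target dentry only when it replaces a directory. Stand-in types below, not the VFS structures:]

#include <stdbool.h>

struct inode  { bool is_dir; };
struct dentry { struct inode *d_inode; bool hashed; };

static void dentry_unhash(struct dentry *d) { d->hashed = false; }

static int example_rmdir(struct inode *dir, struct dentry *dentry)
{
	(void)dir;
	dentry_unhash(dentry);            /* always, before the emptiness check */
	/* ... verify the directory is empty, then unlink it ... */
	return 0;
}

static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *new_inode = new_dentry->d_inode;

	(void)old_dir; (void)old_dentry; (void)new_dir;

	/* only an existing directory target gets unhashed */
	if (new_inode && new_inode->is_dir)
		dentry_unhash(new_dentry);
	/* ... perform the rename ... */
	return 0;
}
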
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 5f821dbc0579..f04f89fbd4d9 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -84,7 +84,7 @@ static int ufs_trunc_direct(struct inode *inode)
84 retry = 0; 84 retry = 0;
85 85
86 frag1 = DIRECT_FRAGMENT; 86 frag1 = DIRECT_FRAGMENT;
87 frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); 87 frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
88 frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); 88 frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
89 frag3 = frag4 & ~uspi->s_fpbmask; 89 frag3 = frag4 & ~uspi->s_fpbmask;
90 block1 = block2 = 0; 90 block1 = block2 = 0;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 52b2b5da566e..5e68099db2a5 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1422,12 +1422,12 @@ restart:
1422int 1422int
1423xfs_buftarg_shrink( 1423xfs_buftarg_shrink(
1424 struct shrinker *shrink, 1424 struct shrinker *shrink,
1425 int nr_to_scan, 1425 struct shrink_control *sc)
1426 gfp_t mask)
1427{ 1426{
1428 struct xfs_buftarg *btp = container_of(shrink, 1427 struct xfs_buftarg *btp = container_of(shrink,
1429 struct xfs_buftarg, bt_shrinker); 1428 struct xfs_buftarg, bt_shrinker);
1430 struct xfs_buf *bp; 1429 struct xfs_buf *bp;
1430 int nr_to_scan = sc->nr_to_scan;
1431 LIST_HEAD(dispose); 1431 LIST_HEAD(dispose);
1432 1432
1433 if (!nr_to_scan) 1433 if (!nr_to_scan)
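
[Editor's illustrative sketch, not part of the commit: the xfs_buf.c hunk above, and the xfs_sync.c and xfs_qm.c hunks further down, convert shrinker callbacks from the old (shrinker, nr_to_scan, gfp_mask) signature to one that takes a struct shrink_control. A minimal model of the new shape, with simplified stand-in definitions so it compiles outside the kernel:]

typedef unsigned int gfp_t;

struct shrink_control {
	gfp_t gfp_mask;
	unsigned long nr_to_scan;
};

struct shrinker {
	int (*shrink)(struct shrinker *, struct shrink_control *);
	int seeks;
};

static int example_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long nr_to_scan = sc->nr_to_scan;   /* was a separate argument */
	gfp_t gfp_mask = sc->gfp_mask;               /* likewise */

	(void)shrink;
	(void)gfp_mask;
	if (!nr_to_scan)
		return 0;        /* the callback reports its cache size here */
	/* ... scan and free up to nr_to_scan objects ... */
	return 0;
}
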
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
index d61611c88012..244e797dae32 100644
--- a/fs/xfs/linux-2.6/xfs_discard.c
+++ b/fs/xfs/linux-2.6/xfs_discard.c
@@ -191,3 +191,32 @@ xfs_ioc_trim(
191 return -XFS_ERROR(EFAULT); 191 return -XFS_ERROR(EFAULT);
192 return 0; 192 return 0;
193} 193}
194
195int
196xfs_discard_extents(
197 struct xfs_mount *mp,
198 struct list_head *list)
199{
200 struct xfs_busy_extent *busyp;
201 int error = 0;
202
203 list_for_each_entry(busyp, list, list) {
204 trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
205 busyp->length);
206
207 error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
208 XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
209 XFS_FSB_TO_BB(mp, busyp->length),
210 GFP_NOFS, 0);
211 if (error && error != EOPNOTSUPP) {
212 xfs_info(mp,
213 "discard failed for extent [0x%llu,%u], error %d",
214 (unsigned long long)busyp->bno,
215 busyp->length,
216 error);
217 return error;
218 }
219 }
220
221 return 0;
222}
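
[Editor's illustrative sketch, not part of the commit: the new xfs_discard_extents() above walks a list of busy extents, issues a discard for each, tolerates "operation not supported" and stops on any other failure. A userspace model of that error policy, where issue_discard() is a stand-in for blkdev_issue_discard() and negative errnos are used for simplicity:]

#include <errno.h>
#include <stdio.h>

struct extent { unsigned long long bno; unsigned int length; };

static int issue_discard(const struct extent *e)
{
	(void)e;
	return -EOPNOTSUPP;  /* pretend the device cannot discard */
}

static int discard_extents(const struct extent *list, int n)
{
	for (int i = 0; i < n; i++) {
		int error = issue_discard(&list[i]);

		if (error && error != -EOPNOTSUPP) {
			fprintf(stderr,
				"discard failed for extent [0x%llu,%u], error %d\n",
				list[i].bno, list[i].length, error);
			return error;
		}
	}
	return 0;
}
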
diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h
index e82b6dd3e127..344879aea646 100644
--- a/fs/xfs/linux-2.6/xfs_discard.h
+++ b/fs/xfs/linux-2.6/xfs_discard.h
@@ -2,7 +2,9 @@
2#define XFS_DISCARD_H 1 2#define XFS_DISCARD_H 1
3 3
4struct fstrim_range; 4struct fstrim_range;
5struct list_head;
5 6
6extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *); 7extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *);
8extern int xfs_discard_extents(struct xfs_mount *, struct list_head *);
7 9
8#endif /* XFS_DISCARD_H */ 10#endif /* XFS_DISCARD_H */
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index b0aa59e51fd0..98b9c91fcdf1 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -110,8 +110,10 @@ mempool_t *xfs_ioend_pool;
110#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ 110#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
111#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ 111#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
112#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ 112#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
113#define MNTOPT_DELAYLOG "delaylog" /* Delayed loging enabled */ 113#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
114#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed loging disabled */ 114#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
115#define MNTOPT_DISCARD "discard" /* Discard unused blocks */
116#define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */
115 117
116/* 118/*
117 * Table driven mount option parser. 119 * Table driven mount option parser.
@@ -355,6 +357,10 @@ xfs_parseargs(
355 mp->m_flags |= XFS_MOUNT_DELAYLOG; 357 mp->m_flags |= XFS_MOUNT_DELAYLOG;
356 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 358 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
357 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 359 mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
360 } else if (!strcmp(this_char, MNTOPT_DISCARD)) {
361 mp->m_flags |= XFS_MOUNT_DISCARD;
362 } else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
363 mp->m_flags &= ~XFS_MOUNT_DISCARD;
358 } else if (!strcmp(this_char, "ihashsize")) { 364 } else if (!strcmp(this_char, "ihashsize")) {
359 xfs_warn(mp, 365 xfs_warn(mp,
360 "ihashsize no longer used, option is deprecated."); 366 "ihashsize no longer used, option is deprecated.");
@@ -388,6 +394,13 @@ xfs_parseargs(
388 return EINVAL; 394 return EINVAL;
389 } 395 }
390 396
397 if ((mp->m_flags & XFS_MOUNT_DISCARD) &&
398 !(mp->m_flags & XFS_MOUNT_DELAYLOG)) {
399 xfs_warn(mp,
400 "the discard option is incompatible with the nodelaylog option");
401 return EINVAL;
402 }
403
391#ifndef CONFIG_XFS_QUOTA 404#ifndef CONFIG_XFS_QUOTA
392 if (XFS_IS_QUOTA_RUNNING(mp)) { 405 if (XFS_IS_QUOTA_RUNNING(mp)) {
393 xfs_warn(mp, "quota support not available in this kernel."); 406 xfs_warn(mp, "quota support not available in this kernel.");
@@ -488,6 +501,7 @@ xfs_showargs(
488 { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, 501 { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
489 { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, 502 { XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
490 { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, 503 { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
504 { XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD },
491 { 0, NULL } 505 { 0, NULL }
492 }; 506 };
493 static struct proc_xfs_info xfs_info_unset[] = { 507 static struct proc_xfs_info xfs_info_unset[] = {
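
[Editor's illustrative sketch, not part of the commit: the xfs_super.c hunks above add "discard"/"nodiscard" mount options that toggle a flag, and reject mounts that combine discard with nodelaylog. A simplified model of that option handling; the flag values and function names are illustrative, not the XFS parser:]

#include <stdio.h>
#include <string.h>

#define MOUNT_DELAYLOG  0x1
#define MOUNT_DISCARD   0x2

static int parse_option(const char *opt, unsigned int *flags)
{
	if (!strcmp(opt, "delaylog"))
		*flags |= MOUNT_DELAYLOG;
	else if (!strcmp(opt, "nodelaylog"))
		*flags &= ~MOUNT_DELAYLOG;
	else if (!strcmp(opt, "discard"))
		*flags |= MOUNT_DISCARD;
	else if (!strcmp(opt, "nodiscard"))
		*flags &= ~MOUNT_DISCARD;
	else
		return -1;
	return 0;
}

static int check_options(unsigned int flags)
{
	if ((flags & MOUNT_DISCARD) && !(flags & MOUNT_DELAYLOG)) {
		fprintf(stderr,
			"the discard option is incompatible with the nodelaylog option\n");
		return -1;
	}
	return 0;
}
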
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index cb1bb2080e44..8ecad5ff9f9b 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -1032,13 +1032,14 @@ xfs_reclaim_inodes(
1032static int 1032static int
1033xfs_reclaim_inode_shrink( 1033xfs_reclaim_inode_shrink(
1034 struct shrinker *shrink, 1034 struct shrinker *shrink,
1035 int nr_to_scan, 1035 struct shrink_control *sc)
1036 gfp_t gfp_mask)
1037{ 1036{
1038 struct xfs_mount *mp; 1037 struct xfs_mount *mp;
1039 struct xfs_perag *pag; 1038 struct xfs_perag *pag;
1040 xfs_agnumber_t ag; 1039 xfs_agnumber_t ag;
1041 int reclaimable; 1040 int reclaimable;
1041 int nr_to_scan = sc->nr_to_scan;
1042 gfp_t gfp_mask = sc->gfp_mask;
1042 1043
1043 mp = container_of(shrink, struct xfs_mount, m_inode_shrink); 1044 mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
1044 if (nr_to_scan) { 1045 if (nr_to_scan) {
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 69228aa8605a..b94dace4e785 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -60,7 +60,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
60 60
61STATIC int xfs_qm_init_quotainos(xfs_mount_t *); 61STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
62STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 62STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
63STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t); 63STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *);
64 64
65static struct shrinker xfs_qm_shaker = { 65static struct shrinker xfs_qm_shaker = {
66 .shrink = xfs_qm_shake, 66 .shrink = xfs_qm_shake,
@@ -2009,10 +2009,10 @@ xfs_qm_shake_freelist(
2009STATIC int 2009STATIC int
2010xfs_qm_shake( 2010xfs_qm_shake(
2011 struct shrinker *shrink, 2011 struct shrinker *shrink,
2012 int nr_to_scan, 2012 struct shrink_control *sc)
2013 gfp_t gfp_mask)
2014{ 2013{
2015 int ndqused, nfree, n; 2014 int ndqused, nfree, n;
2015 gfp_t gfp_mask = sc->gfp_mask;
2016 2016
2017 if (!kmem_shake_allow(gfp_mask)) 2017 if (!kmem_shake_allow(gfp_mask))
2018 return 0; 2018 return 0;
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index da0a561ffba2..6530769a999b 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -187,6 +187,9 @@ struct xfs_busy_extent {
187 xfs_agnumber_t agno; 187 xfs_agnumber_t agno;
188 xfs_agblock_t bno; 188 xfs_agblock_t bno;
189 xfs_extlen_t length; 189 xfs_extlen_t length;
190 unsigned int flags;
191#define XFS_ALLOC_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
192#define XFS_ALLOC_BUSY_SKIP_DISCARD 0x02 /* do not discard */
190}; 193};
191 194
192/* 195/*
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index acdced86413c..95862bbff56b 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2469,7 +2469,7 @@ xfs_free_extent(
2469 2469
2470 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); 2470 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
2471 if (!error) 2471 if (!error)
2472 xfs_alloc_busy_insert(tp, args.agno, args.agbno, len); 2472 xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
2473error0: 2473error0:
2474 xfs_perag_put(args.pag); 2474 xfs_perag_put(args.pag);
2475 return error; 2475 return error;
@@ -2480,7 +2480,8 @@ xfs_alloc_busy_insert(
2480 struct xfs_trans *tp, 2480 struct xfs_trans *tp,
2481 xfs_agnumber_t agno, 2481 xfs_agnumber_t agno,
2482 xfs_agblock_t bno, 2482 xfs_agblock_t bno,
2483 xfs_extlen_t len) 2483 xfs_extlen_t len,
2484 unsigned int flags)
2484{ 2485{
2485 struct xfs_busy_extent *new; 2486 struct xfs_busy_extent *new;
2486 struct xfs_busy_extent *busyp; 2487 struct xfs_busy_extent *busyp;
@@ -2504,6 +2505,7 @@ xfs_alloc_busy_insert(
2504 new->bno = bno; 2505 new->bno = bno;
2505 new->length = len; 2506 new->length = len;
2506 INIT_LIST_HEAD(&new->list); 2507 INIT_LIST_HEAD(&new->list);
2508 new->flags = flags;
2507 2509
2508 /* trace before insert to be able to see failed inserts */ 2510 /* trace before insert to be able to see failed inserts */
2509 trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len); 2511 trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
@@ -2609,6 +2611,18 @@ xfs_alloc_busy_update_extent(
2609 xfs_agblock_t bend = bbno + busyp->length; 2611 xfs_agblock_t bend = bbno + busyp->length;
2610 2612
2611 /* 2613 /*
2614 * This extent is currently being discarded. Give the thread
2615 * performing the discard a chance to mark the extent unbusy
2616 * and retry.
2617 */
2618 if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
2619 spin_unlock(&pag->pagb_lock);
2620 delay(1);
2621 spin_lock(&pag->pagb_lock);
2622 return false;
2623 }
2624
2625 /*
2612 * If there is a busy extent overlapping a user allocation, we have 2626 * If there is a busy extent overlapping a user allocation, we have
2613 * no choice but to force the log and retry the search. 2627 * no choice but to force the log and retry the search.
2614 * 2628 *
@@ -2813,7 +2827,8 @@ restart:
2813 * If this is a metadata allocation, try to reuse the busy 2827 * If this is a metadata allocation, try to reuse the busy
2814 * extent instead of trimming the allocation. 2828 * extent instead of trimming the allocation.
2815 */ 2829 */
2816 if (!args->userdata) { 2830 if (!args->userdata &&
2831 !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
2817 if (!xfs_alloc_busy_update_extent(args->mp, args->pag, 2832 if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
2818 busyp, fbno, flen, 2833 busyp, fbno, flen,
2819 false)) 2834 false))
@@ -2979,10 +2994,16 @@ xfs_alloc_busy_clear_one(
2979 kmem_free(busyp); 2994 kmem_free(busyp);
2980} 2995}
2981 2996
2997/*
2998 * Remove all extents on the passed in list from the busy extents tree.
2999 * If do_discard is set skip extents that need to be discarded, and mark
3000 * these as undergoing a discard operation instead.
3001 */
2982void 3002void
2983xfs_alloc_busy_clear( 3003xfs_alloc_busy_clear(
2984 struct xfs_mount *mp, 3004 struct xfs_mount *mp,
2985 struct list_head *list) 3005 struct list_head *list,
3006 bool do_discard)
2986{ 3007{
2987 struct xfs_busy_extent *busyp, *n; 3008 struct xfs_busy_extent *busyp, *n;
2988 struct xfs_perag *pag = NULL; 3009 struct xfs_perag *pag = NULL;
@@ -2999,7 +3020,11 @@ xfs_alloc_busy_clear(
2999 agno = busyp->agno; 3020 agno = busyp->agno;
3000 } 3021 }
3001 3022
3002 xfs_alloc_busy_clear_one(mp, pag, busyp); 3023 if (do_discard && busyp->length &&
3024 !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
3025 busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
3026 else
3027 xfs_alloc_busy_clear_one(mp, pag, busyp);
3003 } 3028 }
3004 3029
3005 if (pag) { 3030 if (pag) {
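
[Editor's illustrative sketch, not part of the commit: the xfs_alloc.c hunks above add per-extent flags and change xfs_alloc_busy_clear() so that, when do_discard is set, extents not marked SKIP_DISCARD are left in place flagged as DISCARDED for the discard thread to clear later, while everything else is removed immediately. A flat-array model of that decision, with stand-in types rather than the XFS rbtree code:]

#include <stdbool.h>

#define BUSY_DISCARDED     0x01  /* undergoing a discard op. */
#define BUSY_SKIP_DISCARD  0x02  /* do not discard */

struct busy_extent { unsigned int flags; unsigned int length; bool on_tree; };

static void clear_one(struct busy_extent *b) { b->on_tree = false; }

static void busy_clear(struct busy_extent *list, int n, bool do_discard)
{
	for (int i = 0; i < n; i++) {
		struct busy_extent *b = &list[i];

		if (do_discard && b->length && !(b->flags & BUSY_SKIP_DISCARD))
			b->flags = BUSY_DISCARDED;  /* cleared after the discard */
		else
			clear_one(b);
	}
}
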
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 240ad288f2f9..2f52b924be79 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -137,10 +137,11 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp,
137#ifdef __KERNEL__ 137#ifdef __KERNEL__
138void 138void
139xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno, 139xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
140 xfs_agblock_t bno, xfs_extlen_t len); 140 xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
141 141
142void 142void
143xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list); 143xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list,
144 bool do_discard);
144 145
145int 146int
146xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno, 147xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 8b469d53599f..2b3518826a69 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -120,7 +120,8 @@ xfs_allocbt_free_block(
120 if (error) 120 if (error)
121 return error; 121 return error;
122 122
123 xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); 123 xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
124 XFS_ALLOC_BUSY_SKIP_DISCARD);
124 xfs_trans_agbtree_delta(cur->bc_tp, -1); 125 xfs_trans_agbtree_delta(cur->bc_tp, -1);
125 return 0; 126 return 0;
126} 127}
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index fa00788de2f5..e546a33214c9 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -89,36 +89,19 @@ xfs_bmap_add_attrfork_local(
89 int *flags); /* inode logging flags */ 89 int *flags); /* inode logging flags */
90 90
91/* 91/*
92 * Called by xfs_bmapi to update file extent records and the btree
93 * after allocating space (or doing a delayed allocation).
94 */
95STATIC int /* error */
96xfs_bmap_add_extent(
97 xfs_inode_t *ip, /* incore inode pointer */
98 xfs_extnum_t idx, /* extent number to update/insert */
99 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
100 xfs_bmbt_irec_t *new, /* new data to add to file extents */
101 xfs_fsblock_t *first, /* pointer to firstblock variable */
102 xfs_bmap_free_t *flist, /* list of extents to be freed */
103 int *logflagsp, /* inode logging flags */
104 int whichfork, /* data or attr fork */
105 int rsvd); /* OK to allocate reserved blocks */
106
107/*
108 * Called by xfs_bmap_add_extent to handle cases converting a delayed 92 * Called by xfs_bmap_add_extent to handle cases converting a delayed
109 * allocation to a real allocation. 93 * allocation to a real allocation.
110 */ 94 */
111STATIC int /* error */ 95STATIC int /* error */
112xfs_bmap_add_extent_delay_real( 96xfs_bmap_add_extent_delay_real(
113 xfs_inode_t *ip, /* incore inode pointer */ 97 xfs_inode_t *ip, /* incore inode pointer */
114 xfs_extnum_t idx, /* extent number to update/insert */ 98 xfs_extnum_t *idx, /* extent number to update/insert */
115 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 99 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
116 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 100 xfs_bmbt_irec_t *new, /* new data to add to file extents */
117 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ 101 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
118 xfs_fsblock_t *first, /* pointer to firstblock variable */ 102 xfs_fsblock_t *first, /* pointer to firstblock variable */
119 xfs_bmap_free_t *flist, /* list of extents to be freed */ 103 xfs_bmap_free_t *flist, /* list of extents to be freed */
120 int *logflagsp, /* inode logging flags */ 104 int *logflagsp); /* inode logging flags */
121 int rsvd); /* OK to allocate reserved blocks */
122 105
123/* 106/*
124 * Called by xfs_bmap_add_extent to handle cases converting a hole 107 * Called by xfs_bmap_add_extent to handle cases converting a hole
@@ -127,10 +110,9 @@ xfs_bmap_add_extent_delay_real(
127STATIC int /* error */ 110STATIC int /* error */
128xfs_bmap_add_extent_hole_delay( 111xfs_bmap_add_extent_hole_delay(
129 xfs_inode_t *ip, /* incore inode pointer */ 112 xfs_inode_t *ip, /* incore inode pointer */
130 xfs_extnum_t idx, /* extent number to update/insert */ 113 xfs_extnum_t *idx, /* extent number to update/insert */
131 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 114 xfs_bmbt_irec_t *new, /* new data to add to file extents */
132 int *logflagsp,/* inode logging flags */ 115 int *logflagsp); /* inode logging flags */
133 int rsvd); /* OK to allocate reserved blocks */
134 116
135/* 117/*
136 * Called by xfs_bmap_add_extent to handle cases converting a hole 118 * Called by xfs_bmap_add_extent to handle cases converting a hole
@@ -139,7 +121,7 @@ xfs_bmap_add_extent_hole_delay(
139STATIC int /* error */ 121STATIC int /* error */
140xfs_bmap_add_extent_hole_real( 122xfs_bmap_add_extent_hole_real(
141 xfs_inode_t *ip, /* incore inode pointer */ 123 xfs_inode_t *ip, /* incore inode pointer */
142 xfs_extnum_t idx, /* extent number to update/insert */ 124 xfs_extnum_t *idx, /* extent number to update/insert */
143 xfs_btree_cur_t *cur, /* if null, not a btree */ 125 xfs_btree_cur_t *cur, /* if null, not a btree */
144 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 126 xfs_bmbt_irec_t *new, /* new data to add to file extents */
145 int *logflagsp, /* inode logging flags */ 127 int *logflagsp, /* inode logging flags */
@@ -152,7 +134,7 @@ xfs_bmap_add_extent_hole_real(
152STATIC int /* error */ 134STATIC int /* error */
153xfs_bmap_add_extent_unwritten_real( 135xfs_bmap_add_extent_unwritten_real(
154 xfs_inode_t *ip, /* incore inode pointer */ 136 xfs_inode_t *ip, /* incore inode pointer */
155 xfs_extnum_t idx, /* extent number to update/insert */ 137 xfs_extnum_t *idx, /* extent number to update/insert */
156 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 138 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
157 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 139 xfs_bmbt_irec_t *new, /* new data to add to file extents */
158 int *logflagsp); /* inode logging flags */ 140 int *logflagsp); /* inode logging flags */
@@ -180,22 +162,6 @@ xfs_bmap_btree_to_extents(
180 int whichfork); /* data or attr fork */ 162 int whichfork); /* data or attr fork */
181 163
182/* 164/*
183 * Called by xfs_bmapi to update file extent records and the btree
184 * after removing space (or undoing a delayed allocation).
185 */
186STATIC int /* error */
187xfs_bmap_del_extent(
188 xfs_inode_t *ip, /* incore inode pointer */
189 xfs_trans_t *tp, /* current trans pointer */
190 xfs_extnum_t idx, /* extent number to update/insert */
191 xfs_bmap_free_t *flist, /* list of extents to be freed */
192 xfs_btree_cur_t *cur, /* if null, not a btree */
193 xfs_bmbt_irec_t *new, /* new data to add to file extents */
194 int *logflagsp,/* inode logging flags */
195 int whichfork, /* data or attr fork */
196 int rsvd); /* OK to allocate reserved blocks */
197
198/*
199 * Remove the entry "free" from the free item list. Prev points to the 165 * Remove the entry "free" from the free item list. Prev points to the
200 * previous entry, unless "free" is the head of the list. 166 * previous entry, unless "free" is the head of the list.
201 */ 167 */
@@ -474,14 +440,13 @@ xfs_bmap_add_attrfork_local(
474STATIC int /* error */ 440STATIC int /* error */
475xfs_bmap_add_extent( 441xfs_bmap_add_extent(
476 xfs_inode_t *ip, /* incore inode pointer */ 442 xfs_inode_t *ip, /* incore inode pointer */
477 xfs_extnum_t idx, /* extent number to update/insert */ 443 xfs_extnum_t *idx, /* extent number to update/insert */
478 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 444 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
479 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 445 xfs_bmbt_irec_t *new, /* new data to add to file extents */
480 xfs_fsblock_t *first, /* pointer to firstblock variable */ 446 xfs_fsblock_t *first, /* pointer to firstblock variable */
481 xfs_bmap_free_t *flist, /* list of extents to be freed */ 447 xfs_bmap_free_t *flist, /* list of extents to be freed */
482 int *logflagsp, /* inode logging flags */ 448 int *logflagsp, /* inode logging flags */
483 int whichfork, /* data or attr fork */ 449 int whichfork) /* data or attr fork */
484 int rsvd) /* OK to use reserved data blocks */
485{ 450{
486 xfs_btree_cur_t *cur; /* btree cursor or null */ 451 xfs_btree_cur_t *cur; /* btree cursor or null */
487 xfs_filblks_t da_new; /* new count del alloc blocks used */ 452 xfs_filblks_t da_new; /* new count del alloc blocks used */
@@ -492,23 +457,27 @@ xfs_bmap_add_extent(
492 xfs_extnum_t nextents; /* number of extents in file now */ 457 xfs_extnum_t nextents; /* number of extents in file now */
493 458
494 XFS_STATS_INC(xs_add_exlist); 459 XFS_STATS_INC(xs_add_exlist);
460
495 cur = *curp; 461 cur = *curp;
496 ifp = XFS_IFORK_PTR(ip, whichfork); 462 ifp = XFS_IFORK_PTR(ip, whichfork);
497 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 463 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
498 ASSERT(idx <= nextents);
499 da_old = da_new = 0; 464 da_old = da_new = 0;
500 error = 0; 465 error = 0;
466
467 ASSERT(*idx >= 0);
468 ASSERT(*idx <= nextents);
469
501 /* 470 /*
502 * This is the first extent added to a new/empty file. 471 * This is the first extent added to a new/empty file.
503 * Special case this one, so other routines get to assume there are 472 * Special case this one, so other routines get to assume there are
504 * already extents in the list. 473 * already extents in the list.
505 */ 474 */
506 if (nextents == 0) { 475 if (nextents == 0) {
507 xfs_iext_insert(ip, 0, 1, new, 476 xfs_iext_insert(ip, *idx, 1, new,
508 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); 477 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
509 478
510 ASSERT(cur == NULL); 479 ASSERT(cur == NULL);
511 ifp->if_lastex = 0; 480
512 if (!isnullstartblock(new->br_startblock)) { 481 if (!isnullstartblock(new->br_startblock)) {
513 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 482 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
514 logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); 483 logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
@@ -522,27 +491,25 @@ xfs_bmap_add_extent(
522 if (cur) 491 if (cur)
523 ASSERT((cur->bc_private.b.flags & 492 ASSERT((cur->bc_private.b.flags &
524 XFS_BTCUR_BPRV_WASDEL) == 0); 493 XFS_BTCUR_BPRV_WASDEL) == 0);
525 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new, 494 error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
526 &logflags, rsvd))) 495 &logflags);
527 goto done;
528 } 496 }
529 /* 497 /*
530 * Real allocation off the end of the file. 498 * Real allocation off the end of the file.
531 */ 499 */
532 else if (idx == nextents) { 500 else if (*idx == nextents) {
533 if (cur) 501 if (cur)
534 ASSERT((cur->bc_private.b.flags & 502 ASSERT((cur->bc_private.b.flags &
535 XFS_BTCUR_BPRV_WASDEL) == 0); 503 XFS_BTCUR_BPRV_WASDEL) == 0);
536 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, 504 error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
537 &logflags, whichfork))) 505 &logflags, whichfork);
538 goto done;
539 } else { 506 } else {
540 xfs_bmbt_irec_t prev; /* old extent at offset idx */ 507 xfs_bmbt_irec_t prev; /* old extent at offset idx */
541 508
542 /* 509 /*
543 * Get the record referred to by idx. 510 * Get the record referred to by idx.
544 */ 511 */
545 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev); 512 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &prev);
546 /* 513 /*
547 * If it's a real allocation record, and the new allocation ends 514 * If it's a real allocation record, and the new allocation ends
548 * after the start of the referred to record, then we're filling 515 * after the start of the referred to record, then we're filling
@@ -557,22 +524,18 @@ xfs_bmap_add_extent(
557 if (cur) 524 if (cur)
558 ASSERT(cur->bc_private.b.flags & 525 ASSERT(cur->bc_private.b.flags &
559 XFS_BTCUR_BPRV_WASDEL); 526 XFS_BTCUR_BPRV_WASDEL);
560 if ((error = xfs_bmap_add_extent_delay_real(ip, 527 error = xfs_bmap_add_extent_delay_real(ip,
561 idx, &cur, new, &da_new, first, flist, 528 idx, &cur, new, &da_new,
562 &logflags, rsvd))) 529 first, flist, &logflags);
563 goto done;
564 } else if (new->br_state == XFS_EXT_NORM) {
565 ASSERT(new->br_state == XFS_EXT_NORM);
566 if ((error = xfs_bmap_add_extent_unwritten_real(
567 ip, idx, &cur, new, &logflags)))
568 goto done;
569 } else { 530 } else {
570 ASSERT(new->br_state == XFS_EXT_UNWRITTEN); 531 ASSERT(new->br_state == XFS_EXT_NORM ||
571 if ((error = xfs_bmap_add_extent_unwritten_real( 532 new->br_state == XFS_EXT_UNWRITTEN);
572 ip, idx, &cur, new, &logflags))) 533
534 error = xfs_bmap_add_extent_unwritten_real(ip,
535 idx, &cur, new, &logflags);
536 if (error)
573 goto done; 537 goto done;
574 } 538 }
575 ASSERT(*curp == cur || *curp == NULL);
576 } 539 }
577 /* 540 /*
578 * Otherwise we're filling in a hole with an allocation. 541 * Otherwise we're filling in a hole with an allocation.
@@ -581,13 +544,15 @@ xfs_bmap_add_extent(
581 if (cur) 544 if (cur)
582 ASSERT((cur->bc_private.b.flags & 545 ASSERT((cur->bc_private.b.flags &
583 XFS_BTCUR_BPRV_WASDEL) == 0); 546 XFS_BTCUR_BPRV_WASDEL) == 0);
584 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, 547 error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
585 new, &logflags, whichfork))) 548 new, &logflags, whichfork);
586 goto done;
587 } 549 }
588 } 550 }
589 551
552 if (error)
553 goto done;
590 ASSERT(*curp == cur || *curp == NULL); 554 ASSERT(*curp == cur || *curp == NULL);
555
591 /* 556 /*
592 * Convert to a btree if necessary. 557 * Convert to a btree if necessary.
593 */ 558 */
@@ -615,7 +580,7 @@ xfs_bmap_add_extent(
615 ASSERT(nblks <= da_old); 580 ASSERT(nblks <= da_old);
616 if (nblks < da_old) 581 if (nblks < da_old)
617 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, 582 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
618 (int64_t)(da_old - nblks), rsvd); 583 (int64_t)(da_old - nblks), 0);
619 } 584 }
620 /* 585 /*
621 * Clear out the allocated field, done with it now in any case. 586 * Clear out the allocated field, done with it now in any case.
@@ -640,14 +605,13 @@ done:
640STATIC int /* error */ 605STATIC int /* error */
641xfs_bmap_add_extent_delay_real( 606xfs_bmap_add_extent_delay_real(
642 xfs_inode_t *ip, /* incore inode pointer */ 607 xfs_inode_t *ip, /* incore inode pointer */
643 xfs_extnum_t idx, /* extent number to update/insert */ 608 xfs_extnum_t *idx, /* extent number to update/insert */
644 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 609 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
645 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 610 xfs_bmbt_irec_t *new, /* new data to add to file extents */
646 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ 611 xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */
647 xfs_fsblock_t *first, /* pointer to firstblock variable */ 612 xfs_fsblock_t *first, /* pointer to firstblock variable */
648 xfs_bmap_free_t *flist, /* list of extents to be freed */ 613 xfs_bmap_free_t *flist, /* list of extents to be freed */
649 int *logflagsp, /* inode logging flags */ 614 int *logflagsp) /* inode logging flags */
650 int rsvd) /* OK to use reserved data block allocation */
651{ 615{
652 xfs_btree_cur_t *cur; /* btree cursor */ 616 xfs_btree_cur_t *cur; /* btree cursor */
653 int diff; /* temp value */ 617 int diff; /* temp value */
@@ -673,7 +637,7 @@ xfs_bmap_add_extent_delay_real(
673 */ 637 */
674 cur = *curp; 638 cur = *curp;
675 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 639 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
676 ep = xfs_iext_get_ext(ifp, idx); 640 ep = xfs_iext_get_ext(ifp, *idx);
677 xfs_bmbt_get_all(ep, &PREV); 641 xfs_bmbt_get_all(ep, &PREV);
678 new_endoff = new->br_startoff + new->br_blockcount; 642 new_endoff = new->br_startoff + new->br_blockcount;
679 ASSERT(PREV.br_startoff <= new->br_startoff); 643 ASSERT(PREV.br_startoff <= new->br_startoff);
@@ -692,9 +656,9 @@ xfs_bmap_add_extent_delay_real(
692 * Check and set flags if this segment has a left neighbor. 656 * Check and set flags if this segment has a left neighbor.
693 * Don't set contiguous if the combined extent would be too large. 657 * Don't set contiguous if the combined extent would be too large.
694 */ 658 */
695 if (idx > 0) { 659 if (*idx > 0) {
696 state |= BMAP_LEFT_VALID; 660 state |= BMAP_LEFT_VALID;
697 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); 661 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
698 662
699 if (isnullstartblock(LEFT.br_startblock)) 663 if (isnullstartblock(LEFT.br_startblock))
700 state |= BMAP_LEFT_DELAY; 664 state |= BMAP_LEFT_DELAY;
@@ -712,9 +676,9 @@ xfs_bmap_add_extent_delay_real(
712 * Don't set contiguous if the combined extent would be too large. 676 * Don't set contiguous if the combined extent would be too large.
713 * Also check for all-three-contiguous being too large. 677 * Also check for all-three-contiguous being too large.
714 */ 678 */
715 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { 679 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
716 state |= BMAP_RIGHT_VALID; 680 state |= BMAP_RIGHT_VALID;
717 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); 681 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
718 682
719 if (isnullstartblock(RIGHT.br_startblock)) 683 if (isnullstartblock(RIGHT.br_startblock))
720 state |= BMAP_RIGHT_DELAY; 684 state |= BMAP_RIGHT_DELAY;
@@ -745,14 +709,14 @@ xfs_bmap_add_extent_delay_real(
745 * Filling in all of a previously delayed allocation extent. 709 * Filling in all of a previously delayed allocation extent.
746 * The left and right neighbors are both contiguous with new. 710 * The left and right neighbors are both contiguous with new.
747 */ 711 */
748 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 712 --*idx;
749 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 713 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
714 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
750 LEFT.br_blockcount + PREV.br_blockcount + 715 LEFT.br_blockcount + PREV.br_blockcount +
751 RIGHT.br_blockcount); 716 RIGHT.br_blockcount);
752 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 717 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
753 718
754 xfs_iext_remove(ip, idx, 2, state); 719 xfs_iext_remove(ip, *idx + 1, 2, state);
755 ip->i_df.if_lastex = idx - 1;
756 ip->i_d.di_nextents--; 720 ip->i_d.di_nextents--;
757 if (cur == NULL) 721 if (cur == NULL)
758 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 722 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -784,13 +748,14 @@ xfs_bmap_add_extent_delay_real(
784 * Filling in all of a previously delayed allocation extent. 748 * Filling in all of a previously delayed allocation extent.
785 * The left neighbor is contiguous, the right is not. 749 * The left neighbor is contiguous, the right is not.
786 */ 750 */
787 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 751 --*idx;
788 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 752
753 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
754 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
789 LEFT.br_blockcount + PREV.br_blockcount); 755 LEFT.br_blockcount + PREV.br_blockcount);
790 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 756 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
791 757
792 ip->i_df.if_lastex = idx - 1; 758 xfs_iext_remove(ip, *idx + 1, 1, state);
793 xfs_iext_remove(ip, idx, 1, state);
794 if (cur == NULL) 759 if (cur == NULL)
795 rval = XFS_ILOG_DEXT; 760 rval = XFS_ILOG_DEXT;
796 else { 761 else {
@@ -814,14 +779,13 @@ xfs_bmap_add_extent_delay_real(
814 * Filling in all of a previously delayed allocation extent. 779 * Filling in all of a previously delayed allocation extent.
815 * The right neighbor is contiguous, the left is not. 780 * The right neighbor is contiguous, the left is not.
816 */ 781 */
817 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 782 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
818 xfs_bmbt_set_startblock(ep, new->br_startblock); 783 xfs_bmbt_set_startblock(ep, new->br_startblock);
819 xfs_bmbt_set_blockcount(ep, 784 xfs_bmbt_set_blockcount(ep,
820 PREV.br_blockcount + RIGHT.br_blockcount); 785 PREV.br_blockcount + RIGHT.br_blockcount);
821 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 786 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
822 787
823 ip->i_df.if_lastex = idx; 788 xfs_iext_remove(ip, *idx + 1, 1, state);
824 xfs_iext_remove(ip, idx + 1, 1, state);
825 if (cur == NULL) 789 if (cur == NULL)
826 rval = XFS_ILOG_DEXT; 790 rval = XFS_ILOG_DEXT;
827 else { 791 else {
@@ -837,6 +801,7 @@ xfs_bmap_add_extent_delay_real(
837 RIGHT.br_blockcount, PREV.br_state))) 801 RIGHT.br_blockcount, PREV.br_state)))
838 goto done; 802 goto done;
839 } 803 }
804
840 *dnew = 0; 805 *dnew = 0;
841 break; 806 break;
842 807
@@ -846,11 +811,10 @@ xfs_bmap_add_extent_delay_real(
846 * Neither the left nor right neighbors are contiguous with 811 * Neither the left nor right neighbors are contiguous with
847 * the new one. 812 * the new one.
848 */ 813 */
849 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 814 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
850 xfs_bmbt_set_startblock(ep, new->br_startblock); 815 xfs_bmbt_set_startblock(ep, new->br_startblock);
851 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 816 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
852 817
853 ip->i_df.if_lastex = idx;
854 ip->i_d.di_nextents++; 818 ip->i_d.di_nextents++;
855 if (cur == NULL) 819 if (cur == NULL)
856 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 820 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -866,6 +830,7 @@ xfs_bmap_add_extent_delay_real(
866 goto done; 830 goto done;
867 XFS_WANT_CORRUPTED_GOTO(i == 1, done); 831 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
868 } 832 }
833
869 *dnew = 0; 834 *dnew = 0;
870 break; 835 break;
871 836
@@ -874,17 +839,16 @@ xfs_bmap_add_extent_delay_real(
874 * Filling in the first part of a previous delayed allocation. 839 * Filling in the first part of a previous delayed allocation.
875 * The left neighbor is contiguous. 840 * The left neighbor is contiguous.
876 */ 841 */
877 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 842 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
878 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 843 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
879 LEFT.br_blockcount + new->br_blockcount); 844 LEFT.br_blockcount + new->br_blockcount);
880 xfs_bmbt_set_startoff(ep, 845 xfs_bmbt_set_startoff(ep,
881 PREV.br_startoff + new->br_blockcount); 846 PREV.br_startoff + new->br_blockcount);
882 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 847 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
883 848
884 temp = PREV.br_blockcount - new->br_blockcount; 849 temp = PREV.br_blockcount - new->br_blockcount;
885 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 850 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
886 xfs_bmbt_set_blockcount(ep, temp); 851 xfs_bmbt_set_blockcount(ep, temp);
887 ip->i_df.if_lastex = idx - 1;
888 if (cur == NULL) 852 if (cur == NULL)
889 rval = XFS_ILOG_DEXT; 853 rval = XFS_ILOG_DEXT;
890 else { 854 else {
@@ -904,7 +868,9 @@ xfs_bmap_add_extent_delay_real(
904 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 868 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
905 startblockval(PREV.br_startblock)); 869 startblockval(PREV.br_startblock));
906 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 870 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
907 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 871 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
872
873 --*idx;
908 *dnew = temp; 874 *dnew = temp;
909 break; 875 break;
910 876
@@ -913,12 +879,11 @@ xfs_bmap_add_extent_delay_real(
913 * Filling in the first part of a previous delayed allocation. 879 * Filling in the first part of a previous delayed allocation.
914 * The left neighbor is not contiguous. 880 * The left neighbor is not contiguous.
915 */ 881 */
916 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 882 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
917 xfs_bmbt_set_startoff(ep, new_endoff); 883 xfs_bmbt_set_startoff(ep, new_endoff);
918 temp = PREV.br_blockcount - new->br_blockcount; 884 temp = PREV.br_blockcount - new->br_blockcount;
919 xfs_bmbt_set_blockcount(ep, temp); 885 xfs_bmbt_set_blockcount(ep, temp);
920 xfs_iext_insert(ip, idx, 1, new, state); 886 xfs_iext_insert(ip, *idx, 1, new, state);
921 ip->i_df.if_lastex = idx;
922 ip->i_d.di_nextents++; 887 ip->i_d.di_nextents++;
923 if (cur == NULL) 888 if (cur == NULL)
924 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 889 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -946,9 +911,10 @@ xfs_bmap_add_extent_delay_real(
946 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 911 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
947 startblockval(PREV.br_startblock) - 912 startblockval(PREV.br_startblock) -
948 (cur ? cur->bc_private.b.allocated : 0)); 913 (cur ? cur->bc_private.b.allocated : 0));
949 ep = xfs_iext_get_ext(ifp, idx + 1); 914 ep = xfs_iext_get_ext(ifp, *idx + 1);
950 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 915 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
951 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); 916 trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);
917
952 *dnew = temp; 918 *dnew = temp;
953 break; 919 break;
954 920
@@ -958,15 +924,13 @@ xfs_bmap_add_extent_delay_real(
958 * The right neighbor is contiguous with the new allocation. 924 * The right neighbor is contiguous with the new allocation.
959 */ 925 */
960 temp = PREV.br_blockcount - new->br_blockcount; 926 temp = PREV.br_blockcount - new->br_blockcount;
961 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 927 trace_xfs_bmap_pre_update(ip, *idx + 1, state, _THIS_IP_);
962 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
963 xfs_bmbt_set_blockcount(ep, temp); 928 xfs_bmbt_set_blockcount(ep, temp);
964 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), 929 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx + 1),
965 new->br_startoff, new->br_startblock, 930 new->br_startoff, new->br_startblock,
966 new->br_blockcount + RIGHT.br_blockcount, 931 new->br_blockcount + RIGHT.br_blockcount,
967 RIGHT.br_state); 932 RIGHT.br_state);
968 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); 933 trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);
969 ip->i_df.if_lastex = idx + 1;
970 if (cur == NULL) 934 if (cur == NULL)
971 rval = XFS_ILOG_DEXT; 935 rval = XFS_ILOG_DEXT;
972 else { 936 else {
@@ -983,10 +947,14 @@ xfs_bmap_add_extent_delay_real(
983 RIGHT.br_state))) 947 RIGHT.br_state)))
984 goto done; 948 goto done;
985 } 949 }
950
986 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 951 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
987 startblockval(PREV.br_startblock)); 952 startblockval(PREV.br_startblock));
953 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
988 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 954 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
989 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 955 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
956
957 ++*idx;
990 *dnew = temp; 958 *dnew = temp;
991 break; 959 break;
992 960
@@ -996,10 +964,9 @@ xfs_bmap_add_extent_delay_real(
996 * The right neighbor is not contiguous. 964 * The right neighbor is not contiguous.
997 */ 965 */
998 temp = PREV.br_blockcount - new->br_blockcount; 966 temp = PREV.br_blockcount - new->br_blockcount;
999 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 967 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1000 xfs_bmbt_set_blockcount(ep, temp); 968 xfs_bmbt_set_blockcount(ep, temp);
1001 xfs_iext_insert(ip, idx + 1, 1, new, state); 969 xfs_iext_insert(ip, *idx + 1, 1, new, state);
1002 ip->i_df.if_lastex = idx + 1;
1003 ip->i_d.di_nextents++; 970 ip->i_d.di_nextents++;
1004 if (cur == NULL) 971 if (cur == NULL)
1005 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 972 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1027,9 +994,11 @@ xfs_bmap_add_extent_delay_real(
1027 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 994 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1028 startblockval(PREV.br_startblock) - 995 startblockval(PREV.br_startblock) -
1029 (cur ? cur->bc_private.b.allocated : 0)); 996 (cur ? cur->bc_private.b.allocated : 0));
1030 ep = xfs_iext_get_ext(ifp, idx); 997 ep = xfs_iext_get_ext(ifp, *idx);
1031 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 998 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1032 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 999 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1000
1001 ++*idx;
1033 *dnew = temp; 1002 *dnew = temp;
1034 break; 1003 break;
1035 1004
@@ -1056,7 +1025,7 @@ xfs_bmap_add_extent_delay_real(
1056 */ 1025 */
1057 temp = new->br_startoff - PREV.br_startoff; 1026 temp = new->br_startoff - PREV.br_startoff;
1058 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; 1027 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1059 trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_); 1028 trace_xfs_bmap_pre_update(ip, *idx, 0, _THIS_IP_);
1060 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ 1029 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
1061 LEFT = *new; 1030 LEFT = *new;
1062 RIGHT.br_state = PREV.br_state; 1031 RIGHT.br_state = PREV.br_state;
@@ -1065,8 +1034,7 @@ xfs_bmap_add_extent_delay_real(
1065 RIGHT.br_startoff = new_endoff; 1034 RIGHT.br_startoff = new_endoff;
1066 RIGHT.br_blockcount = temp2; 1035 RIGHT.br_blockcount = temp2;
1067 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ 1036 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
1068 xfs_iext_insert(ip, idx + 1, 2, &LEFT, state); 1037 xfs_iext_insert(ip, *idx + 1, 2, &LEFT, state);
1069 ip->i_df.if_lastex = idx + 1;
1070 ip->i_d.di_nextents++; 1038 ip->i_d.di_nextents++;
1071 if (cur == NULL) 1039 if (cur == NULL)
1072 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1040 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1097,7 +1065,7 @@ xfs_bmap_add_extent_delay_real(
1097 (cur ? cur->bc_private.b.allocated : 0)); 1065 (cur ? cur->bc_private.b.allocated : 0));
1098 if (diff > 0 && 1066 if (diff > 0 &&
1099 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, 1067 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
1100 -((int64_t)diff), rsvd)) { 1068 -((int64_t)diff), 0)) {
1101 /* 1069 /*
1102 * Ick gross gag me with a spoon. 1070 * Ick gross gag me with a spoon.
1103 */ 1071 */
@@ -1109,7 +1077,7 @@ xfs_bmap_add_extent_delay_real(
1109 if (!diff || 1077 if (!diff ||
1110 !xfs_icsb_modify_counters(ip->i_mount, 1078 !xfs_icsb_modify_counters(ip->i_mount,
1111 XFS_SBS_FDBLOCKS, 1079 XFS_SBS_FDBLOCKS,
1112 -((int64_t)diff), rsvd)) 1080 -((int64_t)diff), 0))
1113 break; 1081 break;
1114 } 1082 }
1115 if (temp2) { 1083 if (temp2) {
@@ -1118,18 +1086,20 @@ xfs_bmap_add_extent_delay_real(
1118 if (!diff || 1086 if (!diff ||
1119 !xfs_icsb_modify_counters(ip->i_mount, 1087 !xfs_icsb_modify_counters(ip->i_mount,
1120 XFS_SBS_FDBLOCKS, 1088 XFS_SBS_FDBLOCKS,
1121 -((int64_t)diff), rsvd)) 1089 -((int64_t)diff), 0))
1122 break; 1090 break;
1123 } 1091 }
1124 } 1092 }
1125 } 1093 }
1126 ep = xfs_iext_get_ext(ifp, idx); 1094 ep = xfs_iext_get_ext(ifp, *idx);
1127 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 1095 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1128 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1096 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1129 trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_); 1097 trace_xfs_bmap_pre_update(ip, *idx + 2, state, _THIS_IP_);
1130 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), 1098 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx + 2),
1131 nullstartblock((int)temp2)); 1099 nullstartblock((int)temp2));
1132 trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_); 1100 trace_xfs_bmap_post_update(ip, *idx + 2, state, _THIS_IP_);
1101
1102 ++*idx;
1133 *dnew = temp + temp2; 1103 *dnew = temp + temp2;
1134 break; 1104 break;
1135 1105
@@ -1161,7 +1131,7 @@ done:
1161STATIC int /* error */ 1131STATIC int /* error */
1162xfs_bmap_add_extent_unwritten_real( 1132xfs_bmap_add_extent_unwritten_real(
1163 xfs_inode_t *ip, /* incore inode pointer */ 1133 xfs_inode_t *ip, /* incore inode pointer */
1164 xfs_extnum_t idx, /* extent number to update/insert */ 1134 xfs_extnum_t *idx, /* extent number to update/insert */
1165 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ 1135 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
1166 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1136 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1167 int *logflagsp) /* inode logging flags */ 1137 int *logflagsp) /* inode logging flags */
@@ -1188,7 +1158,7 @@ xfs_bmap_add_extent_unwritten_real(
1188 error = 0; 1158 error = 0;
1189 cur = *curp; 1159 cur = *curp;
1190 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1160 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1191 ep = xfs_iext_get_ext(ifp, idx); 1161 ep = xfs_iext_get_ext(ifp, *idx);
1192 xfs_bmbt_get_all(ep, &PREV); 1162 xfs_bmbt_get_all(ep, &PREV);
1193 newext = new->br_state; 1163 newext = new->br_state;
1194 oldext = (newext == XFS_EXT_UNWRITTEN) ? 1164 oldext = (newext == XFS_EXT_UNWRITTEN) ?
@@ -1211,9 +1181,9 @@ xfs_bmap_add_extent_unwritten_real(
1211 * Check and set flags if this segment has a left neighbor. 1181 * Check and set flags if this segment has a left neighbor.
1212 * Don't set contiguous if the combined extent would be too large. 1182 * Don't set contiguous if the combined extent would be too large.
1213 */ 1183 */
1214 if (idx > 0) { 1184 if (*idx > 0) {
1215 state |= BMAP_LEFT_VALID; 1185 state |= BMAP_LEFT_VALID;
1216 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); 1186 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
1217 1187
1218 if (isnullstartblock(LEFT.br_startblock)) 1188 if (isnullstartblock(LEFT.br_startblock))
1219 state |= BMAP_LEFT_DELAY; 1189 state |= BMAP_LEFT_DELAY;
@@ -1231,9 +1201,9 @@ xfs_bmap_add_extent_unwritten_real(
1231 * Don't set contiguous if the combined extent would be too large. 1201 * Don't set contiguous if the combined extent would be too large.
1232 * Also check for all-three-contiguous being too large. 1202 * Also check for all-three-contiguous being too large.
1233 */ 1203 */
1234 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { 1204 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1235 state |= BMAP_RIGHT_VALID; 1205 state |= BMAP_RIGHT_VALID;
1236 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); 1206 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
1237 if (isnullstartblock(RIGHT.br_startblock)) 1207 if (isnullstartblock(RIGHT.br_startblock))
1238 state |= BMAP_RIGHT_DELAY; 1208 state |= BMAP_RIGHT_DELAY;
1239 } 1209 }
@@ -1262,14 +1232,15 @@ xfs_bmap_add_extent_unwritten_real(
1262 * Setting all of a previous oldext extent to newext. 1232 * Setting all of a previous oldext extent to newext.
1263 * The left and right neighbors are both contiguous with new. 1233 * The left and right neighbors are both contiguous with new.
1264 */ 1234 */
1265 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 1235 --*idx;
1266 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1236
1237 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1238 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1267 LEFT.br_blockcount + PREV.br_blockcount + 1239 LEFT.br_blockcount + PREV.br_blockcount +
1268 RIGHT.br_blockcount); 1240 RIGHT.br_blockcount);
1269 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1241 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1270 1242
1271 xfs_iext_remove(ip, idx, 2, state); 1243 xfs_iext_remove(ip, *idx + 1, 2, state);
1272 ip->i_df.if_lastex = idx - 1;
1273 ip->i_d.di_nextents -= 2; 1244 ip->i_d.di_nextents -= 2;
1274 if (cur == NULL) 1245 if (cur == NULL)
1275 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1246 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1305,13 +1276,14 @@ xfs_bmap_add_extent_unwritten_real(
1305 * Setting all of a previous oldext extent to newext. 1276 * Setting all of a previous oldext extent to newext.
1306 * The left neighbor is contiguous, the right is not. 1277 * The left neighbor is contiguous, the right is not.
1307 */ 1278 */
1308 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 1279 --*idx;
1309 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1280
1281 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1282 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1310 LEFT.br_blockcount + PREV.br_blockcount); 1283 LEFT.br_blockcount + PREV.br_blockcount);
1311 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1284 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1312 1285
1313 ip->i_df.if_lastex = idx - 1; 1286 xfs_iext_remove(ip, *idx + 1, 1, state);
1314 xfs_iext_remove(ip, idx, 1, state);
1315 ip->i_d.di_nextents--; 1287 ip->i_d.di_nextents--;
1316 if (cur == NULL) 1288 if (cur == NULL)
1317 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1289 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1341,13 +1313,12 @@ xfs_bmap_add_extent_unwritten_real(
1341 * Setting all of a previous oldext extent to newext. 1313 * Setting all of a previous oldext extent to newext.
1342 * The right neighbor is contiguous, the left is not. 1314 * The right neighbor is contiguous, the left is not.
1343 */ 1315 */
1344 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1316 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1345 xfs_bmbt_set_blockcount(ep, 1317 xfs_bmbt_set_blockcount(ep,
1346 PREV.br_blockcount + RIGHT.br_blockcount); 1318 PREV.br_blockcount + RIGHT.br_blockcount);
1347 xfs_bmbt_set_state(ep, newext); 1319 xfs_bmbt_set_state(ep, newext);
1348 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1320 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1349 ip->i_df.if_lastex = idx; 1321 xfs_iext_remove(ip, *idx + 1, 1, state);
1350 xfs_iext_remove(ip, idx + 1, 1, state);
1351 ip->i_d.di_nextents--; 1322 ip->i_d.di_nextents--;
1352 if (cur == NULL) 1323 if (cur == NULL)
1353 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1324 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1378,11 +1349,10 @@ xfs_bmap_add_extent_unwritten_real(
1378 * Neither the left nor right neighbors are contiguous with 1349 * Neither the left nor right neighbors are contiguous with
1379 * the new one. 1350 * the new one.
1380 */ 1351 */
1381 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1352 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1382 xfs_bmbt_set_state(ep, newext); 1353 xfs_bmbt_set_state(ep, newext);
1383 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1354 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1384 1355
1385 ip->i_df.if_lastex = idx;
1386 if (cur == NULL) 1356 if (cur == NULL)
1387 rval = XFS_ILOG_DEXT; 1357 rval = XFS_ILOG_DEXT;
1388 else { 1358 else {
@@ -1404,21 +1374,22 @@ xfs_bmap_add_extent_unwritten_real(
1404 * Setting the first part of a previous oldext extent to newext. 1374 * Setting the first part of a previous oldext extent to newext.
1405 * The left neighbor is contiguous. 1375 * The left neighbor is contiguous.
1406 */ 1376 */
1407 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 1377 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
1408 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1378 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
1409 LEFT.br_blockcount + new->br_blockcount); 1379 LEFT.br_blockcount + new->br_blockcount);
1410 xfs_bmbt_set_startoff(ep, 1380 xfs_bmbt_set_startoff(ep,
1411 PREV.br_startoff + new->br_blockcount); 1381 PREV.br_startoff + new->br_blockcount);
1412 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1382 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
1413 1383
1414 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1384 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1415 xfs_bmbt_set_startblock(ep, 1385 xfs_bmbt_set_startblock(ep,
1416 new->br_startblock + new->br_blockcount); 1386 new->br_startblock + new->br_blockcount);
1417 xfs_bmbt_set_blockcount(ep, 1387 xfs_bmbt_set_blockcount(ep,
1418 PREV.br_blockcount - new->br_blockcount); 1388 PREV.br_blockcount - new->br_blockcount);
1419 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1389 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1390
1391 --*idx;
1420 1392
1421 ip->i_df.if_lastex = idx - 1;
1422 if (cur == NULL) 1393 if (cur == NULL)
1423 rval = XFS_ILOG_DEXT; 1394 rval = XFS_ILOG_DEXT;
1424 else { 1395 else {
@@ -1449,17 +1420,16 @@ xfs_bmap_add_extent_unwritten_real(
1449 * Setting the first part of a previous oldext extent to newext. 1420 * Setting the first part of a previous oldext extent to newext.
1450 * The left neighbor is not contiguous. 1421 * The left neighbor is not contiguous.
1451 */ 1422 */
1452 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1423 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1453 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 1424 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1454 xfs_bmbt_set_startoff(ep, new_endoff); 1425 xfs_bmbt_set_startoff(ep, new_endoff);
1455 xfs_bmbt_set_blockcount(ep, 1426 xfs_bmbt_set_blockcount(ep,
1456 PREV.br_blockcount - new->br_blockcount); 1427 PREV.br_blockcount - new->br_blockcount);
1457 xfs_bmbt_set_startblock(ep, 1428 xfs_bmbt_set_startblock(ep,
1458 new->br_startblock + new->br_blockcount); 1429 new->br_startblock + new->br_blockcount);
1459 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1430 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1460 1431
1461 xfs_iext_insert(ip, idx, 1, new, state); 1432 xfs_iext_insert(ip, *idx, 1, new, state);
1462 ip->i_df.if_lastex = idx;
1463 ip->i_d.di_nextents++; 1433 ip->i_d.di_nextents++;
1464 if (cur == NULL) 1434 if (cur == NULL)
1465 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1435 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1488,17 +1458,19 @@ xfs_bmap_add_extent_unwritten_real(
1488 * Setting the last part of a previous oldext extent to newext. 1458 * Setting the last part of a previous oldext extent to newext.
1489 * The right neighbor is contiguous with the new allocation. 1459 * The right neighbor is contiguous with the new allocation.
1490 */ 1460 */
1491 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1461 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1492 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1493 xfs_bmbt_set_blockcount(ep, 1462 xfs_bmbt_set_blockcount(ep,
1494 PREV.br_blockcount - new->br_blockcount); 1463 PREV.br_blockcount - new->br_blockcount);
1495 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1464 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1496 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), 1465
1466 ++*idx;
1467
1468 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1469 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
1497 new->br_startoff, new->br_startblock, 1470 new->br_startoff, new->br_startblock,
1498 new->br_blockcount + RIGHT.br_blockcount, newext); 1471 new->br_blockcount + RIGHT.br_blockcount, newext);
1499 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); 1472 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1500 1473
1501 ip->i_df.if_lastex = idx + 1;
1502 if (cur == NULL) 1474 if (cur == NULL)
1503 rval = XFS_ILOG_DEXT; 1475 rval = XFS_ILOG_DEXT;
1504 else { 1476 else {
@@ -1528,13 +1500,14 @@ xfs_bmap_add_extent_unwritten_real(
1528 * Setting the last part of a previous oldext extent to newext. 1500 * Setting the last part of a previous oldext extent to newext.
1529 * The right neighbor is not contiguous. 1501 * The right neighbor is not contiguous.
1530 */ 1502 */
1531 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1503 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1532 xfs_bmbt_set_blockcount(ep, 1504 xfs_bmbt_set_blockcount(ep,
1533 PREV.br_blockcount - new->br_blockcount); 1505 PREV.br_blockcount - new->br_blockcount);
1534 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1506 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1507
1508 ++*idx;
1509 xfs_iext_insert(ip, *idx, 1, new, state);
1535 1510
1536 xfs_iext_insert(ip, idx + 1, 1, new, state);
1537 ip->i_df.if_lastex = idx + 1;
1538 ip->i_d.di_nextents++; 1511 ip->i_d.di_nextents++;
1539 if (cur == NULL) 1512 if (cur == NULL)
1540 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1513 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1568,10 +1541,10 @@ xfs_bmap_add_extent_unwritten_real(
1568 * newext. Contiguity is impossible here. 1541 * newext. Contiguity is impossible here.
1569 * One extent becomes three extents. 1542 * One extent becomes three extents.
1570 */ 1543 */
1571 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1544 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1572 xfs_bmbt_set_blockcount(ep, 1545 xfs_bmbt_set_blockcount(ep,
1573 new->br_startoff - PREV.br_startoff); 1546 new->br_startoff - PREV.br_startoff);
1574 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1547 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1575 1548
1576 r[0] = *new; 1549 r[0] = *new;
1577 r[1].br_startoff = new_endoff; 1550 r[1].br_startoff = new_endoff;
@@ -1579,8 +1552,10 @@ xfs_bmap_add_extent_unwritten_real(
1579 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1552 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1580 r[1].br_startblock = new->br_startblock + new->br_blockcount; 1553 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1581 r[1].br_state = oldext; 1554 r[1].br_state = oldext;
1582 xfs_iext_insert(ip, idx + 1, 2, &r[0], state); 1555
1583 ip->i_df.if_lastex = idx + 1; 1556 ++*idx;
1557 xfs_iext_insert(ip, *idx, 2, &r[0], state);
1558
1584 ip->i_d.di_nextents += 2; 1559 ip->i_d.di_nextents += 2;
1585 if (cur == NULL) 1560 if (cur == NULL)
1586 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 1561 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1650,12 +1625,10 @@ done:
1650STATIC int /* error */ 1625STATIC int /* error */
1651xfs_bmap_add_extent_hole_delay( 1626xfs_bmap_add_extent_hole_delay(
1652 xfs_inode_t *ip, /* incore inode pointer */ 1627 xfs_inode_t *ip, /* incore inode pointer */
1653 xfs_extnum_t idx, /* extent number to update/insert */ 1628 xfs_extnum_t *idx, /* extent number to update/insert */
1654 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1629 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1655 int *logflagsp, /* inode logging flags */ 1630 int *logflagsp) /* inode logging flags */
1656 int rsvd) /* OK to allocate reserved blocks */
1657{ 1631{
1658 xfs_bmbt_rec_host_t *ep; /* extent record for idx */
1659 xfs_ifork_t *ifp; /* inode fork pointer */ 1632 xfs_ifork_t *ifp; /* inode fork pointer */
1660 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 1633 xfs_bmbt_irec_t left; /* left neighbor extent entry */
1661 xfs_filblks_t newlen=0; /* new indirect size */ 1634 xfs_filblks_t newlen=0; /* new indirect size */
@@ -1665,16 +1638,15 @@ xfs_bmap_add_extent_hole_delay(
1665 xfs_filblks_t temp=0; /* temp for indirect calculations */ 1638 xfs_filblks_t temp=0; /* temp for indirect calculations */
1666 1639
1667 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); 1640 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1668 ep = xfs_iext_get_ext(ifp, idx);
1669 state = 0; 1641 state = 0;
1670 ASSERT(isnullstartblock(new->br_startblock)); 1642 ASSERT(isnullstartblock(new->br_startblock));
1671 1643
1672 /* 1644 /*
1673 * Check and set flags if this segment has a left neighbor 1645 * Check and set flags if this segment has a left neighbor
1674 */ 1646 */
1675 if (idx > 0) { 1647 if (*idx > 0) {
1676 state |= BMAP_LEFT_VALID; 1648 state |= BMAP_LEFT_VALID;
1677 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); 1649 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
1678 1650
1679 if (isnullstartblock(left.br_startblock)) 1651 if (isnullstartblock(left.br_startblock))
1680 state |= BMAP_LEFT_DELAY; 1652 state |= BMAP_LEFT_DELAY;
@@ -1684,9 +1656,9 @@ xfs_bmap_add_extent_hole_delay(
1684 * Check and set flags if the current (right) segment exists. 1656 * Check and set flags if the current (right) segment exists.
1685 * If it doesn't exist, we're converting the hole at end-of-file. 1657 * If it doesn't exist, we're converting the hole at end-of-file.
1686 */ 1658 */
1687 if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { 1659 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1688 state |= BMAP_RIGHT_VALID; 1660 state |= BMAP_RIGHT_VALID;
1689 xfs_bmbt_get_all(ep, &right); 1661 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
1690 1662
1691 if (isnullstartblock(right.br_startblock)) 1663 if (isnullstartblock(right.br_startblock))
1692 state |= BMAP_RIGHT_DELAY; 1664 state |= BMAP_RIGHT_DELAY;
@@ -1719,21 +1691,21 @@ xfs_bmap_add_extent_hole_delay(
1719 * on the left and on the right. 1691 * on the left and on the right.
1720 * Merge all three into a single extent record. 1692 * Merge all three into a single extent record.
1721 */ 1693 */
1694 --*idx;
1722 temp = left.br_blockcount + new->br_blockcount + 1695 temp = left.br_blockcount + new->br_blockcount +
1723 right.br_blockcount; 1696 right.br_blockcount;
1724 1697
1725 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 1698 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1726 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1699 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
1727 oldlen = startblockval(left.br_startblock) + 1700 oldlen = startblockval(left.br_startblock) +
1728 startblockval(new->br_startblock) + 1701 startblockval(new->br_startblock) +
1729 startblockval(right.br_startblock); 1702 startblockval(right.br_startblock);
1730 newlen = xfs_bmap_worst_indlen(ip, temp); 1703 newlen = xfs_bmap_worst_indlen(ip, temp);
1731 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1704 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
1732 nullstartblock((int)newlen)); 1705 nullstartblock((int)newlen));
1733 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1706 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1734 1707
1735 xfs_iext_remove(ip, idx, 1, state); 1708 xfs_iext_remove(ip, *idx + 1, 1, state);
1736 ip->i_df.if_lastex = idx - 1;
1737 break; 1709 break;
1738 1710
1739 case BMAP_LEFT_CONTIG: 1711 case BMAP_LEFT_CONTIG:
@@ -1742,17 +1714,17 @@ xfs_bmap_add_extent_hole_delay(
1742 * on the left. 1714 * on the left.
1743 * Merge the new allocation with the left neighbor. 1715 * Merge the new allocation with the left neighbor.
1744 */ 1716 */
1717 --*idx;
1745 temp = left.br_blockcount + new->br_blockcount; 1718 temp = left.br_blockcount + new->br_blockcount;
1746 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 1719
1747 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1720 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1721 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
1748 oldlen = startblockval(left.br_startblock) + 1722 oldlen = startblockval(left.br_startblock) +
1749 startblockval(new->br_startblock); 1723 startblockval(new->br_startblock);
1750 newlen = xfs_bmap_worst_indlen(ip, temp); 1724 newlen = xfs_bmap_worst_indlen(ip, temp);
1751 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1725 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
1752 nullstartblock((int)newlen)); 1726 nullstartblock((int)newlen));
1753 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1727 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1754
1755 ip->i_df.if_lastex = idx - 1;
1756 break; 1728 break;
1757 1729
1758 case BMAP_RIGHT_CONTIG: 1730 case BMAP_RIGHT_CONTIG:
@@ -1761,16 +1733,15 @@ xfs_bmap_add_extent_hole_delay(
1761 * on the right. 1733 * on the right.
1762 * Merge the new allocation with the right neighbor. 1734 * Merge the new allocation with the right neighbor.
1763 */ 1735 */
1764 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1736 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1765 temp = new->br_blockcount + right.br_blockcount; 1737 temp = new->br_blockcount + right.br_blockcount;
1766 oldlen = startblockval(new->br_startblock) + 1738 oldlen = startblockval(new->br_startblock) +
1767 startblockval(right.br_startblock); 1739 startblockval(right.br_startblock);
1768 newlen = xfs_bmap_worst_indlen(ip, temp); 1740 newlen = xfs_bmap_worst_indlen(ip, temp);
1769 xfs_bmbt_set_allf(ep, new->br_startoff, 1741 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
1742 new->br_startoff,
1770 nullstartblock((int)newlen), temp, right.br_state); 1743 nullstartblock((int)newlen), temp, right.br_state);
1771 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1744 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1772
1773 ip->i_df.if_lastex = idx;
1774 break; 1745 break;
1775 1746
1776 case 0: 1747 case 0:
@@ -1780,14 +1751,13 @@ xfs_bmap_add_extent_hole_delay(
1780 * Insert a new entry. 1751 * Insert a new entry.
1781 */ 1752 */
1782 oldlen = newlen = 0; 1753 oldlen = newlen = 0;
1783 xfs_iext_insert(ip, idx, 1, new, state); 1754 xfs_iext_insert(ip, *idx, 1, new, state);
1784 ip->i_df.if_lastex = idx;
1785 break; 1755 break;
1786 } 1756 }
1787 if (oldlen != newlen) { 1757 if (oldlen != newlen) {
1788 ASSERT(oldlen > newlen); 1758 ASSERT(oldlen > newlen);
1789 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, 1759 xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
1790 (int64_t)(oldlen - newlen), rsvd); 1760 (int64_t)(oldlen - newlen), 0);
1791 /* 1761 /*
1792 * Nothing to do for disk quota accounting here. 1762 * Nothing to do for disk quota accounting here.
1793 */ 1763 */
@@ -1803,13 +1773,12 @@ xfs_bmap_add_extent_hole_delay(
1803STATIC int /* error */ 1773STATIC int /* error */
1804xfs_bmap_add_extent_hole_real( 1774xfs_bmap_add_extent_hole_real(
1805 xfs_inode_t *ip, /* incore inode pointer */ 1775 xfs_inode_t *ip, /* incore inode pointer */
1806 xfs_extnum_t idx, /* extent number to update/insert */ 1776 xfs_extnum_t *idx, /* extent number to update/insert */
1807 xfs_btree_cur_t *cur, /* if null, not a btree */ 1777 xfs_btree_cur_t *cur, /* if null, not a btree */
1808 xfs_bmbt_irec_t *new, /* new data to add to file extents */ 1778 xfs_bmbt_irec_t *new, /* new data to add to file extents */
1809 int *logflagsp, /* inode logging flags */ 1779 int *logflagsp, /* inode logging flags */
1810 int whichfork) /* data or attr fork */ 1780 int whichfork) /* data or attr fork */
1811{ 1781{
1812 xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */
1813 int error; /* error return value */ 1782 int error; /* error return value */
1814 int i; /* temp state */ 1783 int i; /* temp state */
1815 xfs_ifork_t *ifp; /* inode fork pointer */ 1784 xfs_ifork_t *ifp; /* inode fork pointer */
@@ -1819,8 +1788,7 @@ xfs_bmap_add_extent_hole_real(
1819 int state; /* state bits, accessed thru macros */ 1788 int state; /* state bits, accessed thru macros */
1820 1789
1821 ifp = XFS_IFORK_PTR(ip, whichfork); 1790 ifp = XFS_IFORK_PTR(ip, whichfork);
1822 ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 1791 ASSERT(*idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
1823 ep = xfs_iext_get_ext(ifp, idx);
1824 state = 0; 1792 state = 0;
1825 1793
1826 if (whichfork == XFS_ATTR_FORK) 1794 if (whichfork == XFS_ATTR_FORK)
@@ -1829,9 +1797,9 @@ xfs_bmap_add_extent_hole_real(
1829 /* 1797 /*
1830 * Check and set flags if this segment has a left neighbor. 1798 * Check and set flags if this segment has a left neighbor.
1831 */ 1799 */
1832 if (idx > 0) { 1800 if (*idx > 0) {
1833 state |= BMAP_LEFT_VALID; 1801 state |= BMAP_LEFT_VALID;
1834 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); 1802 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
1835 if (isnullstartblock(left.br_startblock)) 1803 if (isnullstartblock(left.br_startblock))
1836 state |= BMAP_LEFT_DELAY; 1804 state |= BMAP_LEFT_DELAY;
1837 } 1805 }
@@ -1840,9 +1808,9 @@ xfs_bmap_add_extent_hole_real(
1840 * Check and set flags if this segment has a current value. 1808 * Check and set flags if this segment has a current value.
1841 * Not true if we're inserting into the "hole" at eof. 1809 * Not true if we're inserting into the "hole" at eof.
1842 */ 1810 */
1843 if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { 1811 if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1844 state |= BMAP_RIGHT_VALID; 1812 state |= BMAP_RIGHT_VALID;
1845 xfs_bmbt_get_all(ep, &right); 1813 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
1846 if (isnullstartblock(right.br_startblock)) 1814 if (isnullstartblock(right.br_startblock))
1847 state |= BMAP_RIGHT_DELAY; 1815 state |= BMAP_RIGHT_DELAY;
1848 } 1816 }
@@ -1879,14 +1847,15 @@ xfs_bmap_add_extent_hole_real(
1879 * left and on the right. 1847 * left and on the right.
1880 * Merge all three into a single extent record. 1848 * Merge all three into a single extent record.
1881 */ 1849 */
1882 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 1850 --*idx;
1883 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1851 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1852 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1884 left.br_blockcount + new->br_blockcount + 1853 left.br_blockcount + new->br_blockcount +
1885 right.br_blockcount); 1854 right.br_blockcount);
1886 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1855 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1856
1857 xfs_iext_remove(ip, *idx + 1, 1, state);
1887 1858
1888 xfs_iext_remove(ip, idx, 1, state);
1889 ifp->if_lastex = idx - 1;
1890 XFS_IFORK_NEXT_SET(ip, whichfork, 1859 XFS_IFORK_NEXT_SET(ip, whichfork,
1891 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 1860 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
1892 if (cur == NULL) { 1861 if (cur == NULL) {
@@ -1921,12 +1890,12 @@ xfs_bmap_add_extent_hole_real(
1921 * on the left. 1890 * on the left.
1922 * Merge the new allocation with the left neighbor. 1891 * Merge the new allocation with the left neighbor.
1923 */ 1892 */
1924 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); 1893 --*idx;
1925 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1894 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1895 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1926 left.br_blockcount + new->br_blockcount); 1896 left.br_blockcount + new->br_blockcount);
1927 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); 1897 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1928 1898
1929 ifp->if_lastex = idx - 1;
1930 if (cur == NULL) { 1899 if (cur == NULL) {
1931 rval = xfs_ilog_fext(whichfork); 1900 rval = xfs_ilog_fext(whichfork);
1932 } else { 1901 } else {
@@ -1952,13 +1921,13 @@ xfs_bmap_add_extent_hole_real(
1952 * on the right. 1921 * on the right.
1953 * Merge the new allocation with the right neighbor. 1922 * Merge the new allocation with the right neighbor.
1954 */ 1923 */
1955 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 1924 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1956 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, 1925 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
1926 new->br_startoff, new->br_startblock,
1957 new->br_blockcount + right.br_blockcount, 1927 new->br_blockcount + right.br_blockcount,
1958 right.br_state); 1928 right.br_state);
1959 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 1929 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1960 1930
1961 ifp->if_lastex = idx;
1962 if (cur == NULL) { 1931 if (cur == NULL) {
1963 rval = xfs_ilog_fext(whichfork); 1932 rval = xfs_ilog_fext(whichfork);
1964 } else { 1933 } else {
@@ -1984,8 +1953,7 @@ xfs_bmap_add_extent_hole_real(
1984 * real allocation. 1953 * real allocation.
1985 * Insert a new entry. 1954 * Insert a new entry.
1986 */ 1955 */
1987 xfs_iext_insert(ip, idx, 1, new, state); 1956 xfs_iext_insert(ip, *idx, 1, new, state);
1988 ifp->if_lastex = idx;
1989 XFS_IFORK_NEXT_SET(ip, whichfork, 1957 XFS_IFORK_NEXT_SET(ip, whichfork,
1990 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); 1958 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
1991 if (cur == NULL) { 1959 if (cur == NULL) {
@@ -2833,13 +2801,12 @@ STATIC int /* error */
2833xfs_bmap_del_extent( 2801xfs_bmap_del_extent(
2834 xfs_inode_t *ip, /* incore inode pointer */ 2802 xfs_inode_t *ip, /* incore inode pointer */
2835 xfs_trans_t *tp, /* current transaction pointer */ 2803 xfs_trans_t *tp, /* current transaction pointer */
2836 xfs_extnum_t idx, /* extent number to update/delete */ 2804 xfs_extnum_t *idx, /* extent number to update/delete */
2837 xfs_bmap_free_t *flist, /* list of extents to be freed */ 2805 xfs_bmap_free_t *flist, /* list of extents to be freed */
2838 xfs_btree_cur_t *cur, /* if null, not a btree */ 2806 xfs_btree_cur_t *cur, /* if null, not a btree */
2839 xfs_bmbt_irec_t *del, /* data to remove from extents */ 2807 xfs_bmbt_irec_t *del, /* data to remove from extents */
2840 int *logflagsp, /* inode logging flags */ 2808 int *logflagsp, /* inode logging flags */
2841 int whichfork, /* data or attr fork */ 2809 int whichfork) /* data or attr fork */
2842 int rsvd) /* OK to allocate reserved blocks */
2843{ 2810{
2844 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ 2811 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
2845 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ 2812 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
@@ -2870,10 +2837,10 @@ xfs_bmap_del_extent(
2870 2837
2871 mp = ip->i_mount; 2838 mp = ip->i_mount;
2872 ifp = XFS_IFORK_PTR(ip, whichfork); 2839 ifp = XFS_IFORK_PTR(ip, whichfork);
2873 ASSERT((idx >= 0) && (idx < ifp->if_bytes / 2840 ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
2874 (uint)sizeof(xfs_bmbt_rec_t))); 2841 (uint)sizeof(xfs_bmbt_rec_t)));
2875 ASSERT(del->br_blockcount > 0); 2842 ASSERT(del->br_blockcount > 0);
2876 ep = xfs_iext_get_ext(ifp, idx); 2843 ep = xfs_iext_get_ext(ifp, *idx);
2877 xfs_bmbt_get_all(ep, &got); 2844 xfs_bmbt_get_all(ep, &got);
2878 ASSERT(got.br_startoff <= del->br_startoff); 2845 ASSERT(got.br_startoff <= del->br_startoff);
2879 del_endoff = del->br_startoff + del->br_blockcount; 2846 del_endoff = del->br_startoff + del->br_blockcount;
@@ -2947,11 +2914,12 @@ xfs_bmap_del_extent(
2947 /* 2914 /*
2948 * Matches the whole extent. Delete the entry. 2915 * Matches the whole extent. Delete the entry.
2949 */ 2916 */
2950 xfs_iext_remove(ip, idx, 1, 2917 xfs_iext_remove(ip, *idx, 1,
2951 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); 2918 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
2952 ifp->if_lastex = idx; 2919 --*idx;
2953 if (delay) 2920 if (delay)
2954 break; 2921 break;
2922
2955 XFS_IFORK_NEXT_SET(ip, whichfork, 2923 XFS_IFORK_NEXT_SET(ip, whichfork,
2956 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); 2924 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2957 flags |= XFS_ILOG_CORE; 2925 flags |= XFS_ILOG_CORE;
@@ -2968,21 +2936,20 @@ xfs_bmap_del_extent(
2968 /* 2936 /*
2969 * Deleting the first part of the extent. 2937 * Deleting the first part of the extent.
2970 */ 2938 */
2971 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 2939 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2972 xfs_bmbt_set_startoff(ep, del_endoff); 2940 xfs_bmbt_set_startoff(ep, del_endoff);
2973 temp = got.br_blockcount - del->br_blockcount; 2941 temp = got.br_blockcount - del->br_blockcount;
2974 xfs_bmbt_set_blockcount(ep, temp); 2942 xfs_bmbt_set_blockcount(ep, temp);
2975 ifp->if_lastex = idx;
2976 if (delay) { 2943 if (delay) {
2977 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2944 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2978 da_old); 2945 da_old);
2979 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2946 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2980 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 2947 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2981 da_new = temp; 2948 da_new = temp;
2982 break; 2949 break;
2983 } 2950 }
2984 xfs_bmbt_set_startblock(ep, del_endblock); 2951 xfs_bmbt_set_startblock(ep, del_endblock);
2985 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 2952 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2986 if (!cur) { 2953 if (!cur) {
2987 flags |= xfs_ilog_fext(whichfork); 2954 flags |= xfs_ilog_fext(whichfork);
2988 break; 2955 break;
@@ -2998,18 +2965,17 @@ xfs_bmap_del_extent(
2998 * Deleting the last part of the extent. 2965 * Deleting the last part of the extent.
2999 */ 2966 */
3000 temp = got.br_blockcount - del->br_blockcount; 2967 temp = got.br_blockcount - del->br_blockcount;
3001 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 2968 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
3002 xfs_bmbt_set_blockcount(ep, temp); 2969 xfs_bmbt_set_blockcount(ep, temp);
3003 ifp->if_lastex = idx;
3004 if (delay) { 2970 if (delay) {
3005 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 2971 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3006 da_old); 2972 da_old);
3007 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); 2973 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3008 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 2974 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
3009 da_new = temp; 2975 da_new = temp;
3010 break; 2976 break;
3011 } 2977 }
3012 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 2978 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
3013 if (!cur) { 2979 if (!cur) {
3014 flags |= xfs_ilog_fext(whichfork); 2980 flags |= xfs_ilog_fext(whichfork);
3015 break; 2981 break;
@@ -3026,7 +2992,7 @@ xfs_bmap_del_extent(
3026 * Deleting the middle of the extent. 2992 * Deleting the middle of the extent.
3027 */ 2993 */
3028 temp = del->br_startoff - got.br_startoff; 2994 temp = del->br_startoff - got.br_startoff;
3029 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 2995 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
3030 xfs_bmbt_set_blockcount(ep, temp); 2996 xfs_bmbt_set_blockcount(ep, temp);
3031 new.br_startoff = del_endoff; 2997 new.br_startoff = del_endoff;
3032 temp2 = got_endoff - del_endoff; 2998 temp2 = got_endoff - del_endoff;
@@ -3113,9 +3079,9 @@ xfs_bmap_del_extent(
3113 } 3079 }
3114 } 3080 }
3115 } 3081 }
3116 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); 3082 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
3117 xfs_iext_insert(ip, idx + 1, 1, &new, state); 3083 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
3118 ifp->if_lastex = idx + 1; 3084 ++*idx;
3119 break; 3085 break;
3120 } 3086 }
3121 /* 3087 /*
@@ -3142,7 +3108,7 @@ xfs_bmap_del_extent(
3142 ASSERT(da_old >= da_new); 3108 ASSERT(da_old >= da_new);
3143 if (da_old > da_new) { 3109 if (da_old > da_new) {
3144 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 3110 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
3145 (int64_t)(da_old - da_new), rsvd); 3111 (int64_t)(da_old - da_new), 0);
3146 } 3112 }
3147done: 3113done:
3148 *logflagsp = flags; 3114 *logflagsp = flags;
@@ -4562,29 +4528,24 @@ xfs_bmapi(
4562 if (rt) { 4528 if (rt) {
4563 error = xfs_mod_incore_sb(mp, 4529 error = xfs_mod_incore_sb(mp,
4564 XFS_SBS_FREXTENTS, 4530 XFS_SBS_FREXTENTS,
4565 -((int64_t)extsz), (flags & 4531 -((int64_t)extsz), 0);
4566 XFS_BMAPI_RSVBLOCKS));
4567 } else { 4532 } else {
4568 error = xfs_icsb_modify_counters(mp, 4533 error = xfs_icsb_modify_counters(mp,
4569 XFS_SBS_FDBLOCKS, 4534 XFS_SBS_FDBLOCKS,
4570 -((int64_t)alen), (flags & 4535 -((int64_t)alen), 0);
4571 XFS_BMAPI_RSVBLOCKS));
4572 } 4536 }
4573 if (!error) { 4537 if (!error) {
4574 error = xfs_icsb_modify_counters(mp, 4538 error = xfs_icsb_modify_counters(mp,
4575 XFS_SBS_FDBLOCKS, 4539 XFS_SBS_FDBLOCKS,
4576 -((int64_t)indlen), (flags & 4540 -((int64_t)indlen), 0);
4577 XFS_BMAPI_RSVBLOCKS));
4578 if (error && rt) 4541 if (error && rt)
4579 xfs_mod_incore_sb(mp, 4542 xfs_mod_incore_sb(mp,
4580 XFS_SBS_FREXTENTS, 4543 XFS_SBS_FREXTENTS,
4581 (int64_t)extsz, (flags & 4544 (int64_t)extsz, 0);
4582 XFS_BMAPI_RSVBLOCKS));
4583 else if (error) 4545 else if (error)
4584 xfs_icsb_modify_counters(mp, 4546 xfs_icsb_modify_counters(mp,
4585 XFS_SBS_FDBLOCKS, 4547 XFS_SBS_FDBLOCKS,
4586 (int64_t)alen, (flags & 4548 (int64_t)alen, 0);
4587 XFS_BMAPI_RSVBLOCKS));
4588 } 4549 }
4589 4550
4590 if (error) { 4551 if (error) {
@@ -4701,13 +4662,12 @@ xfs_bmapi(
4701 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC)) 4662 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
4702 got.br_state = XFS_EXT_UNWRITTEN; 4663 got.br_state = XFS_EXT_UNWRITTEN;
4703 } 4664 }
4704 error = xfs_bmap_add_extent(ip, lastx, &cur, &got, 4665 error = xfs_bmap_add_extent(ip, &lastx, &cur, &got,
4705 firstblock, flist, &tmp_logflags, 4666 firstblock, flist, &tmp_logflags,
4706 whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); 4667 whichfork);
4707 logflags |= tmp_logflags; 4668 logflags |= tmp_logflags;
4708 if (error) 4669 if (error)
4709 goto error0; 4670 goto error0;
4710 lastx = ifp->if_lastex;
4711 ep = xfs_iext_get_ext(ifp, lastx); 4671 ep = xfs_iext_get_ext(ifp, lastx);
4712 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4672 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4713 xfs_bmbt_get_all(ep, &got); 4673 xfs_bmbt_get_all(ep, &got);
@@ -4803,13 +4763,12 @@ xfs_bmapi(
4803 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) 4763 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4804 ? XFS_EXT_NORM 4764 ? XFS_EXT_NORM
4805 : XFS_EXT_UNWRITTEN; 4765 : XFS_EXT_UNWRITTEN;
4806 error = xfs_bmap_add_extent(ip, lastx, &cur, mval, 4766 error = xfs_bmap_add_extent(ip, &lastx, &cur, mval,
4807 firstblock, flist, &tmp_logflags, 4767 firstblock, flist, &tmp_logflags,
4808 whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); 4768 whichfork);
4809 logflags |= tmp_logflags; 4769 logflags |= tmp_logflags;
4810 if (error) 4770 if (error)
4811 goto error0; 4771 goto error0;
4812 lastx = ifp->if_lastex;
4813 ep = xfs_iext_get_ext(ifp, lastx); 4772 ep = xfs_iext_get_ext(ifp, lastx);
4814 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 4773 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4815 xfs_bmbt_get_all(ep, &got); 4774 xfs_bmbt_get_all(ep, &got);
@@ -4868,14 +4827,14 @@ xfs_bmapi(
4868 /* 4827 /*
4869 * Else go on to the next record. 4828 * Else go on to the next record.
4870 */ 4829 */
4871 ep = xfs_iext_get_ext(ifp, ++lastx);
4872 prev = got; 4830 prev = got;
4873 if (lastx >= nextents) 4831 if (++lastx < nextents) {
4874 eof = 1; 4832 ep = xfs_iext_get_ext(ifp, lastx);
4875 else
4876 xfs_bmbt_get_all(ep, &got); 4833 xfs_bmbt_get_all(ep, &got);
4834 } else {
4835 eof = 1;
4836 }
4877 } 4837 }
4878 ifp->if_lastex = lastx;
4879 *nmap = n; 4838 *nmap = n;
4880 /* 4839 /*
4881 * Transform from btree to extents, give it cur. 4840 * Transform from btree to extents, give it cur.
@@ -4984,7 +4943,6 @@ xfs_bmapi_single(
4984 ASSERT(!isnullstartblock(got.br_startblock)); 4943 ASSERT(!isnullstartblock(got.br_startblock));
4985 ASSERT(bno < got.br_startoff + got.br_blockcount); 4944 ASSERT(bno < got.br_startoff + got.br_blockcount);
4986 *fsb = got.br_startblock + (bno - got.br_startoff); 4945 *fsb = got.br_startblock + (bno - got.br_startoff);
4987 ifp->if_lastex = lastx;
4988 return 0; 4946 return 0;
4989} 4947}
4990 4948
@@ -5026,7 +4984,6 @@ xfs_bunmapi(
5026 int tmp_logflags; /* partial logging flags */ 4984 int tmp_logflags; /* partial logging flags */
5027 int wasdel; /* was a delayed alloc extent */ 4985 int wasdel; /* was a delayed alloc extent */
5028 int whichfork; /* data or attribute fork */ 4986 int whichfork; /* data or attribute fork */
5029 int rsvd; /* OK to allocate reserved blocks */
5030 xfs_fsblock_t sum; 4987 xfs_fsblock_t sum;
5031 4988
5032 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); 4989 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
@@ -5044,7 +5001,7 @@ xfs_bunmapi(
5044 mp = ip->i_mount; 5001 mp = ip->i_mount;
5045 if (XFS_FORCED_SHUTDOWN(mp)) 5002 if (XFS_FORCED_SHUTDOWN(mp))
5046 return XFS_ERROR(EIO); 5003 return XFS_ERROR(EIO);
5047 rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; 5004
5048 ASSERT(len > 0); 5005 ASSERT(len > 0);
5049 ASSERT(nexts >= 0); 5006 ASSERT(nexts >= 0);
5050 ASSERT(ifp->if_ext_max == 5007 ASSERT(ifp->if_ext_max ==
@@ -5160,9 +5117,9 @@ xfs_bunmapi(
5160 del.br_blockcount = mod; 5117 del.br_blockcount = mod;
5161 } 5118 }
5162 del.br_state = XFS_EXT_UNWRITTEN; 5119 del.br_state = XFS_EXT_UNWRITTEN;
5163 error = xfs_bmap_add_extent(ip, lastx, &cur, &del, 5120 error = xfs_bmap_add_extent(ip, &lastx, &cur, &del,
5164 firstblock, flist, &logflags, 5121 firstblock, flist, &logflags,
5165 XFS_DATA_FORK, 0); 5122 XFS_DATA_FORK);
5166 if (error) 5123 if (error)
5167 goto error0; 5124 goto error0;
5168 goto nodelete; 5125 goto nodelete;
@@ -5188,9 +5145,12 @@ xfs_bunmapi(
5188 */ 5145 */
5189 ASSERT(bno >= del.br_blockcount); 5146 ASSERT(bno >= del.br_blockcount);
5190 bno -= del.br_blockcount; 5147 bno -= del.br_blockcount;
5191 if (bno < got.br_startoff) { 5148 if (got.br_startoff > bno) {
5192 if (--lastx >= 0) 5149 if (--lastx >= 0) {
5193 xfs_bmbt_get_all(--ep, &got); 5150 ep = xfs_iext_get_ext(ifp,
5151 lastx);
5152 xfs_bmbt_get_all(ep, &got);
5153 }
5194 } 5154 }
5195 continue; 5155 continue;
5196 } else if (del.br_state == XFS_EXT_UNWRITTEN) { 5156 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
@@ -5214,18 +5174,19 @@ xfs_bunmapi(
5214 prev.br_startoff = start; 5174 prev.br_startoff = start;
5215 } 5175 }
5216 prev.br_state = XFS_EXT_UNWRITTEN; 5176 prev.br_state = XFS_EXT_UNWRITTEN;
5217 error = xfs_bmap_add_extent(ip, lastx - 1, &cur, 5177 lastx--;
5178 error = xfs_bmap_add_extent(ip, &lastx, &cur,
5218 &prev, firstblock, flist, &logflags, 5179 &prev, firstblock, flist, &logflags,
5219 XFS_DATA_FORK, 0); 5180 XFS_DATA_FORK);
5220 if (error) 5181 if (error)
5221 goto error0; 5182 goto error0;
5222 goto nodelete; 5183 goto nodelete;
5223 } else { 5184 } else {
5224 ASSERT(del.br_state == XFS_EXT_NORM); 5185 ASSERT(del.br_state == XFS_EXT_NORM);
5225 del.br_state = XFS_EXT_UNWRITTEN; 5186 del.br_state = XFS_EXT_UNWRITTEN;
5226 error = xfs_bmap_add_extent(ip, lastx, &cur, 5187 error = xfs_bmap_add_extent(ip, &lastx, &cur,
5227 &del, firstblock, flist, &logflags, 5188 &del, firstblock, flist, &logflags,
5228 XFS_DATA_FORK, 0); 5189 XFS_DATA_FORK);
5229 if (error) 5190 if (error)
5230 goto error0; 5191 goto error0;
5231 goto nodelete; 5192 goto nodelete;
@@ -5240,13 +5201,13 @@ xfs_bunmapi(
5240 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); 5201 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5241 do_div(rtexts, mp->m_sb.sb_rextsize); 5202 do_div(rtexts, mp->m_sb.sb_rextsize);
5242 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, 5203 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5243 (int64_t)rtexts, rsvd); 5204 (int64_t)rtexts, 0);
5244 (void)xfs_trans_reserve_quota_nblks(NULL, 5205 (void)xfs_trans_reserve_quota_nblks(NULL,
5245 ip, -((long)del.br_blockcount), 0, 5206 ip, -((long)del.br_blockcount), 0,
5246 XFS_QMOPT_RES_RTBLKS); 5207 XFS_QMOPT_RES_RTBLKS);
5247 } else { 5208 } else {
5248 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 5209 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
5249 (int64_t)del.br_blockcount, rsvd); 5210 (int64_t)del.br_blockcount, 0);
5250 (void)xfs_trans_reserve_quota_nblks(NULL, 5211 (void)xfs_trans_reserve_quota_nblks(NULL,
5251 ip, -((long)del.br_blockcount), 0, 5212 ip, -((long)del.br_blockcount), 0,
5252 XFS_QMOPT_RES_REGBLKS); 5213 XFS_QMOPT_RES_REGBLKS);
@@ -5277,31 +5238,29 @@ xfs_bunmapi(
5277 error = XFS_ERROR(ENOSPC); 5238 error = XFS_ERROR(ENOSPC);
5278 goto error0; 5239 goto error0;
5279 } 5240 }
5280 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del, 5241 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
5281 &tmp_logflags, whichfork, rsvd); 5242 &tmp_logflags, whichfork);
5282 logflags |= tmp_logflags; 5243 logflags |= tmp_logflags;
5283 if (error) 5244 if (error)
5284 goto error0; 5245 goto error0;
5285 bno = del.br_startoff - 1; 5246 bno = del.br_startoff - 1;
5286nodelete: 5247nodelete:
5287 lastx = ifp->if_lastex;
5288 /* 5248 /*
5289 * If not done go on to the next (previous) record. 5249 * If not done go on to the next (previous) record.
5290 * Reset ep in case the extents array was re-alloced.
5291 */ 5250 */
5292 ep = xfs_iext_get_ext(ifp, lastx);
5293 if (bno != (xfs_fileoff_t)-1 && bno >= start) { 5251 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5294 if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) || 5252 if (lastx >= 0) {
5295 xfs_bmbt_get_startoff(ep) > bno) { 5253 ep = xfs_iext_get_ext(ifp, lastx);
5296 if (--lastx >= 0) 5254 if (xfs_bmbt_get_startoff(ep) > bno) {
5297 ep = xfs_iext_get_ext(ifp, lastx); 5255 if (--lastx >= 0)
5298 } 5256 ep = xfs_iext_get_ext(ifp,
5299 if (lastx >= 0) 5257 lastx);
5258 }
5300 xfs_bmbt_get_all(ep, &got); 5259 xfs_bmbt_get_all(ep, &got);
5260 }
5301 extno++; 5261 extno++;
5302 } 5262 }
5303 } 5263 }
5304 ifp->if_lastex = lastx;
5305 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0; 5264 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5306 ASSERT(ifp->if_ext_max == 5265 ASSERT(ifp->if_ext_max ==
5307 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); 5266 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
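
A minimal caller sketch of the new calling convention in xfs_bmap.c, based only on the hunks above: the extent index is now passed by pointer and updated in place by the helper, replacing the removed ifp->if_lastex round trip, and the reserved-blocks override is always 0 now that the rsvd plumbing is gone. The local variable names are the ones used in xfs_bmapi(); initial values are placeholders.

    xfs_extnum_t    lastx;          /* set by the earlier extent lookup */
    xfs_btree_cur_t *cur = NULL;
    int             tmp_logflags = 0;
    int             error;

    error = xfs_bmap_add_extent(ip, &lastx, &cur, &got,
                    firstblock, flist, &tmp_logflags, whichfork);
    if (error)
        goto error0;

    /* lastx already points at the updated/inserted record */
    ep = xfs_iext_get_ext(ifp, lastx);
    xfs_bmbt_get_all(ep, &got);
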
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 3651191daea1..c62234bde053 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -69,7 +69,6 @@ typedef struct xfs_bmap_free
69#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ 69#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */
70#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ 70#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */
71#define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */ 71#define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */
72#define XFS_BMAPI_RSVBLOCKS 0x020 /* OK to alloc. reserved data blocks */
73#define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */ 72#define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */
74#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */ 73#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */
75 /* combine contig. space */ 74 /* combine contig. space */
@@ -87,7 +86,6 @@ typedef struct xfs_bmap_free
87 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 86 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
88 { XFS_BMAPI_METADATA, "METADATA" }, \ 87 { XFS_BMAPI_METADATA, "METADATA" }, \
89 { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \ 88 { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
90 { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
91 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 89 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
92 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 90 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
93 { XFS_BMAPI_CONTIG, "CONTIG" }, \ 91 { XFS_BMAPI_CONTIG, "CONTIG" }, \
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index c8e3349c287c..a098a20ca63e 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -920,7 +920,6 @@ xfs_iread_extents(
920 /* 920 /*
921 * We know that the size is valid (it's checked in iformat_btree) 921 * We know that the size is valid (it's checked in iformat_btree)
922 */ 922 */
923 ifp->if_lastex = NULLEXTNUM;
924 ifp->if_bytes = ifp->if_real_bytes = 0; 923 ifp->if_bytes = ifp->if_real_bytes = 0;
925 ifp->if_flags |= XFS_IFEXTENTS; 924 ifp->if_flags |= XFS_IFEXTENTS;
926 xfs_iext_add(ifp, 0, nextents); 925 xfs_iext_add(ifp, 0, nextents);
@@ -2558,12 +2557,9 @@ xfs_iflush_fork(
2558 case XFS_DINODE_FMT_EXTENTS: 2557 case XFS_DINODE_FMT_EXTENTS:
2559 ASSERT((ifp->if_flags & XFS_IFEXTENTS) || 2558 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2560 !(iip->ili_format.ilf_fields & extflag[whichfork])); 2559 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2561 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2562 (ifp->if_bytes == 0));
2563 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2564 (ifp->if_bytes > 0));
2565 if ((iip->ili_format.ilf_fields & extflag[whichfork]) && 2560 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2566 (ifp->if_bytes > 0)) { 2561 (ifp->if_bytes > 0)) {
2562 ASSERT(xfs_iext_get_ext(ifp, 0));
2567 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); 2563 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2568 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, 2564 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2569 whichfork); 2565 whichfork);
@@ -3112,6 +3108,8 @@ xfs_iext_get_ext(
3112 xfs_extnum_t idx) /* index of target extent */ 3108 xfs_extnum_t idx) /* index of target extent */
3113{ 3109{
3114 ASSERT(idx >= 0); 3110 ASSERT(idx >= 0);
3111 ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3112
3115 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { 3113 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3116 return ifp->if_u1.if_ext_irec->er_extbuf; 3114 return ifp->if_u1.if_ext_irec->er_extbuf;
3117 } else if (ifp->if_flags & XFS_IFEXTIREC) { 3115 } else if (ifp->if_flags & XFS_IFEXTIREC) {
@@ -3191,7 +3189,6 @@ xfs_iext_add(
3191 } 3189 }
3192 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; 3190 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3193 ifp->if_real_bytes = 0; 3191 ifp->if_real_bytes = 0;
3194 ifp->if_lastex = nextents + ext_diff;
3195 } 3192 }
3196 /* 3193 /*
3197 * Otherwise use a linear (direct) extent list. 3194 * Otherwise use a linear (direct) extent list.
@@ -3886,8 +3883,10 @@ xfs_iext_idx_to_irec(
3886 xfs_extnum_t page_idx = *idxp; /* extent index in target list */ 3883 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
3887 3884
3888 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 3885 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3889 ASSERT(page_idx >= 0 && page_idx <= 3886 ASSERT(page_idx >= 0);
3890 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); 3887 ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
3888 ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
3889
3891 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; 3890 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3892 erp_idx = 0; 3891 erp_idx = 0;
3893 low = 0; 3892 low = 0;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index ff4e2a30227d..3ae6d58e5473 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -67,7 +67,6 @@ typedef struct xfs_ifork {
67 short if_broot_bytes; /* bytes allocated for root */ 67 short if_broot_bytes; /* bytes allocated for root */
68 unsigned char if_flags; /* per-fork flags */ 68 unsigned char if_flags; /* per-fork flags */
69 unsigned char if_ext_max; /* max # of extent records */ 69 unsigned char if_ext_max; /* max # of extent records */
70 xfs_extnum_t if_lastex; /* last if_extents used */
71 union { 70 union {
72 xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */ 71 xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
73 xfs_ext_irec_t *if_ext_irec; /* irec map file exts */ 72 xfs_ext_irec_t *if_ext_irec; /* irec map file exts */
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 7d56e88a3f0e..c7755d5a5fbe 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -29,6 +29,7 @@
29#include "xfs_mount.h" 29#include "xfs_mount.h"
30#include "xfs_error.h" 30#include "xfs_error.h"
31#include "xfs_alloc.h" 31#include "xfs_alloc.h"
32#include "xfs_discard.h"
32 33
33/* 34/*
34 * Perform initial CIL structure initialisation. If the CIL is not 35 * Perform initial CIL structure initialisation. If the CIL is not
@@ -361,18 +362,28 @@ xlog_cil_committed(
361 int abort) 362 int abort)
362{ 363{
363 struct xfs_cil_ctx *ctx = args; 364 struct xfs_cil_ctx *ctx = args;
365 struct xfs_mount *mp = ctx->cil->xc_log->l_mp;
364 366
365 xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, 367 xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
366 ctx->start_lsn, abort); 368 ctx->start_lsn, abort);
367 369
368 xfs_alloc_busy_sort(&ctx->busy_extents); 370 xfs_alloc_busy_sort(&ctx->busy_extents);
369 xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, &ctx->busy_extents); 371 xfs_alloc_busy_clear(mp, &ctx->busy_extents,
372 (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
370 373
371 spin_lock(&ctx->cil->xc_cil_lock); 374 spin_lock(&ctx->cil->xc_cil_lock);
372 list_del(&ctx->committing); 375 list_del(&ctx->committing);
373 spin_unlock(&ctx->cil->xc_cil_lock); 376 spin_unlock(&ctx->cil->xc_cil_lock);
374 377
375 xlog_cil_free_logvec(ctx->lv_chain); 378 xlog_cil_free_logvec(ctx->lv_chain);
379
380 if (!list_empty(&ctx->busy_extents)) {
381 ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);
382
383 xfs_discard_extents(mp, &ctx->busy_extents);
384 xfs_alloc_busy_clear(mp, &ctx->busy_extents, false);
385 }
386
376 kmem_free(ctx); 387 kmem_free(ctx);
377} 388}
378 389
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 19af0ab0d0c6..3d68bb267c5f 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -224,6 +224,7 @@ typedef struct xfs_mount {
224#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem 224#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
225 operations, typically for 225 operations, typically for
226 disk errors in metadata */ 226 disk errors in metadata */
227#define XFS_MOUNT_DISCARD (1ULL << 5) /* discard unused blocks */
227#define XFS_MOUNT_RETERR (1ULL << 6) /* return alignment errors to 228#define XFS_MOUNT_RETERR (1ULL << 6) /* return alignment errors to
228 user */ 229 user */
229#define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment 230#define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index d1f24858ccc4..7c7bc2b786bd 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -609,7 +609,7 @@ xfs_trans_free(
609 struct xfs_trans *tp) 609 struct xfs_trans *tp)
610{ 610{
611 xfs_alloc_busy_sort(&tp->t_busy); 611 xfs_alloc_busy_sort(&tp->t_busy);
612 xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy); 612 xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy, false);
613 613
614 atomic_dec(&tp->t_mountp->m_active_trans); 614 atomic_dec(&tp->t_mountp->m_active_trans);
615 xfs_trans_free_dqinfo(tp); 615 xfs_trans_free_dqinfo(tp);
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index bcbab3e4a3be..89b73e5d0fd0 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -1,4 +1,6 @@
1#ifdef __NR_chmod
1__NR_chmod, 2__NR_chmod,
3#endif
2__NR_fchmod, 4__NR_fchmod,
3#ifdef __NR_chown 5#ifdef __NR_chown
4__NR_chown, 6__NR_chown,
@@ -20,7 +22,9 @@ __NR_chown32,
20__NR_fchown32, 22__NR_fchown32,
21__NR_lchown32, 23__NR_lchown32,
22#endif 24#endif
25#ifdef __NR_link
23__NR_link, 26__NR_link,
27#endif
24#ifdef __NR_linkat 28#ifdef __NR_linkat
25__NR_linkat, 29__NR_linkat,
26#endif 30#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index 6621bd82cbe8..7b61db4fe72b 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -1,13 +1,27 @@
1#ifdef __NR_rename
1__NR_rename, 2__NR_rename,
3#endif
4#ifdef __NR_mkdir
2__NR_mkdir, 5__NR_mkdir,
6#endif
7#ifdef __NR_rmdir
3__NR_rmdir, 8__NR_rmdir,
9#endif
4#ifdef __NR_creat 10#ifdef __NR_creat
5__NR_creat, 11__NR_creat,
6#endif 12#endif
13#ifdef __NR_link
7__NR_link, 14__NR_link,
15#endif
16#ifdef __NR_unlink
8__NR_unlink, 17__NR_unlink,
18#endif
19#ifdef __NR_symlink
9__NR_symlink, 20__NR_symlink,
21#endif
22#ifdef __NR_mknod
10__NR_mknod, 23__NR_mknod,
24#endif
11#ifdef __NR_mkdirat 25#ifdef __NR_mkdirat
12__NR_mkdirat, 26__NR_mkdirat,
13__NR_mknodat, 27__NR_mknodat,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 0e87464d9847..3b249cb857dc 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -1,4 +1,6 @@
1#ifdef __NR_readlink
1__NR_readlink, 2__NR_readlink,
3#endif
2__NR_quotactl, 4__NR_quotactl,
3__NR_listxattr, 5__NR_listxattr,
4__NR_llistxattr, 6__NR_llistxattr,
@@ -6,3 +8,6 @@ __NR_flistxattr,
6__NR_getxattr, 8__NR_getxattr,
7__NR_lgetxattr, 9__NR_lgetxattr,
8__NR_fgetxattr, 10__NR_fgetxattr,
11#ifdef __NR_readlinkat
12__NR_readlinkat,
13#endif
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index c5f1c2c920e2..e7020c57b13b 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -4,7 +4,9 @@ __NR_acct,
4__NR_swapon, 4__NR_swapon,
5#endif 5#endif
6__NR_quotactl, 6__NR_quotactl,
7#ifdef __NR_truncate
7__NR_truncate, 8__NR_truncate,
9#endif
8#ifdef __NR_truncate64 10#ifdef __NR_truncate64
9__NR_truncate64, 11__NR_truncate64,
10#endif 12#endif
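
The audit_*.h changes above wrap more syscall numbers in #ifdef so that architectures which only provide the *at() variants (no legacy chmod/link/rename/readlink/truncate and friends) can still use the generic lists. A sketch of how this family of headers is typically consumed, expanded inside an array initializer in the per-arch audit classification code; the array names follow the usual convention and are not part of this diff:

    static unsigned dir_class[] = {
    #include <asm-generic/audit_dir_write.h>
    ~0U
    };

    static unsigned write_class[] = {
    #include <asm-generic/audit_write.h>
    ~0U
    };
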
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 110fa700f853..71c778033f57 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_GENERIC_BITOPS_FIND_H_ 1#ifndef _ASM_GENERIC_BITOPS_FIND_H_
2#define _ASM_GENERIC_BITOPS_FIND_H_ 2#define _ASM_GENERIC_BITOPS_FIND_H_
3 3
4#ifndef find_next_bit
4/** 5/**
5 * find_next_bit - find the next set bit in a memory region 6 * find_next_bit - find the next set bit in a memory region
6 * @addr: The address to base the search on 7 * @addr: The address to base the search on
@@ -9,7 +10,9 @@
9 */ 10 */
10extern unsigned long find_next_bit(const unsigned long *addr, unsigned long 11extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
11 size, unsigned long offset); 12 size, unsigned long offset);
13#endif
12 14
15#ifndef find_next_zero_bit
13/** 16/**
14 * find_next_zero_bit - find the next cleared bit in a memory region 17 * find_next_zero_bit - find the next cleared bit in a memory region
15 * @addr: The address to base the search on 18 * @addr: The address to base the search on
@@ -18,6 +21,7 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
18 */ 21 */
19extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned 22extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
20 long size, unsigned long offset); 23 long size, unsigned long offset);
24#endif
21 25
22#ifdef CONFIG_GENERIC_FIND_FIRST_BIT 26#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
23 27
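
The new #ifndef guards in bitops/find.h let an architecture supply its own find_next_bit()/find_next_zero_bit() (as macros or inlines) without colliding with the generic extern declarations. A minimal, illustrative sketch of the pattern this enables; the arch and symbol names are hypothetical:

    /* arch/foo/include/asm/bitops.h */
    #define find_next_bit(addr, size, offset) \
        foo_find_next_bit(addr, size, offset)
    #define find_next_zero_bit(addr, size, offset) \
        foo_find_next_zero_bit(addr, size, offset)

    #include <asm-generic/bitops/find.h>    /* generic declarations now skipped */
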
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 946a21b1b5dc..f95c663a6a41 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -30,13 +30,20 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
30 30
31#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) 31#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
32 32
33#ifndef find_next_zero_bit_le
33extern unsigned long find_next_zero_bit_le(const void *addr, 34extern unsigned long find_next_zero_bit_le(const void *addr,
34 unsigned long size, unsigned long offset); 35 unsigned long size, unsigned long offset);
36#endif
37
38#ifndef find_next_bit_le
35extern unsigned long find_next_bit_le(const void *addr, 39extern unsigned long find_next_bit_le(const void *addr,
36 unsigned long size, unsigned long offset); 40 unsigned long size, unsigned long offset);
41#endif
37 42
43#ifndef find_first_zero_bit_le
38#define find_first_zero_bit_le(addr, size) \ 44#define find_first_zero_bit_le(addr, size) \
39 find_next_zero_bit_le((addr), (size), 0) 45 find_next_zero_bit_le((addr), (size), 0)
46#endif
40 47
41#else 48#else
42#error "Please fix <asm/byteorder.h>" 49#error "Please fix <asm/byteorder.h>"
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index e5a3f5880001..91784841e407 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -162,9 +162,46 @@ extern void warn_slowpath_null(const char *file, const int line);
162 unlikely(__ret_warn_once); \ 162 unlikely(__ret_warn_once); \
163}) 163})
164 164
165#ifdef CONFIG_PRINTK
166
165#define WARN_ON_RATELIMIT(condition, state) \ 167#define WARN_ON_RATELIMIT(condition, state) \
166 WARN_ON((condition) && __ratelimit(state)) 168 WARN_ON((condition) && __ratelimit(state))
167 169
170#define __WARN_RATELIMIT(condition, state, format...) \
171({ \
172 int rtn = 0; \
173 if (unlikely(__ratelimit(state))) \
174 rtn = WARN(condition, format); \
175 rtn; \
176})
177
178#define WARN_RATELIMIT(condition, format...) \
179({ \
180 static DEFINE_RATELIMIT_STATE(_rs, \
181 DEFAULT_RATELIMIT_INTERVAL, \
182 DEFAULT_RATELIMIT_BURST); \
183 __WARN_RATELIMIT(condition, &_rs, format); \
184})
185
186#else
187
188#define WARN_ON_RATELIMIT(condition, state) \
189 WARN_ON(condition)
190
191#define __WARN_RATELIMIT(condition, state, format...) \
192({ \
193 int rtn = WARN(condition, format); \
194 rtn; \
195})
196
197#define WARN_RATELIMIT(condition, format...) \
198({ \
199 int rtn = WARN(condition, format); \
200 rtn; \
201})
202
203#endif
204
168/* 205/*
169 * WARN_ON_SMP() is for cases that the warning is either 206 * WARN_ON_SMP() is for cases that the warning is either
170 * meaningless for !SMP or may even cause failures. 207 * meaningless for !SMP or may even cause failures.
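For callers, the new WARN_RATELIMIT() only needs a condition and a printf-style message; the rate-limit state is declared inside the macro itself. A short sketch of a user (the driver function, status bit and message are invented for illustration):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/ratelimit.h>

/* Warns at most DEFAULT_RATELIMIT_BURST times per
 * DEFAULT_RATELIMIT_INTERVAL; with !CONFIG_PRINTK the macro falls back
 * to a plain, unratelimited WARN(). */
static int foo_check_status(unsigned int status)
{
        WARN_RATELIMIT(status & 0x80, "spurious status %#x\n", status);
        return (status & 0x80) ? -EIO : 0;
}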
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 57b5c3c82e86..87bc536ccde3 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -24,7 +24,10 @@
24#define flush_cache_vunmap(start, end) do { } while (0) 24#define flush_cache_vunmap(start, end) do { } while (0)
25 25
26#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 26#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
27 memcpy(dst, src, len) 27 do { \
28 memcpy(dst, src, len); \
29 flush_icache_user_range(vma, page, vaddr, len); \
30 } while (0)
28#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 31#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
29 memcpy(dst, src, len) 32 memcpy(dst, src, len)
30 33
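The practical effect of the change above is that code which writes instructions into another task's pages (ptrace breakpoint insertion is the classic case) no longer has to remember a separate icache flush. A rough sketch of the calling pattern, loosely modelled on the access_process_vm() write path; names are simplified and error handling is omitted:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Copy 'len' bytes of new code into a page obtained earlier with
 * get_user_pages(); copy_to_user_page() now keeps the instruction
 * cache coherent as well. */
static void patch_user_text(struct vm_area_struct *vma, struct page *page,
                            unsigned long vaddr, const void *new_insns,
                            int len)
{
        void *maddr = kmap(page);

        copy_to_user_page(vma, page, vaddr,
                          maddr + (vaddr & ~PAGE_MASK), new_insns, len);
        set_page_dirty_lock(page);
        kunmap(page);
}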
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
new file mode 100644
index 000000000000..82e674f6b337
--- /dev/null
+++ b/include/asm-generic/ptrace.h
@@ -0,0 +1,74 @@
1/*
2 * Common low level (register) ptrace helpers
3 *
4 * Copyright 2004-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __ASM_GENERIC_PTRACE_H__
10#define __ASM_GENERIC_PTRACE_H__
11
12#ifndef __ASSEMBLY__
13
14/* Helpers for working with the instruction pointer */
15#ifndef GET_IP
16#define GET_IP(regs) ((regs)->pc)
17#endif
18#ifndef SET_IP
19#define SET_IP(regs, val) (GET_IP(regs) = (val))
20#endif
21
22static inline unsigned long instruction_pointer(struct pt_regs *regs)
23{
24 return GET_IP(regs);
25}
26static inline void instruction_pointer_set(struct pt_regs *regs,
27 unsigned long val)
28{
29 SET_IP(regs, val);
30}
31
32#ifndef profile_pc
33#define profile_pc(regs) instruction_pointer(regs)
34#endif
35
36/* Helpers for working with the user stack pointer */
37#ifndef GET_USP
38#define GET_USP(regs) ((regs)->usp)
39#endif
40#ifndef SET_USP
41#define SET_USP(regs, val) (GET_USP(regs) = (val))
42#endif
43
44static inline unsigned long user_stack_pointer(struct pt_regs *regs)
45{
46 return GET_USP(regs);
47}
48static inline void user_stack_pointer_set(struct pt_regs *regs,
49 unsigned long val)
50{
51 SET_USP(regs, val);
52}
53
54/* Helpers for working with the frame pointer */
55#ifndef GET_FP
56#define GET_FP(regs) ((regs)->fp)
57#endif
58#ifndef SET_FP
59#define SET_FP(regs, val) (GET_FP(regs) = (val))
60#endif
61
62static inline unsigned long frame_pointer(struct pt_regs *regs)
63{
64 return GET_FP(regs);
65}
66static inline void frame_pointer_set(struct pt_regs *regs,
67 unsigned long val)
68{
69 SET_FP(regs, val);
70}
71
72#endif /* __ASSEMBLY__ */
73
74#endif
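An architecture adopts the new helper header by overriding only the accessors whose defaults do not fit, then including it; anything left alone falls back to regs->pc, regs->usp and regs->fp. A hypothetical example for an arch whose pt_regs keeps the user stack pointer in a field named sp:

/* arch/foo/include/asm/ptrace.h -- hypothetical, illustration only */
#ifndef _ASM_FOO_PTRACE_H
#define _ASM_FOO_PTRACE_H

struct pt_regs {
        unsigned long pc;       /* matches the GET_IP() default */
        unsigned long sp;       /* user stack pointer */
        unsigned long fp;       /* matches the GET_FP() default */
};

/* Only the non-default accessors need overriding. */
#define GET_USP(regs)           ((regs)->sp)
#define SET_USP(regs, val)      ((regs)->sp = (val))

#include <asm-generic/ptrace.h>

#endif /* _ASM_FOO_PTRACE_H */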
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index 587566f95f6c..61fa862fe08d 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -78,7 +78,7 @@
78 [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ 78 [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
79 [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 79 [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
80 [RLIMIT_NPROC] = { 0, 0 }, \ 80 [RLIMIT_NPROC] = { 0, 0 }, \
81 [RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \ 81 [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \
82 [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ 82 [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
83 [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 83 [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
84 [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 84 [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f9766259f..e58fa777fa09 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,6 +5,8 @@
5 * Copyright 2001 Red Hat, Inc. 5 * Copyright 2001 Red Hat, Inc.
6 * Based on code from mm/memory.c Copyright Linus Torvalds and others. 6 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
7 * 7 *
8 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 *
8 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
@@ -17,97 +19,111 @@
17#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
18#include <asm/tlbflush.h> 20#include <asm/tlbflush.h>
19 21
22#ifdef CONFIG_HAVE_RCU_TABLE_FREE
20/* 23/*
21 * For UP we don't need to worry about TLB flush 24 * Semi RCU freeing of the page directories.
22 * and page free order so much.. 25 *
26 * This is needed by some architectures to implement software pagetable walkers.
27 *
28 * gup_fast() and other software pagetable walkers do a lockless page-table
29 * walk and therefore need some synchronization with the freeing of the page
30 * directories. The chosen means to accomplish that is by disabling IRQs over
31 * the walk.
32 *
33 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
34 * since we unlink the page, flush TLBs, free the page. Since the disabling of
35 * IRQs delays the completion of the TLB flush we can never observe an already
36 * freed page.
37 *
38 * Architectures that do not have this (PPC) need to delay the freeing by some
39 * other means; this batching is that means.
40 *
41 * What we do is batch the freed directory pages (tables) and RCU free them.
42 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
43 * holds off grace periods.
44 *
45 * However, in order to batch these pages we need to allocate storage, this
46 * allocation is deep inside the MM code and can thus easily fail on memory
47 * pressure. To guarantee progress we fall back to single table freeing, see
48 * the implementation of tlb_remove_table_one().
49 *
23 */ 50 */
24#ifdef CONFIG_SMP 51struct mmu_table_batch {
25 #ifdef ARCH_FREE_PTR_NR 52 struct rcu_head rcu;
26 #define FREE_PTR_NR ARCH_FREE_PTR_NR 53 unsigned int nr;
27 #else 54 void *tables[0];
28 #define FREE_PTE_NR 506 55};
29 #endif 56
30 #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) 57#define MAX_TABLE_BATCH \
31#else 58 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
32 #define FREE_PTE_NR 1 59
33 #define tlb_fast_mode(tlb) 1 60extern void tlb_table_flush(struct mmu_gather *tlb);
61extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
62
34#endif 63#endif
35 64
36/* struct mmu_gather is an opaque type used by the mm code for passing around 65/*
37 * any data needed by arch specific code for tlb_remove_page. 66 * If we can't allocate a page to make a big batch of page pointers
67 * to work on, then just handle a few from the on-stack structure.
38 */ 68 */
39struct mmu_gather { 69#define MMU_GATHER_BUNDLE 8
40 struct mm_struct *mm; 70
41 unsigned int nr; /* set to ~0U means fast mode */ 71struct mmu_gather_batch {
42 unsigned int need_flush;/* Really unmapped some ptes? */ 72 struct mmu_gather_batch *next;
43 unsigned int fullmm; /* non-zero means full mm flush */ 73 unsigned int nr;
44 struct page * pages[FREE_PTE_NR]; 74 unsigned int max;
75 struct page *pages[0];
45}; 76};
46 77
47/* Users of the generic TLB shootdown code must declare this storage space. */ 78#define MAX_GATHER_BATCH \
48DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 79 ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
49 80
50/* tlb_gather_mmu 81/* struct mmu_gather is an opaque type used by the mm code for passing around
51 * Return a pointer to an initialized struct mmu_gather. 82 * any data needed by arch specific code for tlb_remove_page.
52 */ 83 */
53static inline struct mmu_gather * 84struct mmu_gather {
54tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 85 struct mm_struct *mm;
55{ 86#ifdef CONFIG_HAVE_RCU_TABLE_FREE
56 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 87 struct mmu_table_batch *batch;
57 88#endif
58 tlb->mm = mm; 89 unsigned int need_flush : 1, /* Did free PTEs */
90 fast_mode : 1; /* No batching */
59 91
60 /* Use fast mode if only one CPU is online */ 92 unsigned int fullmm;
61 tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
62 93
63 tlb->fullmm = full_mm_flush; 94 struct mmu_gather_batch *active;
95 struct mmu_gather_batch local;
96 struct page *__pages[MMU_GATHER_BUNDLE];
97};
64 98
65 return tlb; 99#define HAVE_GENERIC_MMU_GATHER
66}
67 100
68static inline void 101static inline int tlb_fast_mode(struct mmu_gather *tlb)
69tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
70{ 102{
71 if (!tlb->need_flush) 103#ifdef CONFIG_SMP
72 return; 104 return tlb->fast_mode;
73 tlb->need_flush = 0; 105#else
74 tlb_flush(tlb); 106 /*
75 if (!tlb_fast_mode(tlb)) { 107 * For UP we don't need to worry about TLB flush
76 free_pages_and_swap_cache(tlb->pages, tlb->nr); 108 * and page free order so much..
77 tlb->nr = 0; 109 */
78 } 110 return 1;
111#endif
79} 112}
80 113
81/* tlb_finish_mmu 114void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
82 * Called at the end of the shootdown operation to free up any resources 115void tlb_flush_mmu(struct mmu_gather *tlb);
83 * that were required. 116void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
84 */ 117int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
85static inline void
86tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
87{
88 tlb_flush_mmu(tlb, start, end);
89
90 /* keep the page table cache within bounds */
91 check_pgt_cache();
92
93 put_cpu_var(mmu_gathers);
94}
95 118
96/* tlb_remove_page 119/* tlb_remove_page
97 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while 120 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
98 * handling the additional races in SMP caused by other CPUs caching valid 121 * required.
99 * mappings in their TLBs.
100 */ 122 */
101static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 123static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
102{ 124{
103 tlb->need_flush = 1; 125 if (!__tlb_remove_page(tlb, page))
104 if (tlb_fast_mode(tlb)) { 126 tlb_flush_mmu(tlb);
105 free_page_and_swap_cache(page);
106 return;
107 }
108 tlb->pages[tlb->nr++] = page;
109 if (tlb->nr >= FREE_PTE_NR)
110 tlb_flush_mmu(tlb, 0, 0);
111} 127}
112 128
113/** 129/**
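With the rework above the gather state lives on the caller's stack rather than in the per-cpu mmu_gathers variable, and freed pages are batched in dynamically sized mmu_gather_batch chunks. A condensed sketch of the calling sequence under the new API; the page-table walk itself is elided and the function is illustrative, not part of the patch:

#include <asm/tlb.h>

static void example_unmap_range(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, false);        /* not a full-mm teardown */

        /*
         * Walk the page tables for [start, end), clearing ptes and
         * handing each page to tlb_remove_page(&tlb, page).  That
         * helper calls tlb_flush_mmu() by itself whenever
         * __tlb_remove_page() reports the current batch is full.
         */

        tlb_finish_mmu(&tlb, start, end);       /* final flush and free */
}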
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 07c40d5149de..33d524704883 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -24,16 +24,24 @@
24#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) 24#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
25#endif 25#endif
26 26
27#ifdef __SYSCALL_COMPAT
28#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp)
29#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp)
30#else
31#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys)
32#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64)
33#endif
34
27#define __NR_io_setup 0 35#define __NR_io_setup 0
28__SYSCALL(__NR_io_setup, sys_io_setup) 36__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
29#define __NR_io_destroy 1 37#define __NR_io_destroy 1
30__SYSCALL(__NR_io_destroy, sys_io_destroy) 38__SYSCALL(__NR_io_destroy, sys_io_destroy)
31#define __NR_io_submit 2 39#define __NR_io_submit 2
32__SYSCALL(__NR_io_submit, sys_io_submit) 40__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
33#define __NR_io_cancel 3 41#define __NR_io_cancel 3
34__SYSCALL(__NR_io_cancel, sys_io_cancel) 42__SYSCALL(__NR_io_cancel, sys_io_cancel)
35#define __NR_io_getevents 4 43#define __NR_io_getevents 4
36__SYSCALL(__NR_io_getevents, sys_io_getevents) 44__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
37 45
38/* fs/xattr.c */ 46/* fs/xattr.c */
39#define __NR_setxattr 5 47#define __NR_setxattr 5
@@ -67,7 +75,7 @@ __SYSCALL(__NR_getcwd, sys_getcwd)
67 75
68/* fs/cookies.c */ 76/* fs/cookies.c */
69#define __NR_lookup_dcookie 18 77#define __NR_lookup_dcookie 18
70__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie) 78__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
71 79
72/* fs/eventfd.c */ 80/* fs/eventfd.c */
73#define __NR_eventfd2 19 81#define __NR_eventfd2 19
@@ -79,7 +87,7 @@ __SYSCALL(__NR_epoll_create1, sys_epoll_create1)
79#define __NR_epoll_ctl 21 87#define __NR_epoll_ctl 21
80__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) 88__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
81#define __NR_epoll_pwait 22 89#define __NR_epoll_pwait 22
82__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) 90__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
83 91
84/* fs/fcntl.c */ 92/* fs/fcntl.c */
85#define __NR_dup 23 93#define __NR_dup 23
@@ -87,7 +95,7 @@ __SYSCALL(__NR_dup, sys_dup)
87#define __NR_dup3 24 95#define __NR_dup3 24
88__SYSCALL(__NR_dup3, sys_dup3) 96__SYSCALL(__NR_dup3, sys_dup3)
89#define __NR3264_fcntl 25 97#define __NR3264_fcntl 25
90__SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl) 98__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
91 99
92/* fs/inotify_user.c */ 100/* fs/inotify_user.c */
93#define __NR_inotify_init1 26 101#define __NR_inotify_init1 26
@@ -99,7 +107,7 @@ __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
99 107
100/* fs/ioctl.c */ 108/* fs/ioctl.c */
101#define __NR_ioctl 29 109#define __NR_ioctl 29
102__SYSCALL(__NR_ioctl, sys_ioctl) 110__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
103 111
104/* fs/ioprio.c */ 112/* fs/ioprio.c */
105#define __NR_ioprio_set 30 113#define __NR_ioprio_set 30
@@ -129,26 +137,30 @@ __SYSCALL(__NR_renameat, sys_renameat)
129#define __NR_umount2 39 137#define __NR_umount2 39
130__SYSCALL(__NR_umount2, sys_umount) 138__SYSCALL(__NR_umount2, sys_umount)
131#define __NR_mount 40 139#define __NR_mount 40
132__SYSCALL(__NR_mount, sys_mount) 140__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
133#define __NR_pivot_root 41 141#define __NR_pivot_root 41
134__SYSCALL(__NR_pivot_root, sys_pivot_root) 142__SYSCALL(__NR_pivot_root, sys_pivot_root)
135 143
136/* fs/nfsctl.c */ 144/* fs/nfsctl.c */
137#define __NR_nfsservctl 42 145#define __NR_nfsservctl 42
138__SYSCALL(__NR_nfsservctl, sys_nfsservctl) 146__SC_COMP(__NR_nfsservctl, sys_nfsservctl, compat_sys_nfsservctl)
139 147
140/* fs/open.c */ 148/* fs/open.c */
141#define __NR3264_statfs 43 149#define __NR3264_statfs 43
142__SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs) 150__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
151 compat_sys_statfs64)
143#define __NR3264_fstatfs 44 152#define __NR3264_fstatfs 44
144__SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs) 153__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \
154 compat_sys_fstatfs64)
145#define __NR3264_truncate 45 155#define __NR3264_truncate 45
146__SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate) 156__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
157 compat_sys_truncate64)
147#define __NR3264_ftruncate 46 158#define __NR3264_ftruncate 46
148__SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate) 159__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
160 compat_sys_ftruncate64)
149 161
150#define __NR_fallocate 47 162#define __NR_fallocate 47
151__SYSCALL(__NR_fallocate, sys_fallocate) 163__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
152#define __NR_faccessat 48 164#define __NR_faccessat 48
153__SYSCALL(__NR_faccessat, sys_faccessat) 165__SYSCALL(__NR_faccessat, sys_faccessat)
154#define __NR_chdir 49 166#define __NR_chdir 49
@@ -166,7 +178,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
166#define __NR_fchown 55 178#define __NR_fchown 55
167__SYSCALL(__NR_fchown, sys_fchown) 179__SYSCALL(__NR_fchown, sys_fchown)
168#define __NR_openat 56 180#define __NR_openat 56
169__SYSCALL(__NR_openat, sys_openat) 181__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
170#define __NR_close 57 182#define __NR_close 57
171__SYSCALL(__NR_close, sys_close) 183__SYSCALL(__NR_close, sys_close)
172#define __NR_vhangup 58 184#define __NR_vhangup 58
@@ -182,7 +194,7 @@ __SYSCALL(__NR_quotactl, sys_quotactl)
182 194
183/* fs/readdir.c */ 195/* fs/readdir.c */
184#define __NR_getdents64 61 196#define __NR_getdents64 61
185__SYSCALL(__NR_getdents64, sys_getdents64) 197__SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
186 198
187/* fs/read_write.c */ 199/* fs/read_write.c */
188#define __NR3264_lseek 62 200#define __NR3264_lseek 62
@@ -192,17 +204,17 @@ __SYSCALL(__NR_read, sys_read)
192#define __NR_write 64 204#define __NR_write 64
193__SYSCALL(__NR_write, sys_write) 205__SYSCALL(__NR_write, sys_write)
194#define __NR_readv 65 206#define __NR_readv 65
195__SYSCALL(__NR_readv, sys_readv) 207__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
196#define __NR_writev 66 208#define __NR_writev 66
197__SYSCALL(__NR_writev, sys_writev) 209__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
198#define __NR_pread64 67 210#define __NR_pread64 67
199__SYSCALL(__NR_pread64, sys_pread64) 211__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
200#define __NR_pwrite64 68 212#define __NR_pwrite64 68
201__SYSCALL(__NR_pwrite64, sys_pwrite64) 213__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
202#define __NR_preadv 69 214#define __NR_preadv 69
203__SYSCALL(__NR_preadv, sys_preadv) 215__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
204#define __NR_pwritev 70 216#define __NR_pwritev 70
205__SYSCALL(__NR_pwritev, sys_pwritev) 217__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
206 218
207/* fs/sendfile.c */ 219/* fs/sendfile.c */
208#define __NR3264_sendfile 71 220#define __NR3264_sendfile 71
@@ -210,17 +222,17 @@ __SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
210 222
211/* fs/select.c */ 223/* fs/select.c */
212#define __NR_pselect6 72 224#define __NR_pselect6 72
213__SYSCALL(__NR_pselect6, sys_pselect6) 225__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
214#define __NR_ppoll 73 226#define __NR_ppoll 73
215__SYSCALL(__NR_ppoll, sys_ppoll) 227__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
216 228
217/* fs/signalfd.c */ 229/* fs/signalfd.c */
218#define __NR_signalfd4 74 230#define __NR_signalfd4 74
219__SYSCALL(__NR_signalfd4, sys_signalfd4) 231__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
220 232
221/* fs/splice.c */ 233/* fs/splice.c */
222#define __NR_vmsplice 75 234#define __NR_vmsplice 75
223__SYSCALL(__NR_vmsplice, sys_vmsplice) 235__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
224#define __NR_splice 76 236#define __NR_splice 76
225__SYSCALL(__NR_splice, sys_splice) 237__SYSCALL(__NR_splice, sys_splice)
226#define __NR_tee 77 238#define __NR_tee 77
@@ -243,23 +255,27 @@ __SYSCALL(__NR_fsync, sys_fsync)
243__SYSCALL(__NR_fdatasync, sys_fdatasync) 255__SYSCALL(__NR_fdatasync, sys_fdatasync)
244#ifdef __ARCH_WANT_SYNC_FILE_RANGE2 256#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
245#define __NR_sync_file_range2 84 257#define __NR_sync_file_range2 84
246__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2) 258__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
259 compat_sys_sync_file_range2)
247#else 260#else
248#define __NR_sync_file_range 84 261#define __NR_sync_file_range 84
249__SYSCALL(__NR_sync_file_range, sys_sync_file_range) 262__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
263 compat_sys_sync_file_range)
250#endif 264#endif
251 265
252/* fs/timerfd.c */ 266/* fs/timerfd.c */
253#define __NR_timerfd_create 85 267#define __NR_timerfd_create 85
254__SYSCALL(__NR_timerfd_create, sys_timerfd_create) 268__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
255#define __NR_timerfd_settime 86 269#define __NR_timerfd_settime 86
256__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 270__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
271 compat_sys_timerfd_settime)
257#define __NR_timerfd_gettime 87 272#define __NR_timerfd_gettime 87
258__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 273__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
274 compat_sys_timerfd_gettime)
259 275
260/* fs/utimes.c */ 276/* fs/utimes.c */
261#define __NR_utimensat 88 277#define __NR_utimensat 88
262__SYSCALL(__NR_utimensat, sys_utimensat) 278__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
263 279
264/* kernel/acct.c */ 280/* kernel/acct.c */
265#define __NR_acct 89 281#define __NR_acct 89
@@ -281,7 +297,7 @@ __SYSCALL(__NR_exit, sys_exit)
281#define __NR_exit_group 94 297#define __NR_exit_group 94
282__SYSCALL(__NR_exit_group, sys_exit_group) 298__SYSCALL(__NR_exit_group, sys_exit_group)
283#define __NR_waitid 95 299#define __NR_waitid 95
284__SYSCALL(__NR_waitid, sys_waitid) 300__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
285 301
286/* kernel/fork.c */ 302/* kernel/fork.c */
287#define __NR_set_tid_address 96 303#define __NR_set_tid_address 96
@@ -291,25 +307,27 @@ __SYSCALL(__NR_unshare, sys_unshare)
291 307
292/* kernel/futex.c */ 308/* kernel/futex.c */
293#define __NR_futex 98 309#define __NR_futex 98
294__SYSCALL(__NR_futex, sys_futex) 310__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
295#define __NR_set_robust_list 99 311#define __NR_set_robust_list 99
296__SYSCALL(__NR_set_robust_list, sys_set_robust_list) 312__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
313 compat_sys_set_robust_list)
297#define __NR_get_robust_list 100 314#define __NR_get_robust_list 100
298__SYSCALL(__NR_get_robust_list, sys_get_robust_list) 315__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
316 compat_sys_get_robust_list)
299 317
300/* kernel/hrtimer.c */ 318/* kernel/hrtimer.c */
301#define __NR_nanosleep 101 319#define __NR_nanosleep 101
302__SYSCALL(__NR_nanosleep, sys_nanosleep) 320__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
303 321
304/* kernel/itimer.c */ 322/* kernel/itimer.c */
305#define __NR_getitimer 102 323#define __NR_getitimer 102
306__SYSCALL(__NR_getitimer, sys_getitimer) 324__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
307#define __NR_setitimer 103 325#define __NR_setitimer 103
308__SYSCALL(__NR_setitimer, sys_setitimer) 326__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
309 327
310/* kernel/kexec.c */ 328/* kernel/kexec.c */
311#define __NR_kexec_load 104 329#define __NR_kexec_load 104
312__SYSCALL(__NR_kexec_load, sys_kexec_load) 330__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
313 331
314/* kernel/module.c */ 332/* kernel/module.c */
315#define __NR_init_module 105 333#define __NR_init_module 105
@@ -319,23 +337,24 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
319 337
320/* kernel/posix-timers.c */ 338/* kernel/posix-timers.c */
321#define __NR_timer_create 107 339#define __NR_timer_create 107
322__SYSCALL(__NR_timer_create, sys_timer_create) 340__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
323#define __NR_timer_gettime 108 341#define __NR_timer_gettime 108
324__SYSCALL(__NR_timer_gettime, sys_timer_gettime) 342__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
325#define __NR_timer_getoverrun 109 343#define __NR_timer_getoverrun 109
326__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) 344__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
327#define __NR_timer_settime 110 345#define __NR_timer_settime 110
328__SYSCALL(__NR_timer_settime, sys_timer_settime) 346__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
329#define __NR_timer_delete 111 347#define __NR_timer_delete 111
330__SYSCALL(__NR_timer_delete, sys_timer_delete) 348__SYSCALL(__NR_timer_delete, sys_timer_delete)
331#define __NR_clock_settime 112 349#define __NR_clock_settime 112
332__SYSCALL(__NR_clock_settime, sys_clock_settime) 350__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
333#define __NR_clock_gettime 113 351#define __NR_clock_gettime 113
334__SYSCALL(__NR_clock_gettime, sys_clock_gettime) 352__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
335#define __NR_clock_getres 114 353#define __NR_clock_getres 114
336__SYSCALL(__NR_clock_getres, sys_clock_getres) 354__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
337#define __NR_clock_nanosleep 115 355#define __NR_clock_nanosleep 115
338__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep) 356__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
357 compat_sys_clock_nanosleep)
339 358
340/* kernel/printk.c */ 359/* kernel/printk.c */
341#define __NR_syslog 116 360#define __NR_syslog 116
@@ -355,9 +374,11 @@ __SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
355#define __NR_sched_getparam 121 374#define __NR_sched_getparam 121
356__SYSCALL(__NR_sched_getparam, sys_sched_getparam) 375__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
357#define __NR_sched_setaffinity 122 376#define __NR_sched_setaffinity 122
358__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity) 377__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \
378 compat_sys_sched_setaffinity)
359#define __NR_sched_getaffinity 123 379#define __NR_sched_getaffinity 123
360__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity) 380__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \
381 compat_sys_sched_getaffinity)
361#define __NR_sched_yield 124 382#define __NR_sched_yield 124
362__SYSCALL(__NR_sched_yield, sys_sched_yield) 383__SYSCALL(__NR_sched_yield, sys_sched_yield)
363#define __NR_sched_get_priority_max 125 384#define __NR_sched_get_priority_max 125
@@ -365,7 +386,8 @@ __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
365#define __NR_sched_get_priority_min 126 386#define __NR_sched_get_priority_min 126
366__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) 387__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
367#define __NR_sched_rr_get_interval 127 388#define __NR_sched_rr_get_interval 127
368__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval) 389__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
390 compat_sys_sched_rr_get_interval)
369 391
370/* kernel/signal.c */ 392/* kernel/signal.c */
371#define __NR_restart_syscall 128 393#define __NR_restart_syscall 128
@@ -377,21 +399,23 @@ __SYSCALL(__NR_tkill, sys_tkill)
377#define __NR_tgkill 131 399#define __NR_tgkill 131
378__SYSCALL(__NR_tgkill, sys_tgkill) 400__SYSCALL(__NR_tgkill, sys_tgkill)
379#define __NR_sigaltstack 132 401#define __NR_sigaltstack 132
380__SYSCALL(__NR_sigaltstack, sys_sigaltstack) 402__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack)
381#define __NR_rt_sigsuspend 133 403#define __NR_rt_sigsuspend 133
382__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ 404__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend)
383#define __NR_rt_sigaction 134 405#define __NR_rt_sigaction 134
384__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */ 406__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
385#define __NR_rt_sigprocmask 135 407#define __NR_rt_sigprocmask 135
386__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask) 408__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
387#define __NR_rt_sigpending 136 409#define __NR_rt_sigpending 136
388__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending) 410__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
389#define __NR_rt_sigtimedwait 137 411#define __NR_rt_sigtimedwait 137
390__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait) 412__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
413 compat_sys_rt_sigtimedwait)
391#define __NR_rt_sigqueueinfo 138 414#define __NR_rt_sigqueueinfo 138
392__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 415__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
416 compat_sys_rt_sigqueueinfo)
393#define __NR_rt_sigreturn 139 417#define __NR_rt_sigreturn 139
394__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */ 418__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
395 419
396/* kernel/sys.c */ 420/* kernel/sys.c */
397#define __NR_setpriority 140 421#define __NR_setpriority 140
@@ -421,7 +445,7 @@ __SYSCALL(__NR_setfsuid, sys_setfsuid)
421#define __NR_setfsgid 152 445#define __NR_setfsgid 152
422__SYSCALL(__NR_setfsgid, sys_setfsgid) 446__SYSCALL(__NR_setfsgid, sys_setfsgid)
423#define __NR_times 153 447#define __NR_times 153
424__SYSCALL(__NR_times, sys_times) 448__SC_COMP(__NR_times, sys_times, compat_sys_times)
425#define __NR_setpgid 154 449#define __NR_setpgid 154
426__SYSCALL(__NR_setpgid, sys_setpgid) 450__SYSCALL(__NR_setpgid, sys_setpgid)
427#define __NR_getpgid 155 451#define __NR_getpgid 155
@@ -441,11 +465,11 @@ __SYSCALL(__NR_sethostname, sys_sethostname)
441#define __NR_setdomainname 162 465#define __NR_setdomainname 162
442__SYSCALL(__NR_setdomainname, sys_setdomainname) 466__SYSCALL(__NR_setdomainname, sys_setdomainname)
443#define __NR_getrlimit 163 467#define __NR_getrlimit 163
444__SYSCALL(__NR_getrlimit, sys_getrlimit) 468__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
445#define __NR_setrlimit 164 469#define __NR_setrlimit 164
446__SYSCALL(__NR_setrlimit, sys_setrlimit) 470__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
447#define __NR_getrusage 165 471#define __NR_getrusage 165
448__SYSCALL(__NR_getrusage, sys_getrusage) 472__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
449#define __NR_umask 166 473#define __NR_umask 166
450__SYSCALL(__NR_umask, sys_umask) 474__SYSCALL(__NR_umask, sys_umask)
451#define __NR_prctl 167 475#define __NR_prctl 167
@@ -455,11 +479,11 @@ __SYSCALL(__NR_getcpu, sys_getcpu)
455 479
456/* kernel/time.c */ 480/* kernel/time.c */
457#define __NR_gettimeofday 169 481#define __NR_gettimeofday 169
458__SYSCALL(__NR_gettimeofday, sys_gettimeofday) 482__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
459#define __NR_settimeofday 170 483#define __NR_settimeofday 170
460__SYSCALL(__NR_settimeofday, sys_settimeofday) 484__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
461#define __NR_adjtimex 171 485#define __NR_adjtimex 171
462__SYSCALL(__NR_adjtimex, sys_adjtimex) 486__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
463 487
464/* kernel/timer.c */ 488/* kernel/timer.c */
465#define __NR_getpid 172 489#define __NR_getpid 172
@@ -477,39 +501,40 @@ __SYSCALL(__NR_getegid, sys_getegid)
477#define __NR_gettid 178 501#define __NR_gettid 178
478__SYSCALL(__NR_gettid, sys_gettid) 502__SYSCALL(__NR_gettid, sys_gettid)
479#define __NR_sysinfo 179 503#define __NR_sysinfo 179
480__SYSCALL(__NR_sysinfo, sys_sysinfo) 504__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
481 505
482/* ipc/mqueue.c */ 506/* ipc/mqueue.c */
483#define __NR_mq_open 180 507#define __NR_mq_open 180
484__SYSCALL(__NR_mq_open, sys_mq_open) 508__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
485#define __NR_mq_unlink 181 509#define __NR_mq_unlink 181
486__SYSCALL(__NR_mq_unlink, sys_mq_unlink) 510__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
487#define __NR_mq_timedsend 182 511#define __NR_mq_timedsend 182
488__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend) 512__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
489#define __NR_mq_timedreceive 183 513#define __NR_mq_timedreceive 183
490__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive) 514__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
515 compat_sys_mq_timedreceive)
491#define __NR_mq_notify 184 516#define __NR_mq_notify 184
492__SYSCALL(__NR_mq_notify, sys_mq_notify) 517__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
493#define __NR_mq_getsetattr 185 518#define __NR_mq_getsetattr 185
494__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr) 519__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
495 520
496/* ipc/msg.c */ 521/* ipc/msg.c */
497#define __NR_msgget 186 522#define __NR_msgget 186
498__SYSCALL(__NR_msgget, sys_msgget) 523__SYSCALL(__NR_msgget, sys_msgget)
499#define __NR_msgctl 187 524#define __NR_msgctl 187
500__SYSCALL(__NR_msgctl, sys_msgctl) 525__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
501#define __NR_msgrcv 188 526#define __NR_msgrcv 188
502__SYSCALL(__NR_msgrcv, sys_msgrcv) 527__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
503#define __NR_msgsnd 189 528#define __NR_msgsnd 189
504__SYSCALL(__NR_msgsnd, sys_msgsnd) 529__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
505 530
506/* ipc/sem.c */ 531/* ipc/sem.c */
507#define __NR_semget 190 532#define __NR_semget 190
508__SYSCALL(__NR_semget, sys_semget) 533__SYSCALL(__NR_semget, sys_semget)
509#define __NR_semctl 191 534#define __NR_semctl 191
510__SYSCALL(__NR_semctl, sys_semctl) 535__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
511#define __NR_semtimedop 192 536#define __NR_semtimedop 192
512__SYSCALL(__NR_semtimedop, sys_semtimedop) 537__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
513#define __NR_semop 193 538#define __NR_semop 193
514__SYSCALL(__NR_semop, sys_semop) 539__SYSCALL(__NR_semop, sys_semop)
515 540
@@ -517,9 +542,9 @@ __SYSCALL(__NR_semop, sys_semop)
517#define __NR_shmget 194 542#define __NR_shmget 194
518__SYSCALL(__NR_shmget, sys_shmget) 543__SYSCALL(__NR_shmget, sys_shmget)
519#define __NR_shmctl 195 544#define __NR_shmctl 195
520__SYSCALL(__NR_shmctl, sys_shmctl) 545__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
521#define __NR_shmat 196 546#define __NR_shmat 196
522__SYSCALL(__NR_shmat, sys_shmat) 547__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
523#define __NR_shmdt 197 548#define __NR_shmdt 197
524__SYSCALL(__NR_shmdt, sys_shmdt) 549__SYSCALL(__NR_shmdt, sys_shmdt)
525 550
@@ -543,21 +568,21 @@ __SYSCALL(__NR_getpeername, sys_getpeername)
543#define __NR_sendto 206 568#define __NR_sendto 206
544__SYSCALL(__NR_sendto, sys_sendto) 569__SYSCALL(__NR_sendto, sys_sendto)
545#define __NR_recvfrom 207 570#define __NR_recvfrom 207
546__SYSCALL(__NR_recvfrom, sys_recvfrom) 571__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
547#define __NR_setsockopt 208 572#define __NR_setsockopt 208
548__SYSCALL(__NR_setsockopt, sys_setsockopt) 573__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
549#define __NR_getsockopt 209 574#define __NR_getsockopt 209
550__SYSCALL(__NR_getsockopt, sys_getsockopt) 575__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
551#define __NR_shutdown 210 576#define __NR_shutdown 210
552__SYSCALL(__NR_shutdown, sys_shutdown) 577__SYSCALL(__NR_shutdown, sys_shutdown)
553#define __NR_sendmsg 211 578#define __NR_sendmsg 211
554__SYSCALL(__NR_sendmsg, sys_sendmsg) 579__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
555#define __NR_recvmsg 212 580#define __NR_recvmsg 212
556__SYSCALL(__NR_recvmsg, sys_recvmsg) 581__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
557 582
558/* mm/filemap.c */ 583/* mm/filemap.c */
559#define __NR_readahead 213 584#define __NR_readahead 213
560__SYSCALL(__NR_readahead, sys_readahead) 585__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
561 586
562/* mm/nommu.c, also with MMU */ 587/* mm/nommu.c, also with MMU */
563#define __NR_brk 214 588#define __NR_brk 214
@@ -573,19 +598,19 @@ __SYSCALL(__NR_add_key, sys_add_key)
573#define __NR_request_key 218 598#define __NR_request_key 218
574__SYSCALL(__NR_request_key, sys_request_key) 599__SYSCALL(__NR_request_key, sys_request_key)
575#define __NR_keyctl 219 600#define __NR_keyctl 219
576__SYSCALL(__NR_keyctl, sys_keyctl) 601__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
577 602
578/* arch/example/kernel/sys_example.c */ 603/* arch/example/kernel/sys_example.c */
579#define __NR_clone 220 604#define __NR_clone 220
580__SYSCALL(__NR_clone, sys_clone) /* .long sys_clone_wrapper */ 605__SYSCALL(__NR_clone, sys_clone)
581#define __NR_execve 221 606#define __NR_execve 221
582__SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */ 607__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
583 608
584#define __NR3264_mmap 222 609#define __NR3264_mmap 222
585__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) 610__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
586/* mm/fadvise.c */ 611/* mm/fadvise.c */
587#define __NR3264_fadvise64 223 612#define __NR3264_fadvise64 223
588__SYSCALL(__NR3264_fadvise64, sys_fadvise64_64) 613__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
589 614
590/* mm/, CONFIG_MMU only */ 615/* mm/, CONFIG_MMU only */
591#ifndef __ARCH_NOMMU 616#ifndef __ARCH_NOMMU
@@ -612,25 +637,26 @@ __SYSCALL(__NR_madvise, sys_madvise)
612#define __NR_remap_file_pages 234 637#define __NR_remap_file_pages 234
613__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) 638__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
614#define __NR_mbind 235 639#define __NR_mbind 235
615__SYSCALL(__NR_mbind, sys_mbind) 640__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
616#define __NR_get_mempolicy 236 641#define __NR_get_mempolicy 236
617__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) 642__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
618#define __NR_set_mempolicy 237 643#define __NR_set_mempolicy 237
619__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) 644__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
620#define __NR_migrate_pages 238 645#define __NR_migrate_pages 238
621__SYSCALL(__NR_migrate_pages, sys_migrate_pages) 646__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
622#define __NR_move_pages 239 647#define __NR_move_pages 239
623__SYSCALL(__NR_move_pages, sys_move_pages) 648__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
624#endif 649#endif
625 650
626#define __NR_rt_tgsigqueueinfo 240 651#define __NR_rt_tgsigqueueinfo 240
627__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) 652__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
653 compat_sys_rt_tgsigqueueinfo)
628#define __NR_perf_event_open 241 654#define __NR_perf_event_open 241
629__SYSCALL(__NR_perf_event_open, sys_perf_event_open) 655__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
630#define __NR_accept4 242 656#define __NR_accept4 242
631__SYSCALL(__NR_accept4, sys_accept4) 657__SYSCALL(__NR_accept4, sys_accept4)
632#define __NR_recvmmsg 243 658#define __NR_recvmmsg 243
633__SYSCALL(__NR_recvmmsg, sys_recvmmsg) 659__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
634 660
635/* 661/*
636 * Architectures may provide up to 16 syscalls of their own 662 * Architectures may provide up to 16 syscalls of their own
@@ -639,19 +665,20 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
639#define __NR_arch_specific_syscall 244 665#define __NR_arch_specific_syscall 244
640 666
641#define __NR_wait4 260 667#define __NR_wait4 260
642__SYSCALL(__NR_wait4, sys_wait4) 668__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
643#define __NR_prlimit64 261 669#define __NR_prlimit64 261
644__SYSCALL(__NR_prlimit64, sys_prlimit64) 670__SYSCALL(__NR_prlimit64, sys_prlimit64)
645#define __NR_fanotify_init 262 671#define __NR_fanotify_init 262
646__SYSCALL(__NR_fanotify_init, sys_fanotify_init) 672__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
647#define __NR_fanotify_mark 263 673#define __NR_fanotify_mark 263
648__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) 674__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
649#define __NR_name_to_handle_at 264 675#define __NR_name_to_handle_at 264
650__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) 676__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
651#define __NR_open_by_handle_at 265 677#define __NR_open_by_handle_at 265
652__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) 678__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
679 compat_sys_open_by_handle_at)
653#define __NR_clock_adjtime 266 680#define __NR_clock_adjtime 266
654__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime) 681__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
655#define __NR_syncfs 267 682#define __NR_syncfs 267
656__SYSCALL(__NR_syncfs, sys_syncfs) 683__SYSCALL(__NR_syncfs, sys_syncfs)
657 684
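The effect of the new __SC_COMP()/__SC_COMP_3264() wrappers is easiest to see by expanding one entry by hand. Architectures build their syscall tables by defining __SYSCALL() to emit a table slot and then including this header; whether __SYSCALL_COMPAT is defined at that point decides which entry point lands in the slot. A worked expansion, taken directly from the macros above:

/*
 *   __SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
 *
 * native table (no __SYSCALL_COMPAT):
 *   __SYSCALL(__NR_io_setup, sys_io_setup)        -> slot 0 = sys_io_setup
 *
 * compat table (__SYSCALL_COMPAT defined):
 *   __SYSCALL(__NR_io_setup, compat_sys_io_setup) -> slot 0 = compat_sys_io_setup
 *
 * __SC_COMP_3264() behaves the same way, except that without
 * __SYSCALL_COMPAT it falls back to the 32/64-bit selection done by
 * __SC_3264().
 */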
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index cb1ded2bd545..01f636275057 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -4,6 +4,7 @@ header-y += caif/
4header-y += dvb/ 4header-y += dvb/
5header-y += hdlc/ 5header-y += hdlc/
6header-y += isdn/ 6header-y += isdn/
7header-y += mmc/
7header-y += nfsd/ 8header-y += nfsd/
8header-y += raid/ 9header-y += raid/
9header-y += spi/ 10header-y += spi/
@@ -302,6 +303,7 @@ header-y += ppp-comp.h
302header-y += ppp_defs.h 303header-y += ppp_defs.h
303header-y += pps.h 304header-y += pps.h
304header-y += prctl.h 305header-y += prctl.h
306header-y += ptp_clock.h
305header-y += ptrace.h 307header-y += ptrace.h
306header-y += qnx4_fs.h 308header-y += qnx4_fs.h
307header-y += qnxtypes.h 309header-y += qnxtypes.h
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 198087a16fc4..1ae12710d732 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -13,8 +13,64 @@
13#ifndef __BASIC_MMIO_GPIO_H 13#ifndef __BASIC_MMIO_GPIO_H
14#define __BASIC_MMIO_GPIO_H 14#define __BASIC_MMIO_GPIO_H
15 15
16#include <linux/gpio.h>
17#include <linux/types.h>
18#include <linux/compiler.h>
19
16struct bgpio_pdata { 20struct bgpio_pdata {
17 int base; 21 int base;
22 int ngpio;
18}; 23};
19 24
25struct device;
26
27struct bgpio_chip {
28 struct gpio_chip gc;
29
30 unsigned long (*read_reg)(void __iomem *reg);
31 void (*write_reg)(void __iomem *reg, unsigned long data);
32
33 void __iomem *reg_dat;
34 void __iomem *reg_set;
35 void __iomem *reg_clr;
36 void __iomem *reg_dir;
37
38 /* Number of bits (GPIOs): <register width> * 8. */
39 int bits;
40
41 /*
42 * Some GPIO controllers work with the big-endian bits notation,
43 * e.g. in an 8-bit register, GPIO7 is the least significant bit.
44 */
45 unsigned long (*pin2mask)(struct bgpio_chip *bgc, unsigned int pin);
46
47 /*
48 * Used to lock bgpio_chip->data. Also, this is needed to keep
49 * shadowed and real data registers writes together.
50 */
51 spinlock_t lock;
52
53 /* Shadowed data register to clear/set bits safely. */
54 unsigned long data;
55
56 /* Shadowed direction registers to clear/set direction safely. */
57 unsigned long dir;
58};
59
60static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
61{
62 return container_of(gc, struct bgpio_chip, gc);
63}
64
65int __devexit bgpio_remove(struct bgpio_chip *bgc);
66int __devinit bgpio_init(struct bgpio_chip *bgc,
67 struct device *dev,
68 unsigned long sz,
69 void __iomem *dat,
70 void __iomem *set,
71 void __iomem *clr,
72 void __iomem *dirout,
73 void __iomem *dirin,
74 bool big_endian);
75
20#endif /* __BASIC_MMIO_GPIO_H */ 76#endif /* __BASIC_MMIO_GPIO_H */
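A platform driver would typically embed a bgpio_chip, hand bgpio_init() its mapped data and direction registers, and then register the resulting gpio_chip. A hedged sketch; the device, register offsets and 32-bit little-endian layout are invented:

#include <linux/basic_mmio_gpio.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int __devinit foo_gpio_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct bgpio_chip *bgc;
        void __iomem *base;
        int err;

        if (!res)
                return -EINVAL;

        bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
        base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!bgc || !base)
                return -ENOMEM;

        err = bgpio_init(bgc, &pdev->dev, 4,    /* 4-byte wide registers */
                         base + 0x00,           /* dat */
                         NULL, NULL,            /* no separate set/clr */
                         base + 0x04,           /* dirout */
                         NULL,                  /* no dirin register */
                         false);                /* little-endian bits */
        if (err)
                return err;

        return gpiochip_add(&bgc->gc);
}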
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index daf8c480c786..dcafe0bf0005 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -55,7 +55,8 @@
55 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf 55 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
56 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf 56 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
57 * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf 57 * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
58 * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list 58 * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
59 * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
59 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region 60 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
60 * bitmap_release_region(bitmap, pos, order) Free specified bit region 61 * bitmap_release_region(bitmap, pos, order) Free specified bit region
61 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region 62 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
@@ -129,6 +130,8 @@ extern int bitmap_scnlistprintf(char *buf, unsigned int len,
129 const unsigned long *src, int nbits); 130 const unsigned long *src, int nbits);
130extern int bitmap_parselist(const char *buf, unsigned long *maskp, 131extern int bitmap_parselist(const char *buf, unsigned long *maskp,
131 int nmaskbits); 132 int nmaskbits);
133extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
134 unsigned long *dst, int nbits);
132extern void bitmap_remap(unsigned long *dst, const unsigned long *src, 135extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
133 const unsigned long *old, const unsigned long *new, int bits); 136 const unsigned long *old, const unsigned long *new, int bits);
134extern int bitmap_bitremap(int oldbit, 137extern int bitmap_bitremap(int oldbit,
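bitmap_parselist_user() mirrors bitmap_parse_user(): it copies a list-formatted string such as "0-3,8,12-15" from userspace and parses it into a kernel bitmap, which makes it a natural fit for procfs/sysfs write handlers. A sketch; the file operation and the foo_apply_cpus() helper are hypothetical:

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/slab.h>

static int foo_apply_cpus(const struct cpumask *mask);  /* hypothetical */

static ssize_t foo_cpus_write(struct file *file, const char __user *ubuf,
                              size_t count, loff_t *ppos)
{
        cpumask_var_t new_mask;
        int err;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parselist_user(ubuf, count,
                                    cpumask_bits(new_mask), nr_cpumask_bits);
        if (!err)
                err = foo_apply_cpus(new_mask);

        free_cpumask_var(new_mask);
        return err ? err : count;
}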
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2184c6b97aeb..a3ef66a2a083 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -148,7 +148,7 @@ static inline unsigned long __ffs64(u64 word)
148 148
149#ifdef __KERNEL__ 149#ifdef __KERNEL__
150 150
151#ifdef CONFIG_GENERIC_FIND_LAST_BIT 151#ifndef find_last_bit
152/** 152/**
153 * find_last_bit - find the last set bit in a memory region 153 * find_last_bit - find the last set bit in a memory region
154 * @addr: The address to start the search at 154 * @addr: The address to start the search at
@@ -158,7 +158,7 @@ static inline unsigned long __ffs64(u64 word)
158 */ 158 */
159extern unsigned long find_last_bit(const unsigned long *addr, 159extern unsigned long find_last_bit(const unsigned long *addr,
160 unsigned long size); 160 unsigned long size);
161#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ 161#endif
162 162
163#endif /* __KERNEL__ */ 163#endif /* __KERNEL__ */
164#endif 164#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index be50d9e70a7d..2a7cea53ca0d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -151,7 +151,6 @@ enum rq_flag_bits {
151 __REQ_IO_STAT, /* account I/O stat */ 151 __REQ_IO_STAT, /* account I/O stat */
152 __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 152 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
153 __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ 153 __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
154 __REQ_ON_PLUG, /* on plug list */
155 __REQ_NR_BITS, /* stops here */ 154 __REQ_NR_BITS, /* stops here */
156}; 155};
157 156
@@ -192,6 +191,5 @@ enum rq_flag_bits {
192#define REQ_IO_STAT (1 << __REQ_IO_STAT) 191#define REQ_IO_STAT (1 << __REQ_IO_STAT)
193#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) 192#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
194#define REQ_SECURE (1 << __REQ_SECURE) 193#define REQ_SECURE (1 << __REQ_SECURE)
195#define REQ_ON_PLUG (1 << __REQ_ON_PLUG)
196 194
197#endif /* __LINUX_BLK_TYPES_H */ 195#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2ad95fa1d130..ae9091a68480 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -257,7 +257,7 @@ struct queue_limits {
257 unsigned char misaligned; 257 unsigned char misaligned;
258 unsigned char discard_misaligned; 258 unsigned char discard_misaligned;
259 unsigned char cluster; 259 unsigned char cluster;
260 signed char discard_zeroes_data; 260 unsigned char discard_zeroes_data;
261}; 261};
262 262
263struct request_queue 263struct request_queue
@@ -364,6 +364,8 @@ struct request_queue
364 * for flush operations 364 * for flush operations
365 */ 365 */
366 unsigned int flush_flags; 366 unsigned int flush_flags;
367 unsigned int flush_not_queueable:1;
368 unsigned int flush_queue_delayed:1;
367 unsigned int flush_pending_idx:1; 369 unsigned int flush_pending_idx:1;
368 unsigned int flush_running_idx:1; 370 unsigned int flush_running_idx:1;
369 unsigned long flush_pending_since; 371 unsigned long flush_pending_since;
@@ -843,6 +845,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
843extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 845extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
844extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 846extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
845extern void blk_queue_flush(struct request_queue *q, unsigned int flush); 847extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
848extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
846extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 849extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
847 850
848extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 851extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1066,13 +1069,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
1066{ 1069{
1067 unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); 1070 unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
1068 1071
1072 if (!lim->max_discard_sectors)
1073 return 0;
1074
1069 return (lim->discard_granularity + lim->discard_alignment - alignment) 1075 return (lim->discard_granularity + lim->discard_alignment - alignment)
1070 & (lim->discard_granularity - 1); 1076 & (lim->discard_granularity - 1);
1071} 1077}
1072 1078
1073static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) 1079static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1074{ 1080{
1075 if (q->limits.discard_zeroes_data == 1) 1081 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
1076 return 1; 1082 return 1;
1077 1083
1078 return 0; 1084 return 0;
@@ -1111,6 +1117,11 @@ static inline unsigned int block_size(struct block_device *bdev)
1111 return bdev->bd_block_size; 1117 return bdev->bd_block_size;
1112} 1118}
1113 1119
1120static inline bool queue_flush_queueable(struct request_queue *q)
1121{
1122 return !q->flush_not_queueable;
1123}
1124
1114typedef struct {struct page *v;} Sector; 1125typedef struct {struct page *v;} Sector;
1115 1126
1116unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); 1127unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
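For upper layers, the practical upshot is that the discard helpers now answer correctly on queues without discard support, and queue_flush_queueable() exposes whether a flush request may sit on the queue. A brief illustrative check a filesystem or stacking driver might perform (the function and policy are made up):

#include <linux/blkdev.h>

static bool foo_can_rely_on_discard_zeroing(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        /*
         * discard_zeroes_data is only meaningful when discard is
         * actually supported; the helper now checks
         * max_discard_sectors before trusting the flag.
         */
        return q && queue_discard_zeroes_data(q);
}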
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 01eca1794e14..ab344a521105 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -99,24 +99,31 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
99 unsigned long align, 99 unsigned long align,
100 unsigned long goal); 100 unsigned long goal);
101 101
102#ifdef CONFIG_NO_BOOTMEM
103/* We are using top down, so it is safe to use 0 here */
104#define BOOTMEM_LOW_LIMIT 0
105#else
106#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
107#endif
108
102#define alloc_bootmem(x) \ 109#define alloc_bootmem(x) \
103 __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 110 __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
104#define alloc_bootmem_align(x, align) \ 111#define alloc_bootmem_align(x, align) \
105 __alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS)) 112 __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
106#define alloc_bootmem_nopanic(x) \ 113#define alloc_bootmem_nopanic(x) \
107 __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 114 __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
108#define alloc_bootmem_pages(x) \ 115#define alloc_bootmem_pages(x) \
109 __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 116 __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
110#define alloc_bootmem_pages_nopanic(x) \ 117#define alloc_bootmem_pages_nopanic(x) \
111 __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 118 __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
112#define alloc_bootmem_node(pgdat, x) \ 119#define alloc_bootmem_node(pgdat, x) \
113 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 120 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
114#define alloc_bootmem_node_nopanic(pgdat, x) \ 121#define alloc_bootmem_node_nopanic(pgdat, x) \
115 __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 122 __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
116#define alloc_bootmem_pages_node(pgdat, x) \ 123#define alloc_bootmem_pages_node(pgdat, x) \
117 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 124 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
118#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ 125#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
119 __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 126 __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
120 127
121#define alloc_bootmem_low(x) \ 128#define alloc_bootmem_low(x) \
122 __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) 129 __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f5df23561b96..503c8a6b3079 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -217,8 +217,24 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
217 get_block_t *, loff_t *); 217 get_block_t *, loff_t *);
218int generic_cont_expand_simple(struct inode *inode, loff_t size); 218int generic_cont_expand_simple(struct inode *inode, loff_t size);
219int block_commit_write(struct page *page, unsigned from, unsigned to); 219int block_commit_write(struct page *page, unsigned from, unsigned to);
220int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
221 get_block_t get_block);
220int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, 222int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
221 get_block_t get_block); 223 get_block_t get_block);
224/* Convert errno to return value from ->page_mkwrite() call */
225static inline int block_page_mkwrite_return(int err)
226{
227 if (err == 0)
228 return VM_FAULT_LOCKED;
229 if (err == -EFAULT)
230 return VM_FAULT_NOPAGE;
231 if (err == -ENOMEM)
232 return VM_FAULT_OOM;
233 if (err == -EAGAIN)
234 return VM_FAULT_RETRY;
235 /* -ENOSPC, -EDQUOT, -EIO ... */
236 return VM_FAULT_SIGBUS;
237}
222sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); 238sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
223int block_truncate_page(struct address_space *, loff_t, get_block_t *); 239int block_truncate_page(struct address_space *, loff_t, get_block_t *);
224int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, 240int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
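The intended pattern for the pair added above is inside a filesystem's ->page_mkwrite() handler: take whatever lock keeps the block mapping stable, call __block_page_mkwrite(), then translate the errno with block_page_mkwrite_return(). A hedged skeleton; foo_get_block() and the locking comments are placeholders:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int foo_get_block(struct inode *inode, sector_t iblock,
                         struct buffer_head *bh, int create);   /* placeholder */

static int foo_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int err;

        /* ... take the lock that keeps the block mapping stable ... */
        err = __block_page_mkwrite(vma, vmf, foo_get_block);
        /* ... drop that lock; on success the page is returned locked ... */

        return block_page_mkwrite_return(err);
}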
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 2a5cd867c365..a2f7d7413f30 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -60,9 +60,6 @@ struct c2port_ops {
60 * Exported functions 60 * Exported functions
61 */ 61 */
62 62
63#define to_class_dev(obj) container_of((obj), struct class_device, kobj)
64#define to_c2port_device(obj) container_of((obj), struct c2port_device, class)
65
66extern struct c2port_device *c2port_device_register(char *name, 63extern struct c2port_device *c2port_device_register(char *name,
67 struct c2port_ops *ops, void *devdata); 64 struct c2port_ops *ops, void *devdata);
68extern void c2port_device_unregister(struct c2port_device *dev); 65extern void c2port_device_unregister(struct c2port_device *dev);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index b8e995fbd867..b8c60694b2b0 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -313,6 +313,7 @@ enum {
313 CEPH_MDS_OP_GETATTR = 0x00101, 313 CEPH_MDS_OP_GETATTR = 0x00101,
314 CEPH_MDS_OP_LOOKUPHASH = 0x00102, 314 CEPH_MDS_OP_LOOKUPHASH = 0x00102,
315 CEPH_MDS_OP_LOOKUPPARENT = 0x00103, 315 CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
316 CEPH_MDS_OP_LOOKUPINO = 0x00104,
316 317
317 CEPH_MDS_OP_SETXATTR = 0x01105, 318 CEPH_MDS_OP_SETXATTR = 0x01105,
318 CEPH_MDS_OP_RMXATTR = 0x01106, 319 CEPH_MDS_OP_RMXATTR = 0x01106,
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5ac7ebc36dbb..ab4ac0ccb857 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -467,12 +467,14 @@ struct cgroup_subsys {
467 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 467 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
468 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 468 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
469 int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, 469 int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
470 struct task_struct *tsk, bool threadgroup); 470 struct task_struct *tsk);
471 int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
471 void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, 472 void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
472 struct task_struct *tsk, bool threadgroup); 473 struct task_struct *tsk);
474 void (*pre_attach)(struct cgroup *cgrp);
475 void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
473 void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, 476 void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
474 struct cgroup *old_cgrp, struct task_struct *tsk, 477 struct cgroup *old_cgrp, struct task_struct *tsk);
475 bool threadgroup);
476 void (*fork)(struct cgroup_subsys *ss, struct task_struct *task); 478 void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
477 void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp, 479 void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
478 struct cgroup *old_cgrp, struct task_struct *task); 480 struct cgroup *old_cgrp, struct task_struct *task);
@@ -553,9 +555,6 @@ static inline struct cgroup* task_cgroup(struct task_struct *task,
553 return task_subsys_state(task, subsys_id)->cgroup; 555 return task_subsys_state(task, subsys_id)->cgroup;
554} 556}
555 557
556int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss,
557 char *nodename);
558
559/* A cgroup_iter should be treated as an opaque object */ 558/* A cgroup_iter should be treated as an opaque object */
560struct cgroup_iter { 559struct cgroup_iter {
561 struct list_head *cg_link; 560 struct list_head *cg_link;
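
With can_attach()/cancel_attach() losing the threadgroup flag and gaining per-task hooks, a controller no longer walks the thread group itself. A rough sketch of how a subsystem might fill in the new callbacks follows; the subsystem name ("ex") and the empty checks are placeholders, only the signatures come from the struct above.

static int ex_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			 struct task_struct *tsk)
{
	return 0;		/* whole-group precondition checks */
}

static int ex_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	return 0;		/* invoked once per thread being moved */
}

static void ex_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	/* per-task state update; replaces the old "bool threadgroup" loop */
}

struct cgroup_subsys ex_subsys = {
	.name			= "ex",
	.can_attach		= ex_can_attach,
	.can_attach_task	= ex_can_attach_task,
	.attach_task		= ex_attach_task,
};
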
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index cdbfcb8780ec..ac663c18776c 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -19,12 +19,6 @@ SUBSYS(debug)
19 19
20/* */ 20/* */
21 21
22#ifdef CONFIG_CGROUP_NS
23SUBSYS(ns)
24#endif
25
26/* */
27
28#ifdef CONFIG_CGROUP_SCHED 22#ifdef CONFIG_CGROUP_SCHED
29SUBSYS(cpu_cgroup) 23SUBSYS(cpu_cgroup)
30#endif 24#endif
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
new file mode 100644
index 000000000000..04ffb2e6c9d0
--- /dev/null
+++ b/include/linux/cleancache.h
@@ -0,0 +1,122 @@
1#ifndef _LINUX_CLEANCACHE_H
2#define _LINUX_CLEANCACHE_H
3
4#include <linux/fs.h>
5#include <linux/exportfs.h>
6#include <linux/mm.h>
7
8#define CLEANCACHE_KEY_MAX 6
9
10/*
11 * cleancache requires every file with a page in cleancache to have a
12 * unique key unless/until the file is removed/truncated. For some
13 * filesystems, the inode number is unique, but for "modern" filesystems
14 * an exportable filehandle is required (see exportfs.h)
15 */
16struct cleancache_filekey {
17 union {
18 ino_t ino;
19 __u32 fh[CLEANCACHE_KEY_MAX];
20 u32 key[CLEANCACHE_KEY_MAX];
21 } u;
22};
23
24struct cleancache_ops {
25 int (*init_fs)(size_t);
26 int (*init_shared_fs)(char *uuid, size_t);
27 int (*get_page)(int, struct cleancache_filekey,
28 pgoff_t, struct page *);
29 void (*put_page)(int, struct cleancache_filekey,
30 pgoff_t, struct page *);
31 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
32 void (*flush_inode)(int, struct cleancache_filekey);
33 void (*flush_fs)(int);
34};
35
36extern struct cleancache_ops
37 cleancache_register_ops(struct cleancache_ops *ops);
38extern void __cleancache_init_fs(struct super_block *);
39extern void __cleancache_init_shared_fs(char *, struct super_block *);
40extern int __cleancache_get_page(struct page *);
41extern void __cleancache_put_page(struct page *);
42extern void __cleancache_flush_page(struct address_space *, struct page *);
43extern void __cleancache_flush_inode(struct address_space *);
44extern void __cleancache_flush_fs(struct super_block *);
45extern int cleancache_enabled;
46
47#ifdef CONFIG_CLEANCACHE
48static inline bool cleancache_fs_enabled(struct page *page)
49{
50 return page->mapping->host->i_sb->cleancache_poolid >= 0;
51}
52static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
53{
54 return mapping->host->i_sb->cleancache_poolid >= 0;
55}
56#else
57#define cleancache_enabled (0)
58#define cleancache_fs_enabled(_page) (0)
59#define cleancache_fs_enabled_mapping(_page) (0)
60#endif
61
62/*
63 * The shim layer provided by these inline functions allows the compiler
64 * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
65 * is disabled, to a single global variable check if CONFIG_CLEANCACHE
66 * is enabled but no cleancache "backend" has dynamically enabled it,
67 * and, for the most frequent cleancache ops, to a single global variable
68 * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
69 * and a cleancache backend has dynamically enabled cleancache, but the
70 * filesystem referenced by that cleancache op has not enabled cleancache.
71 * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
72 * no measurable performance impact.
73 */
74
75static inline void cleancache_init_fs(struct super_block *sb)
76{
77 if (cleancache_enabled)
78 __cleancache_init_fs(sb);
79}
80
81static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
82{
83 if (cleancache_enabled)
84 __cleancache_init_shared_fs(uuid, sb);
85}
86
87static inline int cleancache_get_page(struct page *page)
88{
89 int ret = -1;
90
91 if (cleancache_enabled && cleancache_fs_enabled(page))
92 ret = __cleancache_get_page(page);
93 return ret;
94}
95
96static inline void cleancache_put_page(struct page *page)
97{
98 if (cleancache_enabled && cleancache_fs_enabled(page))
99 __cleancache_put_page(page);
100}
101
102static inline void cleancache_flush_page(struct address_space *mapping,
103 struct page *page)
104{
105 /* careful... page->mapping is NULL sometimes when this is called */
106 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
107 __cleancache_flush_page(mapping, page);
108}
109
110static inline void cleancache_flush_inode(struct address_space *mapping)
111{
112 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
113 __cleancache_flush_inode(mapping);
114}
115
116static inline void cleancache_flush_fs(struct super_block *sb)
117{
118 if (cleancache_enabled)
119 __cleancache_flush_fs(sb);
120}
121
122#endif /* _LINUX_CLEANCACHE_H */
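
For orientation, here is a skeleton of what a cleancache "backend" has to provide to the ops table defined above. It is a sketch only: every function is a no-op, the pool-id accounting a real backend (such as a hypervisor- or compression-based page store) needs is left out, and all names are invented.

static int ex_init_fs(size_t pagesize)
{
	return 0;		/* would allocate and return a pool id */
}

static int ex_init_shared_fs(char *uuid, size_t pagesize)
{
	return 0;		/* shared pool keyed by the fs uuid */
}

static int ex_get_page(int pool_id, struct cleancache_filekey key,
		       pgoff_t index, struct page *page)
{
	return -1;		/* miss: page is not held by the backend */
}

static void ex_put_page(int pool_id, struct cleancache_filekey key,
			pgoff_t index, struct page *page)
{
	/* copy the clean page into backend-owned memory, or drop it */
}

static void ex_flush_page(int pool_id, struct cleancache_filekey key,
			  pgoff_t index)
{
}

static void ex_flush_inode(int pool_id, struct cleancache_filekey key)
{
}

static void ex_flush_fs(int pool_id)
{
}

static struct cleancache_ops ex_cleancache_ops = {
	.init_fs	= ex_init_fs,
	.init_shared_fs	= ex_init_shared_fs,
	.get_page	= ex_get_page,
	.put_page	= ex_put_page,
	.flush_page	= ex_flush_page,
	.flush_inode	= ex_flush_inode,
	.flush_fs	= ex_flush_fs,
};

static int __init ex_cleancache_init(void)
{
	/* Per the prototype above, this hands back whatever ops were
	 * registered before. */
	struct cleancache_ops old = cleancache_register_ops(&ex_cleancache_ops);

	(void)old;
	return 0;
}
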
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5778b559d59c..ddcb7db38e67 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -12,6 +12,8 @@
12#include <linux/sem.h> 12#include <linux/sem.h>
13#include <linux/socket.h> 13#include <linux/socket.h>
14#include <linux/if.h> 14#include <linux/if.h>
15#include <linux/fs.h>
16#include <linux/aio_abi.h> /* for aio_context_t */
15 17
16#include <asm/compat.h> 18#include <asm/compat.h>
17#include <asm/siginfo.h> 19#include <asm/siginfo.h>
@@ -26,7 +28,7 @@ typedef __compat_gid32_t compat_gid_t;
26struct compat_sel_arg_struct; 28struct compat_sel_arg_struct;
27struct rusage; 29struct rusage;
28 30
29struct compat_itimerspec { 31struct compat_itimerspec {
30 struct compat_timespec it_interval; 32 struct compat_timespec it_interval;
31 struct compat_timespec it_value; 33 struct compat_timespec it_value;
32}; 34};
@@ -70,9 +72,9 @@ struct compat_timex {
70 compat_long_t stbcnt; 72 compat_long_t stbcnt;
71 compat_int_t tai; 73 compat_int_t tai;
72 74
73 compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; 75 compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
74 compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; 76 compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
75 compat_int_t :32; compat_int_t :32; compat_int_t :32; 77 compat_int_t:32; compat_int_t:32; compat_int_t:32;
76}; 78};
77 79
78#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) 80#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
@@ -81,8 +83,10 @@ typedef struct {
81 compat_sigset_word sig[_COMPAT_NSIG_WORDS]; 83 compat_sigset_word sig[_COMPAT_NSIG_WORDS];
82} compat_sigset_t; 84} compat_sigset_t;
83 85
84extern int get_compat_timespec(struct timespec *, const struct compat_timespec __user *); 86extern int get_compat_timespec(struct timespec *,
85extern int put_compat_timespec(const struct timespec *, struct compat_timespec __user *); 87 const struct compat_timespec __user *);
88extern int put_compat_timespec(const struct timespec *,
89 struct compat_timespec __user *);
86 90
87struct compat_iovec { 91struct compat_iovec {
88 compat_uptr_t iov_base; 92 compat_uptr_t iov_base;
@@ -113,7 +117,8 @@ struct compat_rusage {
113 compat_long_t ru_nivcsw; 117 compat_long_t ru_nivcsw;
114}; 118};
115 119
116extern int put_compat_rusage(const struct rusage *, struct compat_rusage __user *); 120extern int put_compat_rusage(const struct rusage *,
121 struct compat_rusage __user *);
117 122
118struct compat_siginfo; 123struct compat_siginfo;
119 124
@@ -166,8 +171,7 @@ struct compat_ifmap {
166 unsigned char port; 171 unsigned char port;
167}; 172};
168 173
169struct compat_if_settings 174struct compat_if_settings {
170{
171 unsigned int type; /* Type of physical device or protocol */ 175 unsigned int type; /* Type of physical device or protocol */
172 unsigned int size; /* Size of the data allocated by the caller */ 176 unsigned int size; /* Size of the data allocated by the caller */
173 compat_uptr_t ifs_ifsu; /* union of pointers */ 177 compat_uptr_t ifs_ifsu; /* union of pointers */
@@ -195,8 +199,8 @@ struct compat_ifreq {
195}; 199};
196 200
197struct compat_ifconf { 201struct compat_ifconf {
198 compat_int_t ifc_len; /* size of buffer */ 202 compat_int_t ifc_len; /* size of buffer */
199 compat_caddr_t ifcbuf; 203 compat_caddr_t ifcbuf;
200}; 204};
201 205
202struct compat_robust_list { 206struct compat_robust_list {
@@ -209,6 +213,18 @@ struct compat_robust_list_head {
209 compat_uptr_t list_op_pending; 213 compat_uptr_t list_op_pending;
210}; 214};
211 215
216struct compat_statfs;
217struct compat_statfs64;
218struct compat_old_linux_dirent;
219struct compat_linux_dirent;
220struct linux_dirent64;
221struct compat_msghdr;
222struct compat_mmsghdr;
223struct compat_sysinfo;
224struct compat_sysctl_args;
225struct compat_kexec_segment;
226struct compat_mq_attr;
227
212extern void compat_exit_robust_list(struct task_struct *curr); 228extern void compat_exit_robust_list(struct task_struct *curr);
213 229
214asmlinkage long 230asmlinkage long
@@ -243,8 +259,8 @@ asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
243 const struct compat_iovec __user *vec, 259 const struct compat_iovec __user *vec,
244 unsigned long vlen, u32 pos_low, u32 pos_high); 260 unsigned long vlen, u32 pos_low, u32 pos_high);
245 261
246int compat_do_execve(char * filename, compat_uptr_t __user *argv, 262int compat_do_execve(char *filename, compat_uptr_t __user *argv,
247 compat_uptr_t __user *envp, struct pt_regs * regs); 263 compat_uptr_t __user *envp, struct pt_regs *regs);
248 264
249asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, 265asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
250 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 266 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
@@ -331,12 +347,18 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
331 const compat_sigset_t __user *sigmask, 347 const compat_sigset_t __user *sigmask,
332 compat_size_t sigsetsize); 348 compat_size_t sigsetsize);
333 349
334asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename, 350asmlinkage long compat_sys_utime(const char __user *filename,
335 struct compat_timespec __user *t, int flags); 351 struct compat_utimbuf __user *t);
352asmlinkage long compat_sys_utimensat(unsigned int dfd,
353 const char __user *filename,
354 struct compat_timespec __user *t,
355 int flags);
336 356
357asmlinkage long compat_sys_time(compat_time_t __user *tloc);
358asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
337asmlinkage long compat_sys_signalfd(int ufd, 359asmlinkage long compat_sys_signalfd(int ufd,
338 const compat_sigset_t __user *sigmask, 360 const compat_sigset_t __user *sigmask,
339 compat_size_t sigsetsize); 361 compat_size_t sigsetsize);
340asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, 362asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
341 const struct compat_itimerspec __user *utmr, 363 const struct compat_itimerspec __user *utmr,
342 struct compat_itimerspec __user *otmr); 364 struct compat_itimerspec __user *otmr);
@@ -348,16 +370,190 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
348 const int __user *nodes, 370 const int __user *nodes,
349 int __user *status, 371 int __user *status,
350 int flags); 372 int flags);
351asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename, 373asmlinkage long compat_sys_futimesat(unsigned int dfd,
374 const char __user *filename,
352 struct compat_timeval __user *t); 375 struct compat_timeval __user *t);
353asmlinkage long compat_sys_newfstatat(unsigned int dfd, const char __user * filename, 376asmlinkage long compat_sys_utimes(const char __user *filename,
377 struct compat_timeval __user *t);
378asmlinkage long compat_sys_newstat(const char __user *filename,
379 struct compat_stat __user *statbuf);
380asmlinkage long compat_sys_newlstat(const char __user *filename,
381 struct compat_stat __user *statbuf);
382asmlinkage long compat_sys_newfstatat(unsigned int dfd,
383 const char __user *filename,
354 struct compat_stat __user *statbuf, 384 struct compat_stat __user *statbuf,
355 int flag); 385 int flag);
386asmlinkage long compat_sys_newfstat(unsigned int fd,
387 struct compat_stat __user *statbuf);
388asmlinkage long compat_sys_statfs(const char __user *pathname,
389 struct compat_statfs __user *buf);
390asmlinkage long compat_sys_fstatfs(unsigned int fd,
391 struct compat_statfs __user *buf);
392asmlinkage long compat_sys_statfs64(const char __user *pathname,
393 compat_size_t sz,
394 struct compat_statfs64 __user *buf);
395asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
396 struct compat_statfs64 __user *buf);
397asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
398 unsigned long arg);
399asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
400 unsigned long arg);
401asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
402asmlinkage long compat_sys_io_getevents(aio_context_t ctx_id,
403 unsigned long min_nr,
404 unsigned long nr,
405 struct io_event __user *events,
406 struct compat_timespec __user *timeout);
407asmlinkage long compat_sys_io_submit(aio_context_t ctx_id, int nr,
408 u32 __user *iocb);
409asmlinkage long compat_sys_mount(const char __user *dev_name,
410 const char __user *dir_name,
411 const char __user *type, unsigned long flags,
412 const void __user *data);
413asmlinkage long compat_sys_old_readdir(unsigned int fd,
414 struct compat_old_linux_dirent __user *,
415 unsigned int count);
416asmlinkage long compat_sys_getdents(unsigned int fd,
417 struct compat_linux_dirent __user *dirent,
418 unsigned int count);
419asmlinkage long compat_sys_getdents64(unsigned int fd,
420 struct linux_dirent64 __user *dirent,
421 unsigned int count);
422asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
423 unsigned int nr_segs, unsigned int flags);
424asmlinkage long compat_sys_open(const char __user *filename, int flags,
425 int mode);
356asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, 426asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
357 int flags, int mode); 427 int flags, int mode);
428asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
429 struct file_handle __user *handle,
430 int flags);
431asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
432 compat_ulong_t __user *outp,
433 compat_ulong_t __user *exp,
434 struct compat_timespec __user *tsp,
435 void __user *sig);
436asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
437 unsigned int nfds,
438 struct compat_timespec __user *tsp,
439 const compat_sigset_t __user *sigmask,
440 compat_size_t sigsetsize);
441#if (defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)) && \
442 !defined(CONFIG_NFSD_DEPRECATED)
443union compat_nfsctl_res;
444struct compat_nfsctl_arg;
445asmlinkage long compat_sys_nfsservctl(int cmd,
446 struct compat_nfsctl_arg __user *arg,
447 union compat_nfsctl_res __user *res);
448#else
449asmlinkage long compat_sys_nfsservctl(int cmd, void *notused, void *notused2);
450#endif
451asmlinkage long compat_sys_signalfd4(int ufd,
452 const compat_sigset_t __user *sigmask,
453 compat_size_t sigsetsize, int flags);
454asmlinkage long compat_sys_get_mempolicy(int __user *policy,
455 compat_ulong_t __user *nmask,
456 compat_ulong_t maxnode,
457 compat_ulong_t addr,
458 compat_ulong_t flags);
459asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
460 compat_ulong_t maxnode);
461asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
462 compat_ulong_t mode,
463 compat_ulong_t __user *nmask,
464 compat_ulong_t maxnode, compat_ulong_t flags);
465
466asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
467 char __user *optval, unsigned int optlen);
468asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
469 unsigned flags);
470asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
471 unsigned int flags);
472asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
473 unsigned flags);
474asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
475 unsigned flags, struct sockaddr __user *addr,
476 int __user *addrlen);
477asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
478 unsigned vlen, unsigned int flags,
479 struct compat_timespec __user *timeout);
480asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
481 struct compat_timespec __user *rmtp);
482asmlinkage long compat_sys_getitimer(int which,
483 struct compat_itimerval __user *it);
484asmlinkage long compat_sys_setitimer(int which,
485 struct compat_itimerval __user *in,
486 struct compat_itimerval __user *out);
487asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
488asmlinkage long compat_sys_setrlimit(unsigned int resource,
489 struct compat_rlimit __user *rlim);
490asmlinkage long compat_sys_getrlimit(unsigned int resource,
491 struct compat_rlimit __user *rlim);
492asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
493asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
494 unsigned int len,
495 compat_ulong_t __user *user_mask_ptr);
496asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
497 unsigned int len,
498 compat_ulong_t __user *user_mask_ptr);
499asmlinkage long compat_sys_timer_create(clockid_t which_clock,
500 struct compat_sigevent __user *timer_event_spec,
501 timer_t __user *created_timer_id);
502asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
503 struct compat_itimerspec __user *new,
504 struct compat_itimerspec __user *old);
505asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
506 struct compat_itimerspec __user *setting);
507asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
508 struct compat_timespec __user *tp);
509asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
510 struct compat_timespec __user *tp);
511asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
512 struct compat_timex __user *tp);
513asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
514 struct compat_timespec __user *tp);
515asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
516 struct compat_timespec __user *rqtp,
517 struct compat_timespec __user *rmtp);
518asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
519 struct compat_siginfo __user *uinfo,
520 struct compat_timespec __user *uts, compat_size_t sigsetsize);
521asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
522 compat_size_t sigsetsize);
523asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
524asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
525 unsigned long arg);
526asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
527 struct compat_timespec __user *utime, u32 __user *uaddr2,
528 u32 val3);
529asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
530 char __user *optval, int __user *optlen);
531asmlinkage long compat_sys_kexec_load(unsigned long entry,
532 unsigned long nr_segments,
533 struct compat_kexec_segment __user *,
534 unsigned long flags);
535asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
536 const struct compat_mq_attr __user *u_mqstat,
537 struct compat_mq_attr __user *u_omqstat);
538asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
539 const struct compat_sigevent __user *u_notification);
540asmlinkage long compat_sys_mq_open(const char __user *u_name,
541 int oflag, compat_mode_t mode,
542 struct compat_mq_attr __user *u_attr);
543asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
544 const char __user *u_msg_ptr,
545 size_t msg_len, unsigned int msg_prio,
546 const struct compat_timespec __user *u_abs_timeout);
547asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
548 char __user *u_msg_ptr,
549 size_t msg_len, unsigned int __user *u_msg_prio,
550 const struct compat_timespec __user *u_abs_timeout);
551asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
552asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
358 553
359extern ssize_t compat_rw_copy_check_uvector(int type, 554extern ssize_t compat_rw_copy_check_uvector(int type,
360 const struct compat_iovec __user *uvector, unsigned long nr_segs, 555 const struct compat_iovec __user *uvector,
556 unsigned long nr_segs,
361 unsigned long fast_segs, struct iovec *fast_pointer, 557 unsigned long fast_segs, struct iovec *fast_pointer,
362 struct iovec **ret_pointer); 558 struct iovec **ret_pointer);
363 559
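
Most of the prototypes above follow one pattern: a compat entry point converts 32-bit layouts such as struct compat_timespec into their native counterparts and then shares code with the 64-bit syscall. A hedged illustration, with an invented syscall name and no real body:

/* Invented example; only get_compat_timespec() comes from this header. */
asmlinkage long compat_sys_example_wait(struct compat_timespec __user *utmr)
{
	struct timespec ts;

	/* Copies and widens the 32-bit timespec from user space. */
	if (get_compat_timespec(&ts, utmr))
		return -EFAULT;

	/* ... pass "ts" to the same helper the native syscall uses ... */
	return 0;
}
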
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cb4c1eb7778e..59e4028e833d 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -34,8 +34,12 @@
34 __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ 34 __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
35 (typeof(ptr)) (__ptr + (off)); }) 35 (typeof(ptr)) (__ptr + (off)); })
36 36
37#ifdef __CHECKER__
38#define __must_be_array(arr) 0
39#else
37/* &a[0] degrades to a pointer: a different type from an array */ 40/* &a[0] degrades to a pointer: a different type from an array */
38#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) 41#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
42#endif
39 43
40/* 44/*
41 * Force always-inline if the user requests it so via the .config, 45 * Force always-inline if the user requests it so via the .config,
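
The reason __must_be_array() needs a sparse escape hatch is visible in ARRAY_SIZE(), which (in kernel.h) adds it to reject pointers at compile time. A small illustrative function, with made-up buffers:

static int ex_array_size_demo(void)
{
	int table[8];
	int *p = table;

	/* Fine: table really is an array, so the BUILD_BUG_ON_ZERO()
	 * hidden inside __must_be_array() evaluates to 0. */
	BUILD_BUG_ON(ARRAY_SIZE(table) != 8);

	/* ARRAY_SIZE(p) would break the gcc build, because p and &p[0]
	 * have the same type; under sparse the check is now a no-op. */
	return p != NULL;
}
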
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 64b7c003fd7a..dfadc96e9d63 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -51,7 +51,7 @@
51#if __GNUC_MINOR__ > 0 51#if __GNUC_MINOR__ > 0
52#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) 52#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
53#endif 53#endif
54#if __GNUC_MINOR__ >= 4 54#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__)
55#define __compiletime_warning(message) __attribute__((warning(message))) 55#define __compiletime_warning(message) __attribute__((warning(message)))
56#define __compiletime_error(message) __attribute__((error(message))) 56#define __compiletime_error(message) __attribute__((error(message)))
57#endif 57#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bae6fe24d1f9..b24ac56477b4 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -547,6 +547,21 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
547} 547}
548 548
549/** 549/**
550 * cpumask_parselist_user - extract a cpumask from a user string
551 * @buf: the buffer to extract from
552 * @len: the length of the buffer
553 * @dstp: the cpumask to set.
554 *
555 * Returns -errno, or 0 for success.
556 */
557static inline int cpumask_parselist_user(const char __user *buf, int len,
558 struct cpumask *dstp)
559{
560 return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
561 nr_cpumask_bits);
562}
563
564/**
550 * cpulist_scnprintf - print a cpumask into a string as comma-separated list 565 * cpulist_scnprintf - print a cpumask into a string as comma-separated list
551 * @buf: the buffer to sprintf into 566 * @buf: the buffer to sprintf into
552 * @len: the length of the buffer 567 * @len: the length of the buffer
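
cpumask_parselist_user() is the list-format sibling of cpumask_parse_user() just above it: it accepts strings such as "0,3-5" rather than hex masks. A sketch of a write handler using it; the file it backs and what the mask is applied to are invented:

static ssize_t ex_cpus_allowed_write(struct file *file,
				     const char __user *buf,
				     size_t count, loff_t *ppos)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	/* Parses the "0,3-5" style list straight from the user buffer. */
	err = cpumask_parselist_user(buf, count, new_mask);
	if (err)
		goto out;

	/* ... apply new_mask to whatever object this file controls ... */
out:
	free_cpumask_var(new_mask);
	return err ? err : count;
}
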
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 088cd4ace4ef..74054074e876 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -66,6 +66,11 @@ static inline void vmcore_unusable(void)
66 if (is_kdump_kernel()) 66 if (is_kdump_kernel())
67 elfcorehdr_addr = ELFCORE_ADDR_ERR; 67 elfcorehdr_addr = ELFCORE_ADDR_ERR;
68} 68}
69
70#define HAVE_OLDMEM_PFN_IS_RAM 1
71extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
72extern void unregister_oldmem_pfn_is_ram(void);
73
69#else /* !CONFIG_CRASH_DUMP */ 74#else /* !CONFIG_CRASH_DUMP */
70static inline int is_kdump_kernel(void) { return 0; } 75static inline int is_kdump_kernel(void) { return 0; }
71#endif /* CONFIG_CRASH_DUMP */ 76#endif /* CONFIG_CRASH_DUMP */
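
register_oldmem_pfn_is_ram() lets a paravirtualized guest's kdump kernel skip old-kernel pfns that are not actually backed by memory. A bare-bones registration sketch; the predicate is a stand-in for a real hypervisor query, and the "return 1 means RAM" convention is an assumption, not something stated in this hunk:

static int ex_oldmem_pfn_is_ram(unsigned long pfn)
{
	return 1;	/* ask the hypervisor whether pfn is backed by RAM */
}

static int __init ex_oldmem_init(void)
{
	return register_oldmem_pfn_is_ram(ex_oldmem_pfn_is_ram);
}

static void __exit ex_oldmem_exit(void)
{
	unregister_oldmem_pfn_is_ram();
}
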
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index cec467f5d676..9e5f5607eba3 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -38,7 +38,7 @@
38 38
39/* Although the Linux source code makes a difference between 39/* Although the Linux source code makes a difference between
40 generic endianness and the bitfields' endianness, there is no 40 generic endianness and the bitfields' endianness, there is no
41 architecture as of Linux-2.6.24-rc4 where the bitfileds' endianness 41 architecture as of Linux-2.6.24-rc4 where the bitfields' endianness
42 does not match the generic endianness. */ 42 does not match the generic endianness. */
43 43
44#if __BYTE_ORDER == __LITTLE_ENDIAN 44#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -53,7 +53,7 @@
53 53
54 54
55extern const char *drbd_buildtag(void); 55extern const char *drbd_buildtag(void);
56#define REL_VERSION "8.3.10" 56#define REL_VERSION "8.3.11"
57#define API_VERSION 88 57#define API_VERSION 88
58#define PRO_VERSION_MIN 86 58#define PRO_VERSION_MIN 86
59#define PRO_VERSION_MAX 96 59#define PRO_VERSION_MAX 96
@@ -195,7 +195,7 @@ enum drbd_conns {
195 C_WF_REPORT_PARAMS, /* we have a socket */ 195 C_WF_REPORT_PARAMS, /* we have a socket */
196 C_CONNECTED, /* we have introduced each other */ 196 C_CONNECTED, /* we have introduced each other */
197 C_STARTING_SYNC_S, /* starting full sync by admin request. */ 197 C_STARTING_SYNC_S, /* starting full sync by admin request. */
198 C_STARTING_SYNC_T, /* stariing full sync by admin request. */ 198 C_STARTING_SYNC_T, /* starting full sync by admin request. */
199 C_WF_BITMAP_S, 199 C_WF_BITMAP_S,
200 C_WF_BITMAP_T, 200 C_WF_BITMAP_T,
201 C_WF_SYNC_UUID, 201 C_WF_SYNC_UUID,
@@ -236,7 +236,7 @@ union drbd_state {
236 * pointed out by Maxim Uvarov q<muvarov@ru.mvista.com> 236 * pointed out by Maxim Uvarov q<muvarov@ru.mvista.com>
237 * even though we transmit as "cpu_to_be32(state)", 237 * even though we transmit as "cpu_to_be32(state)",
238 * the offsets of the bitfields still need to be swapped 238 * the offsets of the bitfields still need to be swapped
239 * on different endianess. 239 * on different endianness.
240 */ 240 */
241 struct { 241 struct {
242#if defined(__LITTLE_ENDIAN_BITFIELD) 242#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -266,7 +266,7 @@ union drbd_state {
266 unsigned peer:2 ; /* 3/4 primary/secondary/unknown */ 266 unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
267 unsigned role:2 ; /* 3/4 primary/secondary/unknown */ 267 unsigned role:2 ; /* 3/4 primary/secondary/unknown */
268#else 268#else
269# error "this endianess is not supported" 269# error "this endianness is not supported"
270#endif 270#endif
271 }; 271 };
272 unsigned int i; 272 unsigned int i;
diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h
index f14a165e82dc..069543190516 100644
--- a/include/linux/drbd_tag_magic.h
+++ b/include/linux/drbd_tag_magic.h
@@ -30,7 +30,7 @@ enum packet_types {
30 int tag_and_len ## member; 30 int tag_and_len ## member;
31#include "linux/drbd_nl.h" 31#include "linux/drbd_nl.h"
32 32
33/* declate tag-list-sizes */ 33/* declare tag-list-sizes */
34static const int tag_list_sizes[] = { 34static const int tag_list_sizes[] = {
35#define NL_PACKET(name, number, fields) 2 fields , 35#define NL_PACKET(name, number, fields) 2 fields ,
36#define NL_INTEGER(pn, pr, member) + 4 + 4 36#define NL_INTEGER(pn, pr, member) + 4 + 4
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index ebeb2f3ad068..6843cf193a44 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -21,6 +21,8 @@ struct flex_array {
21 struct { 21 struct {
22 int element_size; 22 int element_size;
23 int total_nr_elements; 23 int total_nr_elements;
24 int elems_per_part;
25 u32 reciprocal_elems;
24 struct flex_array_part *parts[]; 26 struct flex_array_part *parts[];
25 }; 27 };
26 /* 28 /*
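
The two new fields cache what would otherwise be recomputed on every lookup: how many elements fit in one part, and a precomputed reciprocal so the part number can be found with a multiply instead of a divide. The snippet below only paraphrases that idea using the existing <linux/reciprocal_div.h> helpers; it is not code from flex_array itself:

/* reciprocal_elems would be set up once as
 * reciprocal_value(elems_per_part) when the flex_array is created. */
static unsigned int ex_part_nr(unsigned int element_nr, u32 reciprocal_elems)
{
	/* element_nr / elems_per_part, done as a multiply-and-shift */
	return reciprocal_divide(element_nr, reciprocal_elems);
}
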
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cdf9495df204..241609346dfb 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -23,7 +23,8 @@
23 23
24/* Fixed constants first: */ 24/* Fixed constants first: */
25#undef NR_OPEN 25#undef NR_OPEN
26#define INR_OPEN 1024 /* Initial setting for nfile rlimits */ 26#define INR_OPEN_CUR 1024 /* Initial setting for nfile rlimits */
27#define INR_OPEN_MAX 4096 /* Hard limit for nfile rlimits */
27 28
28#define BLOCK_SIZE_BITS 10 29#define BLOCK_SIZE_BITS 10
29#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) 30#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
@@ -634,8 +635,7 @@ struct address_space {
634 unsigned int i_mmap_writable;/* count VM_SHARED mappings */ 635 unsigned int i_mmap_writable;/* count VM_SHARED mappings */
635 struct prio_tree_root i_mmap; /* tree of private and shared mappings */ 636 struct prio_tree_root i_mmap; /* tree of private and shared mappings */
636 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ 637 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
637 spinlock_t i_mmap_lock; /* protect tree, count, list */ 638 struct mutex i_mmap_mutex; /* protect tree, count, list */
638 unsigned int truncate_count; /* Cover race condition with truncate */
639 unsigned long nrpages; /* number of total pages */ 639 unsigned long nrpages; /* number of total pages */
640 pgoff_t writeback_index;/* writeback starts here */ 640 pgoff_t writeback_index;/* writeback starts here */
641 const struct address_space_operations *a_ops; /* methods */ 641 const struct address_space_operations *a_ops; /* methods */
@@ -644,7 +644,6 @@ struct address_space {
644 spinlock_t private_lock; /* for use by the address_space */ 644 spinlock_t private_lock; /* for use by the address_space */
645 struct list_head private_list; /* ditto */ 645 struct list_head private_list; /* ditto */
646 struct address_space *assoc_mapping; /* ditto */ 646 struct address_space *assoc_mapping; /* ditto */
647 struct mutex unmap_mutex; /* to protect unmapping */
648} __attribute__((aligned(sizeof(long)))); 647} __attribute__((aligned(sizeof(long))));
649 /* 648 /*
650 * On most architectures that alignment is already the case; but 649 * On most architectures that alignment is already the case; but
@@ -1429,6 +1428,11 @@ struct super_block {
1429 */ 1428 */
1430 char __rcu *s_options; 1429 char __rcu *s_options;
1431 const struct dentry_operations *s_d_op; /* default d_op for dentries */ 1430 const struct dentry_operations *s_d_op; /* default d_op for dentries */
1431
1432 /*
1433 * Saved pool identifier for cleancache (-1 means none)
1434 */
1435 int cleancache_poolid;
1432}; 1436};
1433 1437
1434extern struct timespec current_fs_time(struct super_block *sb); 1438extern struct timespec current_fs_time(struct super_block *sb);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 76427e688d15..af095b54502e 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -100,17 +100,6 @@ struct fscache_operation {
100 100
101 /* operation releaser */ 101 /* operation releaser */
102 fscache_operation_release_t release; 102 fscache_operation_release_t release;
103
104#ifdef CONFIG_WORKQUEUE_DEBUGFS
105 struct work_struct put_work; /* work to delay operation put */
106 const char *name; /* operation name */
107 const char *state; /* operation state */
108#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
109#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
110#else
111#define fscache_set_op_name(OP, N) do { } while(0)
112#define fscache_set_op_state(OP, S) do { } while(0)
113#endif
114}; 103};
115 104
116extern atomic_t fscache_op_debug_id; 105extern atomic_t fscache_op_debug_id;
@@ -137,7 +126,6 @@ static inline void fscache_operation_init(struct fscache_operation *op,
137 op->processor = processor; 126 op->processor = processor;
138 op->release = release; 127 op->release = release;
139 INIT_LIST_HEAD(&op->pend_link); 128 INIT_LIST_HEAD(&op->pend_link);
140 fscache_set_op_state(op, "Init");
141} 129}
142 130
143/* 131/*
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 9869ef3674ac..5bbebda78b02 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -9,6 +9,8 @@
9 */ 9 */
10 10
11 11
12#ifndef __GENALLOC_H__
13#define __GENALLOC_H__
12/* 14/*
13 * General purpose special memory pool descriptor. 15 * General purpose special memory pool descriptor.
14 */ 16 */
@@ -24,13 +26,34 @@ struct gen_pool {
24struct gen_pool_chunk { 26struct gen_pool_chunk {
25 spinlock_t lock; 27 spinlock_t lock;
26 struct list_head next_chunk; /* next chunk in pool */ 28 struct list_head next_chunk; /* next chunk in pool */
29 phys_addr_t phys_addr; /* physical starting address of memory chunk */
27 unsigned long start_addr; /* starting address of memory chunk */ 30 unsigned long start_addr; /* starting address of memory chunk */
28 unsigned long end_addr; /* ending address of memory chunk */ 31 unsigned long end_addr; /* ending address of memory chunk */
29 unsigned long bits[0]; /* bitmap for allocating memory chunk */ 32 unsigned long bits[0]; /* bitmap for allocating memory chunk */
30}; 33};
31 34
32extern struct gen_pool *gen_pool_create(int, int); 35extern struct gen_pool *gen_pool_create(int, int);
33extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int); 36extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
37extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
38 size_t, int);
39/**
40 * gen_pool_add - add a new chunk of special memory to the pool
41 * @pool: pool to add new memory chunk to
42 * @addr: starting address of memory chunk to add to pool
43 * @size: size in bytes of the memory chunk to add to pool
44 * @nid: node id of the node the chunk structure and bitmap should be
45 * allocated on, or -1
46 *
47 * Add a new chunk of special memory to the specified pool.
48 *
49 * Returns 0 on success or a -ve errno on failure.
50 */
51static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
52 size_t size, int nid)
53{
54 return gen_pool_add_virt(pool, addr, -1, size, nid);
55}
34extern void gen_pool_destroy(struct gen_pool *); 56extern void gen_pool_destroy(struct gen_pool *);
35extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); 57extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
36extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); 58extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
59#endif /* __GENALLOC_H__ */
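
With gen_pool_add_virt() recording a physical base per chunk, a driver can allocate from, say, on-chip SRAM and still get a bus address back for DMA. A sketch follows; the addresses, sizes and the pool's purpose are made up, only the genalloc calls mirror the declarations above:

static struct gen_pool *ex_sram_pool;

static int __init ex_sram_pool_init(void)
{
	unsigned long vaddr;
	phys_addr_t paddr;

	/* 32-byte minimum allocation granularity (order 5), any node. */
	ex_sram_pool = gen_pool_create(5, -1);
	if (!ex_sram_pool)
		return -ENOMEM;

	/* Register the chunk with both its virtual and physical base;
	 * gen_pool_add() above is this call with phys fixed to -1. */
	if (gen_pool_add_virt(ex_sram_pool, 0xfe400000UL, 0x40200000,
			      0x10000, -1)) {
		gen_pool_destroy(ex_sram_pool);
		return -ENOMEM;
	}

	vaddr = gen_pool_alloc(ex_sram_pool, 256);
	if (vaddr) {
		paddr = gen_pool_virt_to_phys(ex_sram_pool, vaddr);
		pr_info("ex_sram: virt %#lx -> phys %#llx\n",
			vaddr, (unsigned long long)paddr);
		gen_pool_free(ex_sram_pool, vaddr, 256);
	}
	return 0;
}
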
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index d764a426e9fd..b78956b3c2e7 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -100,7 +100,6 @@ struct hd_struct {
100 sector_t start_sect; 100 sector_t start_sect;
101 sector_t nr_sects; 101 sector_t nr_sects;
102 sector_t alignment_offset; 102 sector_t alignment_offset;
103 unsigned int discard_alignment;
104 struct device __dev; 103 struct device __dev;
105 struct kobject *holder_dir; 104 struct kobject *holder_dir;
106 int policy, partno; 105 int policy, partno;
@@ -127,6 +126,7 @@ struct hd_struct {
127#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 126#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
128#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ 127#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
129#define GENHD_FL_NATIVE_CAPACITY 128 128#define GENHD_FL_NATIVE_CAPACITY 128
129#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
130 130
131enum { 131enum {
132 DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ 132 DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 56d8fc87fbbc..cb4089254f01 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -249,14 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
249 249
250 z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & 250 z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
251 ((1 << ZONES_SHIFT) - 1); 251 ((1 << ZONES_SHIFT) - 1);
252 252 VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
253 if (__builtin_constant_p(bit))
254 BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
255 else {
256#ifdef CONFIG_DEBUG_VM
257 BUG_ON((GFP_ZONE_BAD >> bit) & 1);
258#endif
259 }
260 return z; 253 return z;
261} 254}
262 255
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8847c8c29791..48c32ebf65a7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -92,12 +92,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
92#define wait_split_huge_page(__anon_vma, __pmd) \ 92#define wait_split_huge_page(__anon_vma, __pmd) \
93 do { \ 93 do { \
94 pmd_t *____pmd = (__pmd); \ 94 pmd_t *____pmd = (__pmd); \
95 spin_unlock_wait(&(__anon_vma)->root->lock); \ 95 anon_vma_lock(__anon_vma); \
96 /* \ 96 anon_vma_unlock(__anon_vma); \
97 * spin_unlock_wait() is just a loop in C and so the \
98 * CPU can reorder anything around it. \
99 */ \
100 smp_mb(); \
101 BUG_ON(pmd_trans_splitting(*____pmd) || \ 97 BUG_ON(pmd_trans_splitting(*____pmd) || \
102 pmd_trans_huge(*____pmd)); \ 98 pmd_trans_huge(*____pmd)); \
103 } while (0) 99 } while (0)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 943c76b3d4bb..59225ef27d15 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1,6 +1,7 @@
1#ifndef _LINUX_HUGETLB_H 1#ifndef _LINUX_HUGETLB_H
2#define _LINUX_HUGETLB_H 2#define _LINUX_HUGETLB_H
3 3
4#include <linux/mm_types.h>
4#include <linux/fs.h> 5#include <linux/fs.h>
5#include <linux/hugetlb_inline.h> 6#include <linux/hugetlb_inline.h>
6 7
@@ -41,7 +42,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
41 unsigned long address, unsigned int flags); 42 unsigned long address, unsigned int flags);
42int hugetlb_reserve_pages(struct inode *inode, long from, long to, 43int hugetlb_reserve_pages(struct inode *inode, long from, long to,
43 struct vm_area_struct *vma, 44 struct vm_area_struct *vma,
44 int acctflags); 45 vm_flags_t vm_flags);
45void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); 46void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
46int dequeue_hwpoisoned_huge_page(struct page *page); 47int dequeue_hwpoisoned_huge_page(struct page *page);
47void copy_huge_page(struct page *dst, struct page *src); 48void copy_huge_page(struct page *dst, struct page *src);
@@ -168,7 +169,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
168 169
169extern const struct file_operations hugetlbfs_file_operations; 170extern const struct file_operations hugetlbfs_file_operations;
170extern const struct vm_operations_struct hugetlb_vm_ops; 171extern const struct vm_operations_struct hugetlb_vm_ops;
171struct file *hugetlb_file_setup(const char *name, size_t size, int acct, 172struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
172 struct user_struct **user, int creat_flags); 173 struct user_struct **user, int creat_flags);
173int hugetlb_get_quota(struct address_space *mapping, long delta); 174int hugetlb_get_quota(struct address_space *mapping, long delta);
174void hugetlb_put_quota(struct address_space *mapping, long delta); 175void hugetlb_put_quota(struct address_space *mapping, long delta);
@@ -192,7 +193,7 @@ static inline void set_file_hugepages(struct file *file)
192#define is_file_hugepages(file) 0 193#define is_file_hugepages(file) 0
193#define set_file_hugepages(file) BUG() 194#define set_file_hugepages(file) BUG()
194static inline struct file *hugetlb_file_setup(const char *name, size_t size, 195static inline struct file *hugetlb_file_setup(const char *name, size_t size,
195 int acctflag, struct user_struct **user, int creat_flags) 196 vm_flags_t acctflag, struct user_struct **user, int creat_flags)
196{ 197{
197 return ERR_PTR(-ENOSYS); 198 return ERR_PTR(-ENOSYS);
198} 199}
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 6931489a5c14..2bb681fbeb35 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -7,7 +7,7 @@
7 7
8static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) 8static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
9{ 9{
10 return vma->vm_flags & VM_HUGETLB; 10 return !!(vma->vm_flags & VM_HUGETLB);
11} 11}
12 12
13#else 13#else
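
The !! above looks cosmetic but is not: the helper returns int, and once the flags word (vm_flags_t elsewhere in this series) can be wider than int, "flags & FLAG" for a high bit would be truncated to 0 on the way out. A tiny illustration with hypothetical bit positions:

/* Hypothetical 64-bit flag word and a flag above bit 31. */
static int ex_flag_set(unsigned long long flags, unsigned long long flag)
{
	/* "return flags & flag;" would lose flag = 1ULL << 40 when the
	 * result is narrowed to int; !! normalises to 0 or 1 first. */
	return !!(flags & flag);
}
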
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index f1e3ff5880a9..a6c652ef516d 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -409,7 +409,7 @@ void i2c_unlock_adapter(struct i2c_adapter *);
409/* i2c adapter classes (bitmask) */ 409/* i2c adapter classes (bitmask) */
410#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ 410#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
411#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ 411#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
412#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ 412#define I2C_CLASS_SPD (1<<7) /* Memory modules */
413 413
414/* Internal numbers to terminate lists */ 414/* Internal numbers to terminate lists */
415#define I2C_CLIENT_END 0xfffeU 415#define I2C_CLIENT_END 0xfffeU
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 0c0d1ae79981..ba4f88624fcd 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -91,6 +91,7 @@
91#define BCI_INTR_OFFSET 2 91#define BCI_INTR_OFFSET 2
92#define MADC_INTR_OFFSET 3 92#define MADC_INTR_OFFSET 3
93#define USB_INTR_OFFSET 4 93#define USB_INTR_OFFSET 4
94#define CHARGERFAULT_INTR_OFFSET 5
94#define BCI_PRES_INTR_OFFSET 9 95#define BCI_PRES_INTR_OFFSET 9
95#define USB_PRES_INTR_OFFSET 10 96#define USB_PRES_INTR_OFFSET 10
96#define RTC_INTR_OFFSET 11 97#define RTC_INTR_OFFSET 11
@@ -150,7 +151,12 @@
150#define MMC_PU (0x1 << 3) 151#define MMC_PU (0x1 << 3)
151#define MMC_PD (0x1 << 2) 152#define MMC_PD (0x1 << 2)
152 153
153 154#define TWL_SIL_TYPE(rev) ((rev) & 0x00FFFFFF)
155#define TWL_SIL_REV(rev) ((rev) >> 24)
156#define TWL_SIL_5030 0x09002F
157#define TWL5030_REV_1_0 0x00
158#define TWL5030_REV_1_1 0x10
159#define TWL5030_REV_1_2 0x30
154 160
155#define TWL4030_CLASS_ID 0x4030 161#define TWL4030_CLASS_ID 0x4030
156#define TWL6030_CLASS_ID 0x6030 162#define TWL6030_CLASS_ID 0x6030
@@ -165,6 +171,8 @@ static inline int twl_class_is_ ##class(void) \
165TWL_CLASS_IS(4030, TWL4030_CLASS_ID) 171TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
166TWL_CLASS_IS(6030, TWL6030_CLASS_ID) 172TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
167 173
174#define TWL6025_SUBCLASS BIT(4) /* TWL6025 has changed registers */
175
168/* 176/*
169 * Read and write single 8-bit registers 177 * Read and write single 8-bit registers
170 */ 178 */
@@ -180,6 +188,9 @@ int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
180int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); 188int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
181int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); 189int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
182 190
191int twl_get_type(void);
192int twl_get_version(void);
193
183int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); 194int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
184int twl6030_interrupt_mask(u8 bit_mask, u8 offset); 195int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
185 196
@@ -279,7 +290,12 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
279 *(Use TWL_4030_MODULE_INTBR) 290 *(Use TWL_4030_MODULE_INTBR)
280 */ 291 */
281 292
293#define REG_IDCODE_7_0 0x00
294#define REG_IDCODE_15_8 0x01
295#define REG_IDCODE_16_23 0x02
296#define REG_IDCODE_31_24 0x03
282#define REG_GPPUPDCTR1 0x0F 297#define REG_GPPUPDCTR1 0x0F
298#define REG_UNLOCK_TEST_REG 0x12
283 299
284/*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */ 300/*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */
285 301
@@ -288,6 +304,8 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
288#define SR_I2C_SCL_CTRL_PU BIT(4) 304#define SR_I2C_SCL_CTRL_PU BIT(4)
289#define SR_I2C_SDA_CTRL_PU BIT(6) 305#define SR_I2C_SDA_CTRL_PU BIT(6)
290 306
307#define TWL_EEPROM_R_UNLOCK 0x49
308
291/*----------------------------------------------------------------------*/ 309/*----------------------------------------------------------------------*/
292 310
293/* 311/*
@@ -501,7 +519,7 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
501#define RES_32KCLKOUT 26 519#define RES_32KCLKOUT 26
502#define RES_RESET 27 520#define RES_RESET 27
503/* Power Reference */ 521/* Power Reference */
504#define RES_Main_Ref 28 522#define RES_MAIN_REF 28
505 523
506#define TOTAL_RESOURCES 28 524#define TOTAL_RESOURCES 28
507/* 525/*
@@ -593,6 +611,7 @@ enum twl4030_usb_mode {
593 611
594struct twl4030_usb_data { 612struct twl4030_usb_data {
595 enum twl4030_usb_mode usb_mode; 613 enum twl4030_usb_mode usb_mode;
614 unsigned long features;
596 615
597 int (*phy_init)(struct device *dev); 616 int (*phy_init)(struct device *dev);
598 int (*phy_exit)(struct device *dev); 617 int (*phy_exit)(struct device *dev);
@@ -699,6 +718,20 @@ struct twl4030_platform_data {
699 struct regulator_init_data *vcxio; 718 struct regulator_init_data *vcxio;
700 struct regulator_init_data *vusb; 719 struct regulator_init_data *vusb;
701 struct regulator_init_data *clk32kg; 720 struct regulator_init_data *clk32kg;
721 /* TWL6025 LDO regulators */
722 struct regulator_init_data *ldo1;
723 struct regulator_init_data *ldo2;
724 struct regulator_init_data *ldo3;
725 struct regulator_init_data *ldo4;
726 struct regulator_init_data *ldo5;
727 struct regulator_init_data *ldo6;
728 struct regulator_init_data *ldo7;
729 struct regulator_init_data *ldoln;
730 struct regulator_init_data *ldousb;
731 /* TWL6025 DCDC regulators */
732 struct regulator_init_data *smps3;
733 struct regulator_init_data *smps4;
734 struct regulator_init_data *vio6025;
702}; 735};
703 736
704/*----------------------------------------------------------------------*/ 737/*----------------------------------------------------------------------*/
@@ -780,4 +813,21 @@ static inline int twl4030charger_usb_en(int enable) { return 0; }
780#define TWL6030_REG_VRTC 47 813#define TWL6030_REG_VRTC 47
781#define TWL6030_REG_CLK32KG 48 814#define TWL6030_REG_CLK32KG 48
782 815
816/* LDOs on 6025 have different names */
817#define TWL6025_REG_LDO2 49
818#define TWL6025_REG_LDO4 50
819#define TWL6025_REG_LDO3 51
820#define TWL6025_REG_LDO5 52
821#define TWL6025_REG_LDO1 53
822#define TWL6025_REG_LDO7 54
823#define TWL6025_REG_LDO6 55
824#define TWL6025_REG_LDOLN 56
825#define TWL6025_REG_LDOUSB 57
826
827/* 6025 DCDC supplies */
828#define TWL6025_REG_SMPS3 58
829#define TWL6025_REG_SMPS4 59
830#define TWL6025_REG_VIO 60
831
832
783#endif /* End of __TWL4030_H */ 833#endif /* End of __TWL4030_H */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f4a2e6b1b864..0ee969a5593d 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -136,6 +136,7 @@ enum {
136 IFLA_PORT_SELF, 136 IFLA_PORT_SELF,
137 IFLA_AF_SPEC, 137 IFLA_AF_SPEC,
138 IFLA_GROUP, /* Group the device belongs to */ 138 IFLA_GROUP, /* Group the device belongs to */
139 IFLA_NET_NS_FD,
139 __IFLA_MAX 140 __IFLA_MAX
140}; 141};
141 142
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 290bd8ac94cf..dc01681fbb42 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -110,6 +110,11 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
110 array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; 110 array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
111} 111}
112 112
113static inline int is_vlan_dev(struct net_device *dev)
114{
115 return dev->priv_flags & IFF_802_1Q_VLAN;
116}
117
113#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) 118#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
114#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) 119#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
115 120
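
is_vlan_dev() gives callers outside the VLAN code a cheap test before reaching for VLAN-specific helpers. A typical (invented) use, assuming the existing vlan_dev_real_dev() accessor:

static struct net_device *ex_real_dev(struct net_device *dev)
{
	/* Peel a VLAN device back to its underlying port, otherwise
	 * hand back the device unchanged. */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	return dev;
}
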
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index bafc58c00fc3..580f70c02391 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -22,6 +22,14 @@
22extern struct files_struct init_files; 22extern struct files_struct init_files;
23extern struct fs_struct init_fs; 23extern struct fs_struct init_fs;
24 24
25#ifdef CONFIG_CGROUPS
26#define INIT_THREADGROUP_FORK_LOCK(sig) \
27 .threadgroup_fork_lock = \
28 __RWSEM_INITIALIZER(sig.threadgroup_fork_lock),
29#else
30#define INIT_THREADGROUP_FORK_LOCK(sig)
31#endif
32
25#define INIT_SIGNALS(sig) { \ 33#define INIT_SIGNALS(sig) { \
26 .nr_threads = 1, \ 34 .nr_threads = 1, \
27 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ 35 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -38,6 +46,7 @@ extern struct fs_struct init_fs;
38 }, \ 46 }, \
39 .cred_guard_mutex = \ 47 .cred_guard_mutex = \
40 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 48 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
49 INIT_THREADGROUP_FORK_LOCK(sig) \
41} 50}
42 51
43extern struct nsproxy init_nsproxy; 52extern struct nsproxy init_nsproxy;
diff --git a/include/linux/input/pmic8xxx-keypad.h b/include/linux/input/pmic8xxx-keypad.h
new file mode 100644
index 000000000000..5f1e2f9ad959
--- /dev/null
+++ b/include/linux/input/pmic8xxx-keypad.h
@@ -0,0 +1,52 @@
1/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#ifndef __PMIC8XXX_KEYPAD_H__
14#define __PMIC8XXX_KEYPAD_H__
15
16#include <linux/input/matrix_keypad.h>
17
18#define PM8XXX_KEYPAD_DEV_NAME "pm8xxx-keypad"
19
20/**
21 * struct pm8xxx_keypad_platform_data - platform data for keypad
22 * @keymap_data - matrix keymap data
23 * @input_name - input device name
24 * @input_phys_device - input device name
25 * @num_cols - number of columns of keypad
 26 * @num_rows - number of rows of keypad
27 * @debounce_ms - debounce period in milliseconds
28 * @scan_delay_ms - scan delay in milliseconds
29 * @row_hold_ns - row hold period in nanoseconds
30 * @wakeup - configure keypad as wakeup
31 * @rep - enable or disable key repeat bit
32 */
33struct pm8xxx_keypad_platform_data {
34 const struct matrix_keymap_data *keymap_data;
35
36 const char *input_name;
37 const char *input_phys_device;
38
39 unsigned int num_cols;
40 unsigned int num_rows;
41 unsigned int rows_gpio_start;
42 unsigned int cols_gpio_start;
43
44 unsigned int debounce_ms;
45 unsigned int scan_delay_ms;
46 unsigned int row_hold_ns;
47
48 bool wakeup;
49 bool rep;
50};
51
52#endif /*__PMIC8XXX_KEYPAD_H__ */
diff --git a/include/linux/input/pmic8xxx-pwrkey.h b/include/linux/input/pmic8xxx-pwrkey.h
new file mode 100644
index 000000000000..6d2974e57109
--- /dev/null
+++ b/include/linux/input/pmic8xxx-pwrkey.h
@@ -0,0 +1,31 @@
1/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#ifndef __PMIC8XXX_PWRKEY_H__
14#define __PMIC8XXX_PWRKEY_H__
15
16#define PM8XXX_PWRKEY_DEV_NAME "pm8xxx-pwrkey"
17
18/**
19 * struct pm8xxx_pwrkey_platform_data - platform data for pwrkey driver
 20 * @pull_up: power on register control for pull up/down configuration
21 * @kpd_trigger_delay_us: time delay for power key state change interrupt
22 * trigger.
23 * @wakeup: configure power key as wakeup source
24 */
25struct pm8xxx_pwrkey_platform_data {
26 bool pull_up;
27 u32 kpd_trigger_delay_us;
28 u32 wakeup;
29};
30
31#endif /* __PMIC8XXX_PWRKEY_H__ */
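
Both new headers are consumed from board files as platform data. The fragment below shows the shape of that wiring; every value (key map, timings, delays) is a placeholder rather than something taken from this patch, and registration of the actual platform devices is omitted:

static const uint32_t ex_keymap[] = {
	KEY(0, 0, KEY_VOLUMEUP),
	KEY(0, 1, KEY_VOLUMEDOWN),
};

static struct matrix_keymap_data ex_keymap_data = {
	.keymap		= ex_keymap,
	.keymap_size	= ARRAY_SIZE(ex_keymap),
};

static struct pm8xxx_keypad_platform_data ex_keypad_pdata = {
	.keymap_data	= &ex_keymap_data,
	.input_name	= "ex-keypad",
	.num_rows	= 1,
	.num_cols	= 2,
	.debounce_ms	= 15,
	.scan_delay_ms	= 32,
	.row_hold_ns	= 91500,
	.wakeup		= true,
	.rep		= false,
};

static struct pm8xxx_pwrkey_platform_data ex_pwrkey_pdata = {
	.pull_up		= true,
	.kpd_trigger_delay_us	= 970,
	.wakeup			= true,
};
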
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 906590aa6907..204f9cd26c16 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -236,7 +236,7 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
236 directory for this interface. Note that the entry will 236 directory for this interface. Note that the entry will
 237 automatically be destroyed when the interface is destroyed. */ 237 automatically be destroyed when the interface is destroyed. */
238int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 238int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
239 read_proc_t *read_proc, 239 const struct file_operations *proc_ops,
240 void *data); 240 void *data);
241 241
242#endif /* __LINUX_IPMI_SMI_H */ 242#endif /* __LINUX_IPMI_SMI_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index a32dcaec04e1..4ecb7b16b278 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -529,9 +529,10 @@ struct transaction_s
529 enum { 529 enum {
530 T_RUNNING, 530 T_RUNNING,
531 T_LOCKED, 531 T_LOCKED,
532 T_RUNDOWN,
533 T_FLUSH, 532 T_FLUSH,
534 T_COMMIT, 533 T_COMMIT,
534 T_COMMIT_DFLUSH,
535 T_COMMIT_JFLUSH,
535 T_FINISHED 536 T_FINISHED
536 } t_state; 537 } t_state;
537 538
@@ -658,7 +659,9 @@ struct transaction_s
658 * waiting for it to finish. 659 * waiting for it to finish.
659 */ 660 */
660 unsigned int t_synchronous_commit:1; 661 unsigned int t_synchronous_commit:1;
661 unsigned int t_flushed_data_blocks:1; 662
663 /* Disk flush needs to be sent to fs partition [no locking] */
664 int t_need_data_flush;
662 665
663 /* 666 /*
664 * For use by the filesystem to store fs-specific data 667 * For use by the filesystem to store fs-specific data
@@ -1228,6 +1231,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
1228int jbd2_journal_force_commit_nested(journal_t *journal); 1231int jbd2_journal_force_commit_nested(journal_t *journal);
1229int jbd2_log_wait_commit(journal_t *journal, tid_t tid); 1232int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
1230int jbd2_log_do_checkpoint(journal_t *journal); 1233int jbd2_log_do_checkpoint(journal_t *journal);
1234int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
1231 1235
1232void __jbd2_log_wait_for_space(journal_t *journal); 1236void __jbd2_log_wait_for_space(journal_t *journal);
1233extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); 1237extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
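The new t_need_data_flush flag and jbd2_trans_will_send_data_barrier() let an fsync in data=ordered mode skip its own cache flush when the commit it is about to wait for will flush the device anyway. A rough, hedged sketch of that caller-side logic (the helper name is made up; the mainline user is ext4's fsync path, and blkdev_issue_flush() is used in its three-argument form of this era):

static int example_fsync_tail(journal_t *journal, tid_t commit_tid,
			      struct block_device *bdev)
{
	/* Decide before waiting: will the commit itself flush the device? */
	int needs_flush = !jbd2_trans_will_send_data_barrier(journal, commit_tid);
	int ret;

	ret = jbd2_log_wait_commit(journal, commit_tid);
	if (needs_flush)
		blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	return ret;
}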
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f37ba716ef8b..fb0e7329fee1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -248,6 +248,37 @@ int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
248int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); 248int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
249int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); 249int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
250 250
251int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
252int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
253int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
254int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
255int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
256int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
257int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
258int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
259int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
260int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
261
262static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
263{
264 return kstrtoull_from_user(s, count, base, res);
265}
266
267static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
268{
269 return kstrtoll_from_user(s, count, base, res);
270}
271
272static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
273{
274 return kstrtouint_from_user(s, count, base, res);
275}
276
277static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
278{
279 return kstrtoint_from_user(s, count, base, res);
280}
281
251extern unsigned long simple_strtoul(const char *,char **,unsigned int); 282extern unsigned long simple_strtoul(const char *,char **,unsigned int);
252extern long simple_strtol(const char *,char **,unsigned int); 283extern long simple_strtol(const char *,char **,unsigned int);
253extern unsigned long long simple_strtoull(const char *,char **,unsigned int); 284extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
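These *_from_user variants parse a userspace buffer directly, removing the copy-and-NUL-terminate boilerplate from debugfs/procfs write handlers. A hedged sketch (handler and variable names are made up):

static unsigned int example_threshold;

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	unsigned int val;
	int ret;

	/* base 0 auto-detects decimal, octal and 0x-prefixed hex */
	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	example_threshold = val;
	return count;
}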
@@ -638,6 +669,13 @@ struct sysinfo {
638 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */ 669 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
639}; 670};
640 671
672#ifdef __CHECKER__
673#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
674#define BUILD_BUG_ON_ZERO(e)
675#define BUILD_BUG_ON_NULL(e)
676#define BUILD_BUG_ON(condition)
677#else /* __CHECKER__ */
678
641/* Force a compilation error if a constant expression is not a power of 2 */ 679/* Force a compilation error if a constant expression is not a power of 2 */
642#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ 680#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
643 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) 681 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
@@ -674,6 +712,7 @@ extern int __build_bug_on_failed;
674 if (condition) __build_bug_on_failed = 1; \ 712 if (condition) __build_bug_on_failed = 1; \
675 } while(0) 713 } while(0)
676#endif 714#endif
715#endif /* __CHECKER__ */
677 716
678/* Trap pasters of __FUNCTION__ at compile-time */ 717/* Trap pasters of __FUNCTION__ at compile-time */
679#define __FUNCTION__ (__func__) 718#define __FUNCTION__ (__func__)
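Usage of the BUILD_BUG_ON family is unchanged; the new __CHECKER__ branch just compiles the assertions away when sparse analyses the code. A small illustrative check (the constant is made up):

#define EXAMPLE_RING_SIZE	64

static inline void example_compile_time_checks(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(EXAMPLE_RING_SIZE);
	BUILD_BUG_ON(sizeof(u32) != 4);
}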
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index f158eb1149aa..b8d6fffed4d8 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -25,7 +25,7 @@ enum pca9532_state {
25}; 25};
26 26
27enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED, 27enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED,
28 PCA9532_TYPE_N2100_BEEP }; 28 PCA9532_TYPE_N2100_BEEP, PCA9532_TYPE_GPIO };
29 29
30struct pca9532_led { 30struct pca9532_led {
31 u8 id; 31 u8 id;
@@ -41,6 +41,7 @@ struct pca9532_platform_data {
41 struct pca9532_led leds[16]; 41 struct pca9532_led leds[16];
42 u8 pwm[2]; 42 u8 pwm[2];
43 u8 psc[2]; 43 u8 psc[2];
44 int gpio_base;
44}; 45};
45 46
46#endif /* __LINUX_PCA9532_H */ 47#endif /* __LINUX_PCA9532_H */
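With the new PCA9532_TYPE_GPIO type and gpio_base field, unused pins can be handed to gpiolib. A hypothetical platform-data sketch:

static struct pca9532_platform_data example_pca9532_pdata = {
	.leds = {
		[0] = { .type = PCA9532_TYPE_LED  },
		[1] = { .type = PCA9532_TYPE_GPIO },	/* exported via gpiolib */
	},
	.gpio_base = 200,	/* first GPIO number assigned to the chip's pins */
};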
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 61e0340a4b77..5884def15a24 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -207,5 +207,7 @@ struct gpio_led_platform_data {
207 unsigned long *delay_off); 207 unsigned long *delay_off);
208}; 208};
209 209
210struct platform_device *gpio_led_register_device(
211 int id, const struct gpio_led_platform_data *pdata);
210 212
211#endif /* __LINUX_LEDS_H_INCLUDED */ 213#endif /* __LINUX_LEDS_H_INCLUDED */
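A hedged board-code sketch using the new helper; LED names and GPIO numbers are illustrative:

static struct gpio_led example_leds[] = {
	{ .name = "board:green:status", .gpio = 12, .default_trigger = "heartbeat" },
	{ .name = "board:red:error",    .gpio = 13 },
};

static struct gpio_led_platform_data example_led_pdata = {
	.num_leds	= ARRAY_SIZE(example_leds),
	.leds		= example_leds,
};

static void __init example_board_leds_init(void)
{
	gpio_led_register_device(-1, &example_led_pdata);
}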
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4aef1dda6406..ef820a3c378b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -487,12 +487,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
487#ifdef CONFIG_DEBUG_LOCK_ALLOC 487#ifdef CONFIG_DEBUG_LOCK_ALLOC
488# ifdef CONFIG_PROVE_LOCKING 488# ifdef CONFIG_PROVE_LOCKING
489# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) 489# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
490# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
490# else 491# else
491# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) 492# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
493# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
492# endif 494# endif
493# define mutex_release(l, n, i) lock_release(l, n, i) 495# define mutex_release(l, n, i) lock_release(l, n, i)
494#else 496#else
495# define mutex_acquire(l, s, t, i) do { } while (0) 497# define mutex_acquire(l, s, t, i) do { } while (0)
498# define mutex_acquire_nest(l, s, t, n, i) do { } while (0)
496# define mutex_release(l, n, i) do { } while (0) 499# define mutex_release(l, n, i) do { } while (0)
497#endif 500#endif
498 501
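The new annotation backs mutex_lock_nest_lock(), which lets many locks of one class be taken while telling lockdep that a single outer lock serializes them. A hedged sketch, assuming the mutex_lock_nest_lock() helper from <linux/mutex.h> that accompanies this change:

/* Take every inner mutex under one outer mutex; the nest annotation keeps
 * lockdep from flagging repeated acquisition of the same lock class. */
static void example_lock_all(struct mutex *outer, struct mutex *inner, int n)
{
	int i;

	mutex_lock(outer);
	for (i = 0; i < n; i++)
		mutex_lock_nest_lock(&inner[i], outer);
}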
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 6a4fab7c6e09..7a71ffad037c 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -139,9 +139,9 @@ write intent log information, three of which are mentioned here.
139 * .list is on one of three lists: 139 * .list is on one of three lists:
140 * in_use: currently in use (refcnt > 0, lc_number != LC_FREE) 140 * in_use: currently in use (refcnt > 0, lc_number != LC_FREE)
141 * lru: unused but ready to be reused or recycled 141 * lru: unused but ready to be reused or recycled
142 * (ts_refcnt == 0, lc_number != LC_FREE), 142 * (lc_refcnt == 0, lc_number != LC_FREE),
143 * free: unused but ready to be recycled 143 * free: unused but ready to be recycled
144 * (ts_refcnt == 0, lc_number == LC_FREE), 144 * (lc_refcnt == 0, lc_number == LC_FREE),
145 * 145 *
146 * an element is said to be "in the active set", 146 * an element is said to be "in the active set",
147 * if either on "in_use" or "lru", i.e. lc_number != LC_FREE. 147 * if either on "in_use" or "lru", i.e. lc_number != LC_FREE.
@@ -160,8 +160,8 @@ struct lc_element {
160 struct hlist_node colision; 160 struct hlist_node colision;
161 struct list_head list; /* LRU list or free list */ 161 struct list_head list; /* LRU list or free list */
162 unsigned refcnt; 162 unsigned refcnt;
163 /* back "pointer" into ts_cache->element[index], 163 /* back "pointer" into lc_cache->element[index],
164 * for paranoia, and for "ts_element_to_index" */ 164 * for paranoia, and for "lc_element_to_index" */
165 unsigned lc_index; 165 unsigned lc_index;
166 /* if we want to track a larger set of objects, 166 /* if we want to track a larger set of objects,
167 * it needs to become arch independent u64 */ 167 * it needs to become arch independent u64 */
@@ -190,8 +190,8 @@ struct lru_cache {
190 /* Arbitrary limit on maximum tracked objects. Practical limit is much 190 /* Arbitrary limit on maximum tracked objects. Practical limit is much
191 * lower due to allocation failures, probably. For typical use cases, 191 * lower due to allocation failures, probably. For typical use cases,
192 * nr_elements should be a few thousand at most. 192 * nr_elements should be a few thousand at most.
193 * This also limits the maximum value of ts_element.ts_index, allowing the 193 * This also limits the maximum value of lc_element.lc_index, allowing the
194 * 8 high bits of .ts_index to be overloaded with flags in the future. */ 194 * 8 high bits of .lc_index to be overloaded with flags in the future. */
195#define LC_MAX_ACTIVE (1<<24) 195#define LC_MAX_ACTIVE (1<<24)
196 196
197 /* statistics */ 197 /* statistics */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 62a10c2a11f2..7525e38c434d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,6 +2,8 @@
2#define _LINUX_MEMBLOCK_H 2#define _LINUX_MEMBLOCK_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#define MEMBLOCK_ERROR 0
6
5#ifdef CONFIG_HAVE_MEMBLOCK 7#ifdef CONFIG_HAVE_MEMBLOCK
6/* 8/*
7 * Logical memory blocks. 9 * Logical memory blocks.
@@ -20,7 +22,6 @@
20#include <asm/memblock.h> 22#include <asm/memblock.h>
21 23
22#define INIT_MEMBLOCK_REGIONS 128 24#define INIT_MEMBLOCK_REGIONS 128
23#define MEMBLOCK_ERROR 0
24 25
25struct memblock_region { 26struct memblock_region {
26 phys_addr_t base; 27 phys_addr_t base;
@@ -160,6 +161,12 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
160#define __initdata_memblock 161#define __initdata_memblock
161#endif 162#endif
162 163
164#else
165static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
166{
167 return MEMBLOCK_ERROR;
168}
169
163#endif /* CONFIG_HAVE_MEMBLOCK */ 170#endif /* CONFIG_HAVE_MEMBLOCK */
164 171
165#endif /* __KERNEL__ */ 172#endif /* __KERNEL__ */
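Hoisting MEMBLOCK_ERROR out of the CONFIG_HAVE_MEMBLOCK block, together with the new stub, lets early-boot code call memblock_alloc() unconditionally. A hedged sketch:

static phys_addr_t __init example_reserve_buffer(phys_addr_t size)
{
	/* Returns MEMBLOCK_ERROR (0) when memblock is not configured in. */
	phys_addr_t addr = memblock_alloc(size, PAGE_SIZE);

	if (addr == MEMBLOCK_ERROR)
		pr_warn("example: no early allocation available\n");
	return addr;
}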
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5e9840f50980..9724a38ee69d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -20,6 +20,8 @@
20#ifndef _LINUX_MEMCONTROL_H 20#ifndef _LINUX_MEMCONTROL_H
21#define _LINUX_MEMCONTROL_H 21#define _LINUX_MEMCONTROL_H
22#include <linux/cgroup.h> 22#include <linux/cgroup.h>
23#include <linux/vm_event_item.h>
24
23struct mem_cgroup; 25struct mem_cgroup;
24struct page_cgroup; 26struct page_cgroup;
25struct page; 27struct page;
@@ -106,9 +108,10 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
106 */ 108 */
107int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); 109int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
108int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); 110int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
109unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, 111int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
110 struct zone *zone, 112unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
111 enum lru_list lru); 113 struct zone *zone,
114 enum lru_list lru);
112struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 115struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
113 struct zone *zone); 116 struct zone *zone);
114struct zone_reclaim_stat* 117struct zone_reclaim_stat*
@@ -144,9 +147,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
144} 147}
145 148
146unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 149unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
147 gfp_t gfp_mask); 150 gfp_t gfp_mask,
151 unsigned long *total_scanned);
148u64 mem_cgroup_get_limit(struct mem_cgroup *mem); 152u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
149 153
154void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
150#ifdef CONFIG_TRANSPARENT_HUGEPAGE 155#ifdef CONFIG_TRANSPARENT_HUGEPAGE
151void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail); 156void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
152#endif 157#endif
@@ -302,8 +307,8 @@ mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
302} 307}
303 308
304static inline unsigned long 309static inline unsigned long
305mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, 310mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
306 enum lru_list lru) 311 enum lru_list lru)
307{ 312{
308 return 0; 313 return 0;
309} 314}
@@ -338,7 +343,8 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
338 343
339static inline 344static inline
340unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 345unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
341 gfp_t gfp_mask) 346 gfp_t gfp_mask,
347 unsigned long *total_scanned)
342{ 348{
343 return 0; 349 return 0;
344} 350}
@@ -354,6 +360,10 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head,
354{ 360{
355} 361}
356 362
363static inline
364void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
365{
366}
357#endif /* CONFIG_CGROUP_MEM_CONT */ 367#endif /* CONFIG_CGROUP_MEM_CONT */
358 368
359#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) 369#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
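The new mem_cgroup_count_vm_event() hook mirrors count_vm_event() but charges the event to the owning memory cgroup; a minimal sketch of the intended fault-path usage (the mainline caller is the page-cache fault code):

static void example_account_major_fault(struct mm_struct *mm)
{
	count_vm_event(PGMAJFAULT);			/* global vmstat counter */
	mem_cgroup_count_vm_event(mm, PGMAJFAULT);	/* per-memcg counter */
}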
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 31ac26ca4acf..7978eec1b7d9 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -199,6 +199,9 @@ void mpol_free_shared_policy(struct shared_policy *p);
199struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, 199struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
200 unsigned long idx); 200 unsigned long idx);
201 201
202struct mempolicy *get_vma_policy(struct task_struct *tsk,
203 struct vm_area_struct *vma, unsigned long addr);
204
202extern void numa_default_policy(void); 205extern void numa_default_policy(void);
203extern void numa_policy_init(void); 206extern void numa_policy_init(void);
204extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, 207extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
@@ -228,10 +231,10 @@ int do_migrate_pages(struct mm_struct *mm,
228 231
229#ifdef CONFIG_TMPFS 232#ifdef CONFIG_TMPFS
230extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context); 233extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
234#endif
231 235
232extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, 236extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
233 int no_context); 237 int no_context);
234#endif
235 238
236/* Check if a vma is migratable */ 239/* Check if a vma is migratable */
237static inline int vma_migratable(struct vm_area_struct *vma) 240static inline int vma_migratable(struct vm_area_struct *vma)
@@ -368,13 +371,13 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
368{ 371{
369 return 1; /* error */ 372 return 1; /* error */
370} 373}
374#endif
371 375
372static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, 376static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
373 int no_context) 377 int no_context)
374{ 378{
375 return 0; 379 return 0;
376} 380}
377#endif
378 381
379#endif /* CONFIG_NUMA */ 382#endif /* CONFIG_NUMA */
380#endif /* __KERNEL__ */ 383#endif /* __KERNEL__ */
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 8fba7972ff5f..63b4fb8e3b6f 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -330,6 +330,11 @@ struct pm860x_led_pdata {
330 unsigned long flags; 330 unsigned long flags;
331}; 331};
332 332
333struct pm860x_rtc_pdata {
334 int (*sync)(unsigned int ticks);
335 int vrtc;
336};
337
333struct pm860x_touch_pdata { 338struct pm860x_touch_pdata {
334 int gpadc_prebias; 339 int gpadc_prebias;
335 int slot_cycle; 340 int slot_cycle;
@@ -349,6 +354,7 @@ struct pm860x_power_pdata {
349struct pm860x_platform_data { 354struct pm860x_platform_data {
350 struct pm860x_backlight_pdata *backlight; 355 struct pm860x_backlight_pdata *backlight;
351 struct pm860x_led_pdata *led; 356 struct pm860x_led_pdata *led;
357 struct pm860x_rtc_pdata *rtc;
352 struct pm860x_touch_pdata *touch; 358 struct pm860x_touch_pdata *touch;
353 struct pm860x_power_pdata *power; 359 struct pm860x_power_pdata *power;
354 struct regulator_init_data *regulator; 360 struct regulator_init_data *regulator;
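A hypothetical board wiring for the new RTC cell (the sync hook and VRTC setting are illustrative):

static int example_rtc_sync(unsigned int ticks)
{
	/* mirror the PMIC RTC tick count into a SoC-side backup counter */
	return 0;
}

static struct pm860x_rtc_pdata example_rtc_pdata = {
	.sync	= example_rtc_sync,
	.vrtc	= 1,
};

/* referenced from struct pm860x_platform_data as  .rtc = &example_rtc_pdata  */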
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 7d9b6ae1c203..896b5e47f16e 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -34,6 +34,13 @@
34#define AB5500_2_0 0x21 34#define AB5500_2_0 0x21
35#define AB5500_2_1 0x22 35#define AB5500_2_1 0x22
36 36
37/* AB8500 CIDs*/
38#define AB8500_CUTEARLY 0x00
39#define AB8500_CUT1P0 0x10
40#define AB8500_CUT1P1 0x11
41#define AB8500_CUT2P0 0x20
42#define AB8500_CUT3P0 0x30
43
37/* 44/*
38 * AB3100, EVENTA1, A2 and A3 event register flags 45 * AB3100, EVENTA1, A2 and A3 event register flags
39 * these are catenated into a single 32-bit flag in the code 46 * these are catenated into a single 32-bit flag in the code
@@ -186,6 +193,7 @@ struct abx500_init_settings {
186struct ab3550_platform_data { 193struct ab3550_platform_data {
187 struct {unsigned int base; unsigned int count; } irq; 194 struct {unsigned int base; unsigned int count; } irq;
188 void *dev_data[AB3550_NUM_DEVICES]; 195 void *dev_data[AB3550_NUM_DEVICES];
196 size_t dev_data_sz[AB3550_NUM_DEVICES];
189 struct abx500_init_settings *init_settings; 197 struct abx500_init_settings *init_settings;
190 unsigned int init_settings_sz; 198 unsigned int init_settings_sz;
191}; 199};
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
index de3c4ad19afb..ed793b77a1c5 100644
--- a/include/linux/mfd/asic3.h
+++ b/include/linux/mfd/asic3.h
@@ -16,6 +16,13 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18 18
19struct led_classdev;
20struct asic3_led {
21 const char *name;
22 const char *default_trigger;
23 struct led_classdev *cdev;
24};
25
19struct asic3_platform_data { 26struct asic3_platform_data {
20 u16 *gpio_config; 27 u16 *gpio_config;
21 unsigned int gpio_config_num; 28 unsigned int gpio_config_num;
@@ -23,6 +30,8 @@ struct asic3_platform_data {
23 unsigned int irq_base; 30 unsigned int irq_base;
24 31
25 unsigned int gpio_base; 32 unsigned int gpio_base;
33
34 struct asic3_led *leds;
26}; 35};
27 36
28#define ASIC3_NUM_GPIO_BANKS 4 37#define ASIC3_NUM_GPIO_BANKS 4
@@ -111,9 +120,9 @@ struct asic3_platform_data {
111#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0) 120#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0)
112#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0) 121#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0)
113#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0) 122#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0)
114#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 1, 0) 123#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0)
115#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 1, 0) 124#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0)
116#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 1, 0) 125#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0)
117#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0) 126#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0)
118#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0) 127#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0)
119#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0) 128#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0)
@@ -152,6 +161,7 @@ struct asic3_platform_data {
152#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */ 161#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */
153#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */ 162#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */
154 163
164#define ASIC3_NUM_LEDS 3
155#define ASIC3_LED_0_Base 0x0700 165#define ASIC3_LED_0_Base 0x0700
156#define ASIC3_LED_1_Base 0x0800 166#define ASIC3_LED_1_Base 0x0800
157#define ASIC3_LED_2_Base 0x0900 167#define ASIC3_LED_2_Base 0x0900
@@ -287,10 +297,17 @@ struct asic3_platform_data {
287 * 297 *
288 *****************************************************************************/ 298 *****************************************************************************/
289#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ 299#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */
300#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */
290#define ASIC3_SD_CTRL_BASE 0x1000 301#define ASIC3_SD_CTRL_BASE 0x1000
291#define ASIC3_SDIO_CTRL_BASE 0x1200 302#define ASIC3_SDIO_CTRL_BASE 0x1200
292 303
293#define ASIC3_MAP_SIZE_32BIT 0x2000 304#define ASIC3_MAP_SIZE_32BIT 0x2000
294#define ASIC3_MAP_SIZE_16BIT 0x1000 305#define ASIC3_MAP_SIZE_16BIT 0x1000
295 306
307/* Functions needed by leds-asic3 */
308
309struct asic3;
310extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val);
311extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg);
312
296#endif /* __ASIC3_H__ */ 313#endif /* __ASIC3_H__ */
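A hypothetical machine definition for the three LED cells introduced above:

static struct asic3_led example_asic3_leds[ASIC3_NUM_LEDS] = {
	[0] = { .name = "asic3:red",   .default_trigger = "heartbeat" },
	[1] = { .name = "asic3:green" },
	[2] = { .name = "asic3:blue"  },
};

/* hooked up through struct asic3_platform_data as  .leds = example_asic3_leds  */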
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index aef23309a742..4e76163dd862 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -33,8 +33,9 @@ struct mfd_cell {
33 int (*suspend)(struct platform_device *dev); 33 int (*suspend)(struct platform_device *dev);
34 int (*resume)(struct platform_device *dev); 34 int (*resume)(struct platform_device *dev);
35 35
36 /* mfd_data can be used to pass data to client drivers */ 36 /* platform data passed to the sub devices drivers */
37 void *mfd_data; 37 void *platform_data;
38 size_t pdata_size;
38 39
39 /* 40 /*
40 * These resources can be specified relative to the parent device. 41 * These resources can be specified relative to the parent device.
@@ -89,24 +90,6 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
89 return pdev->mfd_cell; 90 return pdev->mfd_cell;
90} 91}
91 92
92/*
93 * Given a platform device that's been created by mfd_add_devices(), fetch
94 * the .mfd_data entry from the mfd_cell that created it.
95 * Otherwise just return the platform_data pointer.
96 * This maintains compatibility with platform drivers whose devices aren't
97 * created by the mfd layer, and expect platform_data to contain what would've
98 * otherwise been in mfd_data.
99 */
100static inline void *mfd_get_data(struct platform_device *pdev)
101{
102 const struct mfd_cell *cell = mfd_get_cell(pdev);
103
104 if (cell)
105 return cell->mfd_data;
106 else
107 return pdev->dev.platform_data;
108}
109
110extern int mfd_add_devices(struct device *parent, int id, 93extern int mfd_add_devices(struct device *parent, int id,
111 struct mfd_cell *cells, int n_devs, 94 struct mfd_cell *cells, int n_devs,
112 struct resource *mem_base, 95 struct resource *mem_base,
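A hedged sketch of the replacement pattern (cell and pdata names are made up): the MFD core now copies platform_data of pdata_size bytes into each subdevice, so client drivers read it through dev.platform_data instead of the removed mfd_get_data():

struct example_sub_pdata {		/* hypothetical subdevice pdata */
	int irq_base;
};

static struct example_sub_pdata example_pdata = {
	.irq_base = 128,
};

static struct mfd_cell example_cells[] = {
	{
		.name		= "example-subdev",
		.platform_data	= &example_pdata,
		.pdata_size	= sizeof(example_pdata),
	},
};

/* In the parent driver's probe():
 *	mfd_add_devices(dev, -1, example_cells, ARRAY_SIZE(example_cells),
 *			NULL, 0);
 * The subdevice driver then uses dev_get_platdata(&pdev->dev).
 */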
diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h
new file mode 100644
index 000000000000..f0977986402c
--- /dev/null
+++ b/include/linux/mfd/db5500-prcmu.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 *
6 * U5500 PRCMU API.
7 */
8#ifndef __MACH_PRCMU_U5500_H
9#define __MACH_PRCMU_U5500_H
10
11#ifdef CONFIG_UX500_SOC_DB5500
12
13void db5500_prcmu_early_init(void);
14
15int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
16int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
17
18#else /* !CONFIG_UX500_SOC_DB5500 */
19
20static inline void db5500_prcmu_early_init(void)
21{
22}
23
24static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
25{
26 return -ENOSYS;
27}
28
29static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
30{
31 return -ENOSYS;
32}
33
34#endif /* CONFIG_UX500_SOC_DB5500 */
35
36static inline int db5500_prcmu_config_abb_event_readout(u32 abb_events)
37{
38#ifdef CONFIG_MACH_U5500_SIMULATOR
39 return 0;
40#else
41 return -1;
42#endif
43}
44
45#endif /* __MACH_PRCMU_U5500_H */
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
new file mode 100644
index 000000000000..917dbcab701c
--- /dev/null
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -0,0 +1,978 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7 *
8 * PRCMU f/w APIs
9 */
10#ifndef __MFD_DB8500_PRCMU_H
11#define __MFD_DB8500_PRCMU_H
12
13#include <linux/interrupt.h>
14#include <linux/notifier.h>
15
16/* This portion previously known as <mach/prcmu-fw-defs_v1.h> */
17
18/**
19 * enum state - ON/OFF state definition
20 * @OFF: State is OFF
21 * @ON: State is ON
22 *
23 */
24enum state {
25 OFF = 0x0,
26 ON = 0x1,
27};
28
29/**
30 * enum ret_state - general purpose On/Off/Retention states
31 *
32 */
33enum ret_state {
34 OFFST = 0,
35 ONST = 1,
36 RETST = 2
37};
38
39/**
40 * enum clk_arm - ARM Cortex A9 clock schemes
41 * @A9_OFF:
42 * @A9_BOOT:
43 * @A9_OPPT1:
44 * @A9_OPPT2:
45 * @A9_EXTCLK:
46 */
47enum clk_arm {
48 A9_OFF,
49 A9_BOOT,
50 A9_OPPT1,
51 A9_OPPT2,
52 A9_EXTCLK
53};
54
55/**
56 * enum clk_gen - GEN#0/GEN#1 clock schemes
57 * @GEN_OFF:
58 * @GEN_BOOT:
59 * @GEN_OPPT1:
60 */
61enum clk_gen {
62 GEN_OFF,
63 GEN_BOOT,
64 GEN_OPPT1,
65};
66
67/* some information between arm and xp70 */
68
69/**
70 * enum romcode_write - Romcode message written by A9 AND read by XP70
71 * @RDY_2_DS: Value set when ApDeepSleep state can be executed by XP70
72 * @RDY_2_XP70_RST: Value set when 0x0F has been successfully polled by the
73 * romcode. The xp70 will go into self-reset
74 */
75enum romcode_write {
76 RDY_2_DS = 0x09,
77 RDY_2_XP70_RST = 0x10
78};
79
80/**
81 * enum romcode_read - Romcode message written by XP70 and read by A9
82 * @INIT: Init value when romcode field is not used
83 * @FS_2_DS: Value set when power state is going from ApExecute to
84 * ApDeepSleep
85 * @END_DS: Value set when ApDeepSleep power state is reached coming from
86 * ApExecute state
87 * @DS_TO_FS: Value set when power state is going from ApDeepSleep to
88 * ApExecute
89 * @END_FS: Value set when ApExecute power state is reached coming from
90 * ApDeepSleep state
91 * @SWR: Value set when power state is going to ApReset
92 * @END_SWR: Value set when the xp70 finished executing ApReset actions and
93 * waits for romcode acknowledgment to go to self-reset
94 */
95enum romcode_read {
96 INIT = 0x00,
97 FS_2_DS = 0x0A,
98 END_DS = 0x0B,
99 DS_TO_FS = 0x0C,
100 END_FS = 0x0D,
101 SWR = 0x0E,
102 END_SWR = 0x0F
103};
104
105/**
106 * enum ap_pwrst - current power states defined in PRCMU firmware
107 * @NO_PWRST: Current power state init
108 * @AP_BOOT: Current power state is apBoot
109 * @AP_EXECUTE: Current power state is apExecute
110 * @AP_DEEP_SLEEP: Current power state is apDeepSleep
111 * @AP_SLEEP: Current power state is apSleep
112 * @AP_IDLE: Current power state is apIdle
113 * @AP_RESET: Current power state is apReset
114 */
115enum ap_pwrst {
116 NO_PWRST = 0x00,
117 AP_BOOT = 0x01,
118 AP_EXECUTE = 0x02,
119 AP_DEEP_SLEEP = 0x03,
120 AP_SLEEP = 0x04,
121 AP_IDLE = 0x05,
122 AP_RESET = 0x06
123};
124
125/**
126 * enum ap_pwrst_trans - Transition states defined in PRCMU firmware
127 * @NO_TRANSITION: No power state transition
128 * @APEXECUTE_TO_APSLEEP: Power state transition from ApExecute to ApSleep
129 * @APIDLE_TO_APSLEEP: Power state transition from ApIdle to ApSleep
130 * @APBOOT_TO_APEXECUTE: Power state transition from ApBoot to ApExecute
131 * @APEXECUTE_TO_APDEEPSLEEP: Power state transition from ApExecute to
132 * ApDeepSleep
133 * @APEXECUTE_TO_APIDLE: Power state transition from ApExecute to ApIdle
134 */
135enum ap_pwrst_trans {
136 NO_TRANSITION = 0x00,
137 APEXECUTE_TO_APSLEEP = 0x01,
138 APIDLE_TO_APSLEEP = 0x02, /* To be removed */
139 PRCMU_AP_SLEEP = 0x01,
140 APBOOT_TO_APEXECUTE = 0x03,
141 APEXECUTE_TO_APDEEPSLEEP = 0x04, /* To be removed */
142 PRCMU_AP_DEEP_SLEEP = 0x04,
143 APEXECUTE_TO_APIDLE = 0x05, /* To be removed */
144 PRCMU_AP_IDLE = 0x05,
145 PRCMU_AP_DEEP_IDLE = 0x07,
146};
147
148/**
149 * enum ddr_pwrst - DDR power states definition
150 * @DDR_PWR_STATE_UNCHANGED: SDRAM and DDR controller state is unchanged
151 * @DDR_PWR_STATE_ON:
152 * @DDR_PWR_STATE_OFFLOWLAT:
153 * @DDR_PWR_STATE_OFFHIGHLAT:
154 */
155enum ddr_pwrst {
156 DDR_PWR_STATE_UNCHANGED = 0x00,
157 DDR_PWR_STATE_ON = 0x01,
158 DDR_PWR_STATE_OFFLOWLAT = 0x02,
159 DDR_PWR_STATE_OFFHIGHLAT = 0x03
160};
161
162/**
163 * enum arm_opp - ARM OPP states definition
164 * @ARM_OPP_INIT:
165 * @ARM_NO_CHANGE: The ARM operating point is unchanged
166 * @ARM_100_OPP: The new ARM operating point is arm100opp
167 * @ARM_50_OPP: The new ARM operating point is arm50opp
168 * @ARM_MAX_OPP: Operating point is "max" (more than 100)
169 * @ARM_MAX_FREQ100OPP: Set max opp if available, else 100
170 * @ARM_EXTCLK: The new ARM operating point is armExtClk
171 */
172enum arm_opp {
173 ARM_OPP_INIT = 0x00,
174 ARM_NO_CHANGE = 0x01,
175 ARM_100_OPP = 0x02,
176 ARM_50_OPP = 0x03,
177 ARM_MAX_OPP = 0x04,
178 ARM_MAX_FREQ100OPP = 0x05,
179 ARM_EXTCLK = 0x07
180};
181
182/**
183 * enum ape_opp - APE OPP states definition
184 * @APE_OPP_INIT:
185 * @APE_NO_CHANGE: The APE operating point is unchanged
186 * @APE_100_OPP: The new APE operating point is ape100opp
187 * @APE_50_OPP: 50%
188 */
189enum ape_opp {
190 APE_OPP_INIT = 0x00,
191 APE_NO_CHANGE = 0x01,
192 APE_100_OPP = 0x02,
193 APE_50_OPP = 0x03
194};
195
196/**
197 * enum hw_acc_state - State definition for hardware accelerator
198 * @HW_NO_CHANGE: The hardware accelerator state must remain unchanged
199 * @HW_OFF: The hardware accelerator must be switched off
200 * @HW_OFF_RAMRET: The hardware accelerator must be switched off with its
201 * internal RAM in retention
202 * @HW_ON: The hardware accelerator must be switched on
203 *
204 * NOTE! Deprecated, to be removed when all users have switched over to
205 * the regulator API.
206 */
207enum hw_acc_state {
208 HW_NO_CHANGE = 0x00,
209 HW_OFF = 0x01,
210 HW_OFF_RAMRET = 0x02,
211 HW_ON = 0x04
212};
213
214/**
215 * enum ap_pwrsttr_status - Status messages definition for mbox_arm
216 * @BOOT_TO_EXECUTEOK: The apBoot to apExecute state transition has been
217 * completed
218 * @DEEPSLEEPOK: The apExecute to apDeepSleep state transition has been
219 * completed
220 * @SLEEPOK: The apExecute to apSleep state transition has been completed
221 * @IDLEOK: The apExecute to apIdle state transition has been completed
222 * @SOFTRESETOK: The A9 watchdog/SoftReset state has been completed
223 * @SOFTRESETGO: The A9 watchdog/SoftReset state is ongoing
224 * @BOOT_TO_EXECUTE: The apBoot to apExecute state transition is ongoing
225 * @EXECUTE_TO_DEEPSLEEP: The apExecute to apDeepSleep state transition is
226 * ongoing
227 * @DEEPSLEEP_TO_EXECUTE: The apDeepSleep to apExecute state transition is
228 * ongoing
229 * @DEEPSLEEP_TO_EXECUTEOK: The apDeepSleep to apExecute state transition has
230 * been completed
231 * @EXECUTE_TO_SLEEP: The apExecute to apSleep state transition is ongoing
232 * @SLEEP_TO_EXECUTE: The apSleep to apExecute state transition is ongoing
233 * @SLEEP_TO_EXECUTEOK: The apSleep to apExecute state transition has been
234 * completed
235 * @EXECUTE_TO_IDLE: The apExecute to apIdle state transition is ongoing
236 * @IDLE_TO_EXECUTE: The apIdle to apExecute state transition is ongoing
237 * @IDLE_TO_EXECUTEOK: The apIdle to apExecute state transition has been
238 * completed
239 * @INIT_STATUS: Status init
240 */
241enum ap_pwrsttr_status {
242 BOOT_TO_EXECUTEOK = 0xFF,
243 DEEPSLEEPOK = 0xFE,
244 SLEEPOK = 0xFD,
245 IDLEOK = 0xFC,
246 SOFTRESETOK = 0xFB,
247 SOFTRESETGO = 0xFA,
248 BOOT_TO_EXECUTE = 0xF9,
249 EXECUTE_TO_DEEPSLEEP = 0xF8,
250 DEEPSLEEP_TO_EXECUTE = 0xF7,
251 DEEPSLEEP_TO_EXECUTEOK = 0xF6,
252 EXECUTE_TO_SLEEP = 0xF5,
253 SLEEP_TO_EXECUTE = 0xF4,
254 SLEEP_TO_EXECUTEOK = 0xF3,
255 EXECUTE_TO_IDLE = 0xF2,
256 IDLE_TO_EXECUTE = 0xF1,
257 IDLE_TO_EXECUTEOK = 0xF0,
258 RDYTODS_RETURNTOEXE = 0xEF,
259 NORDYTODS_RETURNTOEXE = 0xEE,
260 EXETOSLEEP_RETURNTOEXE = 0xED,
261 EXETOIDLE_RETURNTOEXE = 0xEC,
262 INIT_STATUS = 0xEB,
263
264 /*error messages */
265 INITERROR = 0x00,
266 PLLARMLOCKP_ER = 0x01,
267 PLLDDRLOCKP_ER = 0x02,
268 PLLSOCLOCKP_ER = 0x03,
269 PLLSOCK1LOCKP_ER = 0x04,
270 ARMWFI_ER = 0x05,
271 SYSCLKOK_ER = 0x06,
272 I2C_NACK_DATA_ER = 0x07,
273 BOOT_ER = 0x08,
274 I2C_STATUS_ALWAYS_1 = 0x0A,
275 I2C_NACK_REG_ADDR_ER = 0x0B,
276 I2C_NACK_DATA0123_ER = 0x1B,
277 I2C_NACK_ADDR_ER = 0x1F,
278 CURAPPWRSTISNOT_BOOT = 0x20,
279 CURAPPWRSTISNOT_EXECUTE = 0x21,
280 CURAPPWRSTISNOT_SLEEPMODE = 0x22,
281 CURAPPWRSTISNOT_CORRECTFORIT10 = 0x23,
282 FIFO4500WUISNOT_WUPEVENT = 0x24,
283 PLL32KLOCKP_ER = 0x29,
284 DDRDEEPSLEEPOK_ER = 0x2A,
285 ROMCODEREADY_ER = 0x50,
286 WUPBEFOREDS = 0x51,
287 DDRCONFIG_ER = 0x52,
288 WUPBEFORESLEEP = 0x53,
289 WUPBEFOREIDLE = 0x54
290}; /* earlier called as mbox_2_arm_stat */
291
292/**
293 * enum dvfs_stat - DVFS status messages definition
294 * @DVFS_GO: A DVFS state transition is ongoing
295 * @DVFS_ARM100OPPOK: The state transition DVFS has been completed for 100OPP
296 * @DVFS_ARM50OPPOK: The state transition DVFS has been completed for 50OPP
297 * @DVFS_ARMEXTCLKOK: The state transition DVFS has been completed for EXTCLK
298 * @DVFS_NOCHGTCLKOK: The state transition DVFS has been completed for
299 * NOCHGCLK
300 * @DVFS_INITSTATUS: Value init
301 */
302enum dvfs_stat {
303 DVFS_GO = 0xFF,
304 DVFS_ARM100OPPOK = 0xFE,
305 DVFS_ARM50OPPOK = 0xFD,
306 DVFS_ARMEXTCLKOK = 0xFC,
307 DVFS_NOCHGTCLKOK = 0xFB,
308 DVFS_INITSTATUS = 0x00
309};
310
311/**
312 * enum sva_mmdsp_stat - SVA MMDSP status messages
313 * @SVA_MMDSP_GO: SVAMMDSP interrupt has happened
314 * @SVA_MMDSP_INIT: Status init
315 */
316enum sva_mmdsp_stat {
317 SVA_MMDSP_GO = 0xFF,
318 SVA_MMDSP_INIT = 0x00
319};
320
321/**
322 * enum sia_mmdsp_stat - SIA MMDSP status messages
323 * @SIA_MMDSP_GO: SIAMMDSP interrupt has happened
324 * @SIA_MMDSP_INIT: Status init
325 */
326enum sia_mmdsp_stat {
327 SIA_MMDSP_GO = 0xFF,
328 SIA_MMDSP_INIT = 0x00
329};
330
331/**
332 * enum mbox_to_arm_err - Error messages definition
333 * @INIT_ERR: Init value
334 * @PLLARMLOCKP_ERR: PLLARM has not been correctly locked in the given time
335 * @PLLDDRLOCKP_ERR: PLLDDR has not been correctly locked in the given time
336 * @PLLSOC0LOCKP_ERR: PLLSOC0 has not been correctly locked in the given time
337 * @PLLSOC1LOCKP_ERR: PLLSOC1 has not been correctly locked in the given time
338 * @ARMWFI_ERR: The ARM WFI has not been correctly executed in the given time
339 * @SYSCLKOK_ERR: The SYSCLK is not available in the given time
340 * @BOOT_ERR: Romcode has not validated the XP70 self reset in the given time
341 * @ROMCODESAVECONTEXT: The Romcode didn't correctly save its secure context
342 * @VARMHIGHSPEEDVALTO_ERR: The ARM high speed supply value transferred
343 * through I2C has not been correctly executed in the given time
344 * @VARMHIGHSPEEDACCESS_ERR: The command value of VarmHighSpeedVal transferred
345 * through I2C has not been correctly executed in the given time
346 * @VARMLOWSPEEDVALTO_ERR: The ARM low speed supply value transferred through
347 * I2C has not been correctly executed in the given time
348 * @VARMLOWSPEEDACCESS_ERR: The command value of VarmLowSpeedVal transferred
349 * through I2C has not been correctly executed in the given time
350 * @VARMRETENTIONVALTO_ERR: The ARM retention supply value transferred through
351 * I2C has not been correctly executed in the given time
352 * @VARMRETENTIONACCESS_ERR: The command value of VarmRetentionVal transferred
353 * through I2C has not been correctly executed in the given time
354 * @VAPEHIGHSPEEDVALTO_ERR: The APE high speed supply value transferred through
355 * I2C has not been correctly executed in the given time
356 * @VSAFEHPVALTO_ERR: The SAFE high power supply value transferred through I2C
357 * has not been correctly executed in the given time
358 * @VMODSEL1VALTO_ERR: The MODEM sel1 supply value transferred through I2C has
359 * not been correctly executed in the given time
360 * @VMODSEL2VALTO_ERR: The MODEM sel2 supply value transferred through I2C has
361 * not been correctly executed in the given time
362 * @VARMOFFACCESS_ERR: The command value of Varm ON/OFF transferred through
363 * I2C has not been correctly executed in the given time
364 * @VAPEOFFACCESS_ERR: The command value of Vape ON/OFF transferred through
365 * I2C has not been correctly executed in the given time
366 * @VARMRETACCES_ERR: The command value of Varm retention ON/OFF transferred
367 * through I2C has not been correctly executed in the given time
368 * @CURAPPWRSTISNOTBOOT: Generated when the ARM wants to do a power state
369 * transition from ApBoot to ApExecute but the current power state is not ApBoot
370 * @CURAPPWRSTISNOTEXECUTE: Generated when the ARM wants to do a power state
371 * transition from ApExecute to another power state but the
372 * current power state is not ApExecute
373 * @CURAPPWRSTISNOTSLEEPMODE: Generated when wake up events are transmitted
374 * but the current power state is not ApDeepSleep/ApSleep/ApIdle
375 * @CURAPPWRSTISNOTCORRECTDBG: Generated when wake up events are transmitted
376 * but the current power state is not correct
377 * @ARMREGU1VALTO_ERR: The ArmRegu1 value transferred through I2C has not
378 * been correctly executed in the given time
379 * @ARMREGU2VALTO_ERR: The ArmRegu2 value transferred through I2C has not
380 * been correctly executed in the given time
381 * @VAPEREGUVALTO_ERR: The VApeRegu value transferred through I2C has not
382 * been correctly executed in the given time
383 * @VSMPS3REGUVALTO_ERR: The VSmps3Regu value transferred through I2C has not
384 * been correctly executed in the given time
385 * @VMODREGUVALTO_ERR: The VModemRegu value transferred through I2C has not
386 * been correctly executed in the given time
387 */
388enum mbox_to_arm_err {
389 INIT_ERR = 0x00,
390 PLLARMLOCKP_ERR = 0x01,
391 PLLDDRLOCKP_ERR = 0x02,
392 PLLSOC0LOCKP_ERR = 0x03,
393 PLLSOC1LOCKP_ERR = 0x04,
394 ARMWFI_ERR = 0x05,
395 SYSCLKOK_ERR = 0x06,
396 BOOT_ERR = 0x07,
397 ROMCODESAVECONTEXT = 0x08,
398 VARMHIGHSPEEDVALTO_ERR = 0x10,
399 VARMHIGHSPEEDACCESS_ERR = 0x11,
400 VARMLOWSPEEDVALTO_ERR = 0x12,
401 VARMLOWSPEEDACCESS_ERR = 0x13,
402 VARMRETENTIONVALTO_ERR = 0x14,
403 VARMRETENTIONACCESS_ERR = 0x15,
404 VAPEHIGHSPEEDVALTO_ERR = 0x16,
405 VSAFEHPVALTO_ERR = 0x17,
406 VMODSEL1VALTO_ERR = 0x18,
407 VMODSEL2VALTO_ERR = 0x19,
408 VARMOFFACCESS_ERR = 0x1A,
409 VAPEOFFACCESS_ERR = 0x1B,
410 VARMRETACCES_ERR = 0x1C,
411 CURAPPWRSTISNOTBOOT = 0x20,
412 CURAPPWRSTISNOTEXECUTE = 0x21,
413 CURAPPWRSTISNOTSLEEPMODE = 0x22,
414 CURAPPWRSTISNOTCORRECTDBG = 0x23,
415 ARMREGU1VALTO_ERR = 0x24,
416 ARMREGU2VALTO_ERR = 0x25,
417 VAPEREGUVALTO_ERR = 0x26,
418 VSMPS3REGUVALTO_ERR = 0x27,
419 VMODREGUVALTO_ERR = 0x28
420};
421
422enum hw_acc {
423 SVAMMDSP = 0,
424 SVAPIPE = 1,
425 SIAMMDSP = 2,
426 SIAPIPE = 3,
427 SGA = 4,
428 B2R2MCDE = 5,
429 ESRAM12 = 6,
430 ESRAM34 = 7,
431};
432
433enum cs_pwrmgt {
434 PWRDNCS0 = 0,
435 WKUPCS0 = 1,
436 PWRDNCS1 = 2,
437 WKUPCS1 = 3
438};
439
440/* Defs related to autonomous power management */
441
442/**
443 * enum sia_sva_pwr_policy - Power policy
444 * @NO_CHGT: No change
445 * @DSPOFF_HWPOFF:
446 * @DSPOFFRAMRET_HWPOFF:
447 * @DSPCLKOFF_HWPOFF:
448 * @DSPCLKOFF_HWPCLKOFF:
449 *
450 */
451enum sia_sva_pwr_policy {
452 NO_CHGT = 0x0,
453 DSPOFF_HWPOFF = 0x1,
454 DSPOFFRAMRET_HWPOFF = 0x2,
455 DSPCLKOFF_HWPOFF = 0x3,
456 DSPCLKOFF_HWPCLKOFF = 0x4,
457};
458
459/**
460 * enum auto_enable - Auto Power enable
461 * @AUTO_OFF:
462 * @AUTO_ON:
463 *
464 */
465enum auto_enable {
466 AUTO_OFF = 0x0,
467 AUTO_ON = 0x1,
468};
469
470/* End of file previously known as prcmu-fw-defs_v1.h */
471
472/* PRCMU Wakeup defines */
473enum prcmu_wakeup_index {
474 PRCMU_WAKEUP_INDEX_RTC,
475 PRCMU_WAKEUP_INDEX_RTT0,
476 PRCMU_WAKEUP_INDEX_RTT1,
477 PRCMU_WAKEUP_INDEX_HSI0,
478 PRCMU_WAKEUP_INDEX_HSI1,
479 PRCMU_WAKEUP_INDEX_USB,
480 PRCMU_WAKEUP_INDEX_ABB,
481 PRCMU_WAKEUP_INDEX_ABB_FIFO,
482 PRCMU_WAKEUP_INDEX_ARM,
483 NUM_PRCMU_WAKEUP_INDICES
484};
485#define PRCMU_WAKEUP(_name) (BIT(PRCMU_WAKEUP_INDEX_##_name))
486
487/* PRCMU QoS APE OPP class */
488#define PRCMU_QOS_APE_OPP 1
489#define PRCMU_QOS_DDR_OPP 2
490#define PRCMU_QOS_DEFAULT_VALUE -1
491
492/**
493 * enum hw_acc_dev - enum for hw accelerators
494 * @HW_ACC_SVAMMDSP: for SVAMMDSP
495 * @HW_ACC_SVAPIPE: for SVAPIPE
496 * @HW_ACC_SIAMMDSP: for SIAMMDSP
497 * @HW_ACC_SIAPIPE: for SIAPIPE
498 * @HW_ACC_SGA: for SGA
499 * @HW_ACC_B2R2: for B2R2
500 * @HW_ACC_MCDE: for MCDE
501 * @HW_ACC_ESRAM1: for ESRAM1
502 * @HW_ACC_ESRAM2: for ESRAM2
503 * @HW_ACC_ESRAM3: for ESRAM3
504 * @HW_ACC_ESRAM4: for ESRAM4
505 * @NUM_HW_ACC: number of hardware accelerators
506 *
507 * Different hw accelerators which can be turned ON/
508 * OFF or put into retention (MMDSPs and ESRAMs).
509 * Used with EPOD API.
510 *
511 * NOTE! Deprecated, to be removed when all users have switched over to
512 * the regulator API.
513 */
514enum hw_acc_dev {
515 HW_ACC_SVAMMDSP,
516 HW_ACC_SVAPIPE,
517 HW_ACC_SIAMMDSP,
518 HW_ACC_SIAPIPE,
519 HW_ACC_SGA,
520 HW_ACC_B2R2,
521 HW_ACC_MCDE,
522 HW_ACC_ESRAM1,
523 HW_ACC_ESRAM2,
524 HW_ACC_ESRAM3,
525 HW_ACC_ESRAM4,
526 NUM_HW_ACC
527};
528
529/*
530 * Ids for all EPODs (power domains)
531 * - EPOD_ID_SVAMMDSP: power domain for SVA MMDSP
532 * - EPOD_ID_SVAPIPE: power domain for SVA pipe
533 * - EPOD_ID_SIAMMDSP: power domain for SIA MMDSP
534 * - EPOD_ID_SIAPIPE: power domain for SIA pipe
535 * - EPOD_ID_SGA: power domain for SGA
536 * - EPOD_ID_B2R2_MCDE: power domain for B2R2 and MCDE
537 * - EPOD_ID_ESRAM12: power domain for ESRAM 1 and 2
538 * - EPOD_ID_ESRAM34: power domain for ESRAM 3 and 4
539 * - NUM_EPOD_ID: number of power domains
540 */
541#define EPOD_ID_SVAMMDSP 0
542#define EPOD_ID_SVAPIPE 1
543#define EPOD_ID_SIAMMDSP 2
544#define EPOD_ID_SIAPIPE 3
545#define EPOD_ID_SGA 4
546#define EPOD_ID_B2R2_MCDE 5
547#define EPOD_ID_ESRAM12 6
548#define EPOD_ID_ESRAM34 7
549#define NUM_EPOD_ID 8
550
551/*
552 * state definition for EPOD (power domain)
553 * - EPOD_STATE_NO_CHANGE: The EPOD should remain unchanged
554 * - EPOD_STATE_OFF: The EPOD is switched off
555 * - EPOD_STATE_RAMRET: The EPOD is switched off with its internal RAM in
556 * retention
557 * - EPOD_STATE_ON_CLK_OFF: The EPOD is switched on, clock is still off
558 * - EPOD_STATE_ON: Same as above, but with clock enabled
559 */
560#define EPOD_STATE_NO_CHANGE 0x00
561#define EPOD_STATE_OFF 0x01
562#define EPOD_STATE_RAMRET 0x02
563#define EPOD_STATE_ON_CLK_OFF 0x03
564#define EPOD_STATE_ON 0x04
565
566/*
567 * CLKOUT sources
568 */
569#define PRCMU_CLKSRC_CLK38M 0x00
570#define PRCMU_CLKSRC_ACLK 0x01
571#define PRCMU_CLKSRC_SYSCLK 0x02
572#define PRCMU_CLKSRC_LCDCLK 0x03
573#define PRCMU_CLKSRC_SDMMCCLK 0x04
574#define PRCMU_CLKSRC_TVCLK 0x05
575#define PRCMU_CLKSRC_TIMCLK 0x06
576#define PRCMU_CLKSRC_CLK009 0x07
577/* These are only valid for CLKOUT1: */
578#define PRCMU_CLKSRC_SIAMMDSPCLK 0x40
579#define PRCMU_CLKSRC_I2CCLK 0x41
580#define PRCMU_CLKSRC_MSP02CLK 0x42
581#define PRCMU_CLKSRC_ARMPLL_OBSCLK 0x43
582#define PRCMU_CLKSRC_HSIRXCLK 0x44
583#define PRCMU_CLKSRC_HSITXCLK 0x45
584#define PRCMU_CLKSRC_ARMCLKFIX 0x46
585#define PRCMU_CLKSRC_HDMICLK 0x47
586
587/*
588 * Definitions for autonomous power management configuration.
589 */
590
591#define PRCMU_AUTO_PM_OFF 0
592#define PRCMU_AUTO_PM_ON 1
593
594#define PRCMU_AUTO_PM_POWER_ON_HSEM BIT(0)
595#define PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT BIT(1)
596
597enum prcmu_auto_pm_policy {
598 PRCMU_AUTO_PM_POLICY_NO_CHANGE,
599 PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
600 PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF,
601 PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_OFF,
602 PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_CLK_OFF,
603};
604
605/**
606 * struct prcmu_auto_pm_config - Autonomous power management configuration.
607 * @sia_auto_pm_enable: SIA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON})
608 * @sia_power_on: SIA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask)
609 * @sia_policy: SIA power policy. (enum prcmu_auto_pm_policy)
610 * @sva_auto_pm_enable: SVA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON})
611 * @sva_power_on: SVA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask)
612 * @sva_policy: SVA power policy. (enum prcmu_auto_pm_policy)
613 */
614struct prcmu_auto_pm_config {
615 u8 sia_auto_pm_enable;
616 u8 sia_power_on;
617 u8 sia_policy;
618 u8 sva_auto_pm_enable;
619 u8 sva_power_on;
620 u8 sva_policy;
621};
622
623/**
624 * enum ddr_opp - DDR OPP states definition
625 * @DDR_100_OPP: The new DDR operating point is ddr100opp
626 * @DDR_50_OPP: The new DDR operating point is ddr50opp
627 * @DDR_25_OPP: The new DDR operating point is ddr25opp
628 */
629enum ddr_opp {
630 DDR_100_OPP = 0x00,
631 DDR_50_OPP = 0x01,
632 DDR_25_OPP = 0x02,
633};
634
635/*
636 * Clock identifiers.
637 */
638enum prcmu_clock {
639 PRCMU_SGACLK,
640 PRCMU_UARTCLK,
641 PRCMU_MSP02CLK,
642 PRCMU_MSP1CLK,
643 PRCMU_I2CCLK,
644 PRCMU_SDMMCCLK,
645 PRCMU_SLIMCLK,
646 PRCMU_PER1CLK,
647 PRCMU_PER2CLK,
648 PRCMU_PER3CLK,
649 PRCMU_PER5CLK,
650 PRCMU_PER6CLK,
651 PRCMU_PER7CLK,
652 PRCMU_LCDCLK,
653 PRCMU_BMLCLK,
654 PRCMU_HSITXCLK,
655 PRCMU_HSIRXCLK,
656 PRCMU_HDMICLK,
657 PRCMU_APEATCLK,
658 PRCMU_APETRACECLK,
659 PRCMU_MCDECLK,
660 PRCMU_IPI2CCLK,
661 PRCMU_DSIALTCLK,
662 PRCMU_DMACLK,
663 PRCMU_B2R2CLK,
664 PRCMU_TVCLK,
665 PRCMU_SSPCLK,
666 PRCMU_RNGCLK,
667 PRCMU_UICCCLK,
668 PRCMU_NUM_REG_CLOCKS,
669 PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS,
670 PRCMU_TIMCLK,
671};
672
673/*
674 * Definitions for controlling ESRAM0 in deep sleep.
675 */
676#define ESRAM0_DEEP_SLEEP_STATE_OFF 1
677#define ESRAM0_DEEP_SLEEP_STATE_RET 2
678
679#ifdef CONFIG_MFD_DB8500_PRCMU
680void __init prcmu_early_init(void);
681int prcmu_set_display_clocks(void);
682int prcmu_disable_dsipll(void);
683int prcmu_enable_dsipll(void);
684#else
685static inline void __init prcmu_early_init(void) {}
686#endif
687
688#ifdef CONFIG_MFD_DB8500_PRCMU
689
690int prcmu_set_rc_a2p(enum romcode_write);
691enum romcode_read prcmu_get_rc_p2a(void);
692enum ap_pwrst prcmu_get_xp70_current_state(void);
693int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll);
694
695void prcmu_enable_wakeups(u32 wakeups);
696static inline void prcmu_disable_wakeups(void)
697{
698 prcmu_enable_wakeups(0);
699}
700
701void prcmu_config_abb_event_readout(u32 abb_events);
702void prcmu_get_abb_event_buffer(void __iomem **buf);
703int prcmu_set_arm_opp(u8 opp);
704int prcmu_get_arm_opp(void);
705bool prcmu_has_arm_maxopp(void);
706bool prcmu_is_u8400(void);
707int prcmu_set_ape_opp(u8 opp);
708int prcmu_get_ape_opp(void);
709int prcmu_request_ape_opp_100_voltage(bool enable);
710int prcmu_release_usb_wakeup_state(void);
711int prcmu_set_ddr_opp(u8 opp);
712int prcmu_get_ddr_opp(void);
713unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
714void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
715/* NOTE! Use regulator framework instead */
716int prcmu_set_hwacc(u16 hw_acc_dev, u8 state);
717int prcmu_set_epod(u16 epod_id, u8 epod_state);
718void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
719 struct prcmu_auto_pm_config *idle);
720bool prcmu_is_auto_pm_enabled(void);
721
722int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
723int prcmu_request_clock(u8 clock, bool enable);
724int prcmu_set_clock_divider(u8 clock, u8 divider);
725int prcmu_config_esram0_deep_sleep(u8 state);
726int prcmu_config_hotdog(u8 threshold);
727int prcmu_config_hotmon(u8 low, u8 high);
728int prcmu_start_temp_sense(u16 cycles32k);
729int prcmu_stop_temp_sense(void);
730int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
731int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
732
733void prcmu_ac_wake_req(void);
734void prcmu_ac_sleep_req(void);
735void prcmu_system_reset(u16 reset_code);
736void prcmu_modem_reset(void);
737bool prcmu_is_ac_wake_requested(void);
738void prcmu_enable_spi2(void);
739void prcmu_disable_spi2(void);
740
741#else /* !CONFIG_MFD_DB8500_PRCMU */
742
743static inline int prcmu_set_rc_a2p(enum romcode_write code)
744{
745 return 0;
746}
747
748static inline enum romcode_read prcmu_get_rc_p2a(void)
749{
750 return INIT;
751}
752
753static inline enum ap_pwrst prcmu_get_xp70_current_state(void)
754{
755 return AP_EXECUTE;
756}
757
758static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
759 bool keep_ap_pll)
760{
761 return 0;
762}
763
764static inline void prcmu_enable_wakeups(u32 wakeups) {}
765
766static inline void prcmu_disable_wakeups(void) {}
767
768static inline void prcmu_config_abb_event_readout(u32 abb_events) {}
769
770static inline int prcmu_set_arm_opp(u8 opp)
771{
772 return 0;
773}
774
775static inline int prcmu_get_arm_opp(void)
776{
777 return ARM_100_OPP;
778}
779
780static inline bool prcmu_has_arm_maxopp(void)
781{
782 return false;
783}
784
785static inline bool prcmu_is_u8400(void)
786{
787 return false;
788}
789
790static inline int prcmu_set_ape_opp(u8 opp)
791{
792 return 0;
793}
794
795static inline int prcmu_get_ape_opp(void)
796{
797 return APE_100_OPP;
798}
799
800static inline int prcmu_request_ape_opp_100_voltage(bool enable)
801{
802 return 0;
803}
804
805static inline int prcmu_release_usb_wakeup_state(void)
806{
807 return 0;
808}
809
810static inline int prcmu_set_ddr_opp(u8 opp)
811{
812 return 0;
813}
814
815static inline int prcmu_get_ddr_opp(void)
816{
817 return DDR_100_OPP;
818}
819
820static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
821{
822 return 0;
823}
824
825static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
826
827static inline int prcmu_set_hwacc(u16 hw_acc_dev, u8 state)
828{
829 return 0;
830}
831
832static inline void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
833 struct prcmu_auto_pm_config *idle)
834{
835}
836
837static inline bool prcmu_is_auto_pm_enabled(void)
838{
839 return false;
840}
841
842static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
843{
844 return 0;
845}
846
847static inline int prcmu_request_clock(u8 clock, bool enable)
848{
849 return 0;
850}
851
852static inline int prcmu_set_clock_divider(u8 clock, u8 divider)
853{
854 return 0;
855}
856
857static inline int prcmu_config_esram0_deep_sleep(u8 state)
858{
859 return 0;
860}
861
862static inline int prcmu_config_hotdog(u8 threshold)
863{
864 return 0;
865}
866
867static inline int prcmu_config_hotmon(u8 low, u8 high)
868{
869 return 0;
870}
871
872static inline int prcmu_start_temp_sense(u16 cycles32k)
873{
874 return 0;
875}
876
877static inline int prcmu_stop_temp_sense(void)
878{
879 return 0;
880}
881
882static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
883{
884 return -ENOSYS;
885}
886
887static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
888{
889 return -ENOSYS;
890}
891
892static inline void prcmu_ac_wake_req(void) {}
893
894static inline void prcmu_ac_sleep_req(void) {}
895
896static inline void prcmu_system_reset(u16 reset_code) {}
897
898static inline void prcmu_modem_reset(void) {}
899
900static inline bool prcmu_is_ac_wake_requested(void)
901{
902 return false;
903}
904
905#ifndef CONFIG_UX500_SOC_DB5500
906static inline int prcmu_set_display_clocks(void)
907{
908 return 0;
909}
910
911static inline int prcmu_disable_dsipll(void)
912{
913 return 0;
914}
915
916static inline int prcmu_enable_dsipll(void)
917{
918 return 0;
919}
920#endif
921
922static inline int prcmu_enable_spi2(void)
923{
924 return 0;
925}
926
927static inline int prcmu_disable_spi2(void)
928{
929 return 0;
930}
931
932#endif /* !CONFIG_MFD_DB8500_PRCMU */
933
934#ifdef CONFIG_UX500_PRCMU_QOS_POWER
935int prcmu_qos_requirement(int pm_qos_class);
936int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
937int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
938void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
939int prcmu_qos_add_notifier(int prcmu_qos_class,
940 struct notifier_block *notifier);
941int prcmu_qos_remove_notifier(int prcmu_qos_class,
942 struct notifier_block *notifier);
943#else
944static inline int prcmu_qos_requirement(int prcmu_qos_class)
945{
946 return 0;
947}
948
949static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
950 char *name, s32 value)
951{
952 return 0;
953}
954
955static inline int prcmu_qos_update_requirement(int prcmu_qos_class,
956 char *name, s32 new_value)
957{
958 return 0;
959}
960
961static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
962{
963}
964
965static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
966 struct notifier_block *notifier)
967{
968 return 0;
969}
970static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
971 struct notifier_block *notifier)
972{
973 return 0;
974}
975
976#endif
977
978#endif /* __MFD_DB8500_PRCMU_H */
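A hedged usage sketch for the APIs above: a hypothetical driver enables a PRCMU-managed clock and pins the APE OPP while active (the requirement value of 100 is illustrative):

static char example_qos_name[] = "example-dev";

static int example_prcmu_enable(void)
{
	int ret;

	ret = prcmu_request_clock(PRCMU_HDMICLK, true);
	if (ret)
		return ret;

	return prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
					 example_qos_name, 100);
}

static void example_prcmu_disable(void)
{
	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, example_qos_name);
	prcmu_request_clock(PRCMU_HDMICLK, false);
}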
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 69d1010e2e51..5ff2400ad46c 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -311,10 +311,6 @@ enum max8997_irq {
311 MAX8997_IRQ_NR, 311 MAX8997_IRQ_NR,
312}; 312};
313 313
314#define MAX8997_REG_BUCK1DVS(x) (MAX8997_REG_BUCK1DVS1 + (x) - 1)
315#define MAX8997_REG_BUCK2DVS(x) (MAX8997_REG_BUCK2DVS1 + (x) - 1)
316#define MAX8997_REG_BUCK5DVS(x) (MAX8997_REG_BUCK5DVS1 + (x) - 1)
317
318#define MAX8997_NUM_GPIO 12 314#define MAX8997_NUM_GPIO 12
319struct max8997_dev { 315struct max8997_dev {
320 struct device *dev; 316 struct device *dev;
diff --git a/include/linux/mfd/pm8xxx/core.h b/include/linux/mfd/pm8xxx/core.h
new file mode 100644
index 000000000000..bd2f4f64e931
--- /dev/null
+++ b/include/linux/mfd/pm8xxx/core.h
@@ -0,0 +1,81 @@
1/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13/*
14 * Qualcomm PMIC 8xxx driver header file
15 *
16 */
17
18#ifndef __MFD_PM8XXX_CORE_H
19#define __MFD_PM8XXX_CORE_H
20
21#include <linux/mfd/core.h>
22
23struct pm8xxx_drvdata {
24 int (*pmic_readb) (const struct device *dev, u16 addr, u8 *val);
25 int (*pmic_writeb) (const struct device *dev, u16 addr, u8 val);
26 int (*pmic_read_buf) (const struct device *dev, u16 addr, u8 *buf,
27 int n);
28 int (*pmic_write_buf) (const struct device *dev, u16 addr, u8 *buf,
29 int n);
30 int (*pmic_read_irq_stat) (const struct device *dev, int irq);
31 void *pm_chip_data;
32};
33
34static inline int pm8xxx_readb(const struct device *dev, u16 addr, u8 *val)
35{
36 struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
37
38 if (!dd)
39 return -EINVAL;
40 return dd->pmic_readb(dev, addr, val);
41}
42
43static inline int pm8xxx_writeb(const struct device *dev, u16 addr, u8 val)
44{
45 struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
46
47 if (!dd)
48 return -EINVAL;
49 return dd->pmic_writeb(dev, addr, val);
50}
51
52static inline int pm8xxx_read_buf(const struct device *dev, u16 addr, u8 *buf,
53 int n)
54{
55 struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
56
57 if (!dd)
58 return -EINVAL;
59 return dd->pmic_read_buf(dev, addr, buf, n);
60}
61
62static inline int pm8xxx_write_buf(const struct device *dev, u16 addr, u8 *buf,
63 int n)
64{
65 struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
66
67 if (!dd)
68 return -EINVAL;
69 return dd->pmic_write_buf(dev, addr, buf, n);
70}
71
72static inline int pm8xxx_read_irq_stat(const struct device *dev, int irq)
73{
74 struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
75
76 if (!dd)
77 return -EINVAL;
78 return dd->pmic_read_irq_stat(dev, irq);
79}
80
81#endif
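
A minimal sketch of how a PMIC sub-device driver might use the accessors declared above, assuming it is instantiated as an MFD child so that its parent device carries the pm8xxx_drvdata. The register address 0x002 and the driver/cell name "pm8xxx-demo" are hypothetical, not taken from this patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/pm8xxx/core.h>

static int __devinit pm8xxx_demo_probe(struct platform_device *pdev)
{
	u8 rev;
	int rc;

	/* The inline accessors look up drvdata on the core (parent) device. */
	rc = pm8xxx_readb(pdev->dev.parent, 0x002, &rev); /* 0x002: hypothetical revision register */
	if (rc) {
		dev_err(&pdev->dev, "revision read failed, rc=%d\n", rc);
		return rc;
	}
	dev_info(&pdev->dev, "PMIC revision byte: 0x%02x\n", rev);
	return 0;
}

static struct platform_driver pm8xxx_demo_driver = {
	.probe	= pm8xxx_demo_probe,
	.driver	= {
		.name	= "pm8xxx-demo",	/* hypothetical child cell name */
		.owner	= THIS_MODULE,
	},
};

static int __init pm8xxx_demo_init(void)
{
	return platform_driver_register(&pm8xxx_demo_driver);
}
module_init(pm8xxx_demo_init);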
diff --git a/include/linux/mfd/pm8xxx/irq.h b/include/linux/mfd/pm8xxx/irq.h
new file mode 100644
index 000000000000..4b21769f4483
--- /dev/null
+++ b/include/linux/mfd/pm8xxx/irq.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13/*
14 * Qualcomm PMIC irq 8xxx driver header file
15 *
16 */
17
18#ifndef __MFD_PM8XXX_IRQ_H
19#define __MFD_PM8XXX_IRQ_H
20
21#include <linux/errno.h>
22#include <linux/err.h>
23
24struct pm8xxx_irq_core_data {
25 u32 rev;
26 int nirqs;
27};
28
29struct pm8xxx_irq_platform_data {
30 int irq_base;
31 struct pm8xxx_irq_core_data irq_cdata;
32 int devirq;
33 int irq_trigger_flag;
34};
35
36struct pm_irq_chip;
37
38#ifdef CONFIG_MFD_PM8XXX_IRQ
39int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq);
40struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev,
41 const struct pm8xxx_irq_platform_data *pdata);
42int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip);
43#else
44static inline int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq)
45{
46 return -ENXIO;
47}
48static inline struct pm_irq_chip * __devinit pm8xxx_irq_init(
49 const struct device *dev,
50 const struct pm8xxx_irq_platform_data *pdata)
51{
52 return ERR_PTR(-ENXIO);
53}
54static inline int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip)
55{
56 return -ENXIO;
57}
58#endif /* CONFIG_MFD_PM8XXX_IRQ */
59#endif /* __MFD_PM8XXX_IRQ_H */
diff --git a/include/linux/mfd/pm8xxx/pm8921.h b/include/linux/mfd/pm8xxx/pm8921.h
new file mode 100644
index 000000000000..d5517fd32d1b
--- /dev/null
+++ b/include/linux/mfd/pm8xxx/pm8921.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13/*
14 * Qualcomm PMIC 8921 driver header file
15 *
16 */
17
18#ifndef __MFD_PM8921_H
19#define __MFD_PM8921_H
20
21#include <linux/device.h>
22#include <linux/mfd/pm8xxx/irq.h>
23
24#define PM8921_NR_IRQS 256
25
26struct pm8921_platform_data {
27 int irq_base;
28 struct pm8xxx_irq_platform_data *irq_pdata;
29};
30
31#endif
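
A hedged sketch of the board data these two headers describe: a pm8921_platform_data that points at a pm8xxx_irq_platform_data. The IRQ base, the summary interrupt number, and the trigger choice are hypothetical board values, and whether the core driver consumes exactly these fields is an assumption based only on the structure layouts above.

#include <linux/interrupt.h>
#include <linux/mfd/pm8xxx/pm8921.h>

#define BOARD_PM8921_IRQ_BASE	288	/* hypothetical: first Linux IRQ reserved for the PMIC */
#define BOARD_PMIC_SUMMARY_IRQ	104	/* hypothetical: SoC interrupt wired to the PMIC */

static struct pm8xxx_irq_platform_data board_pm8921_irq_pdata = {
	.irq_base		= BOARD_PM8921_IRQ_BASE,
	.devirq			= BOARD_PMIC_SUMMARY_IRQ,
	.irq_trigger_flag	= IRQF_TRIGGER_LOW,
	.irq_cdata		= {
		.nirqs = PM8921_NR_IRQS,	/* 256, from pm8921.h */
	},
};

static struct pm8921_platform_data board_pm8921_pdata = {
	.irq_base	= BOARD_PM8921_IRQ_BASE,
	.irq_pdata	= &board_pm8921_irq_pdata,
};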
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8e70310ee945..5a90266c3a5a 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -4,6 +4,7 @@
4#include <linux/fb.h> 4#include <linux/fb.h>
5#include <linux/io.h> 5#include <linux/io.h>
6#include <linux/platform_device.h> 6#include <linux/platform_device.h>
7#include <linux/pm_runtime.h>
7 8
8#define tmio_ioread8(addr) readb(addr) 9#define tmio_ioread8(addr) readb(addr)
9#define tmio_ioread16(addr) readw(addr) 10#define tmio_ioread16(addr) readw(addr)
@@ -61,6 +62,12 @@
61 * Some controllers can support SDIO IRQ signalling. 62 * Some controllers can support SDIO IRQ signalling.
62 */ 63 */
63#define TMIO_MMC_SDIO_IRQ (1 << 2) 64#define TMIO_MMC_SDIO_IRQ (1 << 2)
65/*
66 * Some platforms can detect card insertion events with controller powered
67 * down, in which case they have to call tmio_mmc_cd_wakeup() to power up the
68 * controller and report the event to the driver.
69 */
70#define TMIO_MMC_HAS_COLD_CD (1 << 3)
64 71
65int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); 72int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
66int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); 73int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -82,11 +89,21 @@ struct tmio_mmc_data {
82 unsigned long flags; 89 unsigned long flags;
83 u32 ocr_mask; /* available voltages */ 90 u32 ocr_mask; /* available voltages */
84 struct tmio_mmc_dma *dma; 91 struct tmio_mmc_dma *dma;
92 struct device *dev;
93 bool power;
85 void (*set_pwr)(struct platform_device *host, int state); 94 void (*set_pwr)(struct platform_device *host, int state);
86 void (*set_clk_div)(struct platform_device *host, int state); 95 void (*set_clk_div)(struct platform_device *host, int state);
87 int (*get_cd)(struct platform_device *host); 96 int (*get_cd)(struct platform_device *host);
88}; 97};
89 98
99static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata)
100{
101 if (pdata && !pdata->power) {
102 pdata->power = true;
103 pm_runtime_get(pdata->dev);
104 }
105}
106
90/* 107/*
91 * data for the NAND controller 108 * data for the NAND controller
92 */ 109 */
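
A sketch of how board code might use the new TMIO_MMC_HAS_COLD_CD flag together with tmio_mmc_cd_wakeup(): a GPIO card-detect interrupt powers the controller back up so it can report the insertion. The GPIO number and IRQ wiring are hypothetical, and the .dev/.power fields are assumed to be filled in by the tmio_mmc host driver at probe time.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mfd/tmio.h>

#define BOARD_SD_CD_GPIO	41	/* hypothetical card-detect GPIO */

static struct tmio_mmc_data board_mmc_data = {
	/* Card detect works while the controller is powered down. */
	.flags = TMIO_MMC_HAS_COLD_CD,
	/* .dev and .power are assumed to be set up by the host driver. */
};

static irqreturn_t board_sd_cd_irq(int irq, void *arg)
{
	/* pm_runtime_get() inside tmio_mmc_cd_wakeup() is asynchronous,
	 * so calling it from hard IRQ context is acceptable. */
	tmio_mmc_cd_wakeup(&board_mmc_data);
	return IRQ_HANDLED;
}

static int __init board_sd_cd_init(void)
{
	return request_irq(gpio_to_irq(BOARD_SD_CD_GPIO), board_sd_cd_irq,
			   IRQF_TRIGGER_FALLING, "sd-cold-cd", NULL);
}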
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
new file mode 100644
index 000000000000..8bb85b930c07
--- /dev/null
+++ b/include/linux/mfd/tps65910.h
@@ -0,0 +1,800 @@
1/*
2 * tps65910.h -- TI TPS6591x
3 *
4 * Copyright 2010-2011 Texas Instruments Inc.
5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
8 * Author: Arnaud Deconinck <a-deconinck@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16
17#ifndef __LINUX_MFD_TPS65910_H
18#define __LINUX_MFD_TPS65910_H
19
20/* TPS chip id list */
21#define TPS65910 0
22#define TPS65911 1
23
24/* TPS regulator type list */
25#define REGULATOR_LDO 0
26#define REGULATOR_DCDC 1
27
28/*
29 * List of registers for component TPS65910
30 *
31 */
32
33#define TPS65910_SECONDS 0x0
34#define TPS65910_MINUTES 0x1
35#define TPS65910_HOURS 0x2
36#define TPS65910_DAYS 0x3
37#define TPS65910_MONTHS 0x4
38#define TPS65910_YEARS 0x5
39#define TPS65910_WEEKS 0x6
40#define TPS65910_ALARM_SECONDS 0x8
41#define TPS65910_ALARM_MINUTES 0x9
42#define TPS65910_ALARM_HOURS 0xA
43#define TPS65910_ALARM_DAYS 0xB
44#define TPS65910_ALARM_MONTHS 0xC
45#define TPS65910_ALARM_YEARS 0xD
46#define TPS65910_RTC_CTRL 0x10
47#define TPS65910_RTC_STATUS 0x11
48#define TPS65910_RTC_INTERRUPTS 0x12
49#define TPS65910_RTC_COMP_LSB 0x13
50#define TPS65910_RTC_COMP_MSB 0x14
51#define TPS65910_RTC_RES_PROG 0x15
52#define TPS65910_RTC_RESET_STATUS 0x16
53#define TPS65910_BCK1 0x17
54#define TPS65910_BCK2 0x18
55#define TPS65910_BCK3 0x19
56#define TPS65910_BCK4 0x1A
57#define TPS65910_BCK5 0x1B
58#define TPS65910_PUADEN 0x1C
59#define TPS65910_REF 0x1D
60#define TPS65910_VRTC 0x1E
61#define TPS65910_VIO 0x20
62#define TPS65910_VDD1 0x21
63#define TPS65910_VDD1_OP 0x22
64#define TPS65910_VDD1_SR 0x23
65#define TPS65910_VDD2 0x24
66#define TPS65910_VDD2_OP 0x25
67#define TPS65910_VDD2_SR 0x26
68#define TPS65910_VDD3 0x27
69#define TPS65910_VDIG1 0x30
70#define TPS65910_VDIG2 0x31
71#define TPS65910_VAUX1 0x32
72#define TPS65910_VAUX2 0x33
73#define TPS65910_VAUX33 0x34
74#define TPS65910_VMMC 0x35
75#define TPS65910_VPLL 0x36
76#define TPS65910_VDAC 0x37
77#define TPS65910_THERM 0x38
78#define TPS65910_BBCH 0x39
79#define TPS65910_DCDCCTRL 0x3E
80#define TPS65910_DEVCTRL 0x3F
81#define TPS65910_DEVCTRL2 0x40
82#define TPS65910_SLEEP_KEEP_LDO_ON 0x41
83#define TPS65910_SLEEP_KEEP_RES_ON 0x42
84#define TPS65910_SLEEP_SET_LDO_OFF 0x43
85#define TPS65910_SLEEP_SET_RES_OFF 0x44
86#define TPS65910_EN1_LDO_ASS 0x45
87#define TPS65910_EN1_SMPS_ASS 0x46
88#define TPS65910_EN2_LDO_ASS 0x47
89#define TPS65910_EN2_SMPS_ASS 0x48
90#define TPS65910_EN3_LDO_ASS 0x49
91#define TPS65910_SPARE 0x4A
92#define TPS65910_INT_STS 0x50
93#define TPS65910_INT_MSK 0x51
94#define TPS65910_INT_STS2 0x52
95#define TPS65910_INT_MSK2 0x53
96#define TPS65910_INT_STS3 0x54
97#define TPS65910_INT_MSK3 0x55
98#define TPS65910_GPIO0 0x60
99#define TPS65910_GPIO1 0x61
100#define TPS65910_GPIO2 0x62
101#define TPS65910_GPIO3 0x63
102#define TPS65910_GPIO4 0x64
103#define TPS65910_GPIO5 0x65
104#define TPS65910_GPIO6 0x66
105#define TPS65910_GPIO7 0x67
106#define TPS65910_GPIO8 0x68
107#define TPS65910_JTAGVERNUM 0x80
108#define TPS65910_MAX_REGISTER 0x80
109
110/*
111 * List of registers specific to TPS65911
112 */
113#define TPS65911_VDDCTRL 0x27
114#define TPS65911_VDDCTRL_OP 0x28
115#define TPS65911_VDDCTRL_SR 0x29
116#define TPS65911_LDO1 0x30
117#define TPS65911_LDO2 0x31
118#define TPS65911_LDO5 0x32
119#define TPS65911_LDO8 0x33
120#define TPS65911_LDO7 0x34
121#define TPS65911_LDO6 0x35
122#define TPS65911_LDO4 0x36
123#define TPS65911_LDO3 0x37
124#define TPS65911_VMBCH 0x6A
125#define TPS65911_VMBCH2 0x6B
126
127/*
128 * List of register bitfields for component TPS65910
129 *
130 */
131
132
133/*Register BCK1 (0x80) register.RegisterDescription */
134#define BCK1_BCKUP_MASK 0xFF
135#define BCK1_BCKUP_SHIFT 0
136
137
138/*Register BCK2 (0x80) register.RegisterDescription */
139#define BCK2_BCKUP_MASK 0xFF
140#define BCK2_BCKUP_SHIFT 0
141
142
143/*Register BCK3 (0x80) register.RegisterDescription */
144#define BCK3_BCKUP_MASK 0xFF
145#define BCK3_BCKUP_SHIFT 0
146
147
148/*Register BCK4 (0x80) register.RegisterDescription */
149#define BCK4_BCKUP_MASK 0xFF
150#define BCK4_BCKUP_SHIFT 0
151
152
153/*Register BCK5 (0x80) register.RegisterDescription */
154#define BCK5_BCKUP_MASK 0xFF
155#define BCK5_BCKUP_SHIFT 0
156
157
158/*Register PUADEN (0x80) register.RegisterDescription */
159#define PUADEN_EN3P_MASK 0x80
160#define PUADEN_EN3P_SHIFT 7
161#define PUADEN_I2CCTLP_MASK 0x40
162#define PUADEN_I2CCTLP_SHIFT 6
163#define PUADEN_I2CSRP_MASK 0x20
164#define PUADEN_I2CSRP_SHIFT 5
165#define PUADEN_PWRONP_MASK 0x10
166#define PUADEN_PWRONP_SHIFT 4
167#define PUADEN_SLEEPP_MASK 0x08
168#define PUADEN_SLEEPP_SHIFT 3
169#define PUADEN_PWRHOLDP_MASK 0x04
170#define PUADEN_PWRHOLDP_SHIFT 2
171#define PUADEN_BOOT1P_MASK 0x02
172#define PUADEN_BOOT1P_SHIFT 1
173#define PUADEN_BOOT0P_MASK 0x01
174#define PUADEN_BOOT0P_SHIFT 0
175
176
177/*Register REF (0x80) register.RegisterDescription */
178#define REF_VMBCH_SEL_MASK 0x0C
179#define REF_VMBCH_SEL_SHIFT 2
180#define REF_ST_MASK 0x03
181#define REF_ST_SHIFT 0
182
183
184/*Register VRTC (0x80) register.RegisterDescription */
185#define VRTC_VRTC_OFFMASK_MASK 0x08
186#define VRTC_VRTC_OFFMASK_SHIFT 3
187#define VRTC_ST_MASK 0x03
188#define VRTC_ST_SHIFT 0
189
190
191/*Register VIO (0x80) register.RegisterDescription */
192#define VIO_ILMAX_MASK 0xC0
193#define VIO_ILMAX_SHIFT 6
194#define VIO_SEL_MASK 0x0C
195#define VIO_SEL_SHIFT 2
196#define VIO_ST_MASK 0x03
197#define VIO_ST_SHIFT 0
198
199
200/*Register VDD1 (0x80) register.RegisterDescription */
201#define VDD1_VGAIN_SEL_MASK 0xC0
202#define VDD1_VGAIN_SEL_SHIFT 6
203#define VDD1_ILMAX_MASK 0x20
204#define VDD1_ILMAX_SHIFT 5
205#define VDD1_TSTEP_MASK 0x1C
206#define VDD1_TSTEP_SHIFT 2
207#define VDD1_ST_MASK 0x03
208#define VDD1_ST_SHIFT 0
209
210
211/*Register VDD1_OP (0x80) register.RegisterDescription */
212#define VDD1_OP_CMD_MASK 0x80
213#define VDD1_OP_CMD_SHIFT 7
214#define VDD1_OP_SEL_MASK 0x7F
215#define VDD1_OP_SEL_SHIFT 0
216
217
218/*Register VDD1_SR (0x80) register.RegisterDescription */
219#define VDD1_SR_SEL_MASK 0x7F
220#define VDD1_SR_SEL_SHIFT 0
221
222
223/*Register VDD2 (0x80) register.RegisterDescription */
224#define VDD2_VGAIN_SEL_MASK 0xC0
225#define VDD2_VGAIN_SEL_SHIFT 6
226#define VDD2_ILMAX_MASK 0x20
227#define VDD2_ILMAX_SHIFT 5
228#define VDD2_TSTEP_MASK 0x1C
229#define VDD2_TSTEP_SHIFT 2
230#define VDD2_ST_MASK 0x03
231#define VDD2_ST_SHIFT 0
232
233
234/*Register VDD2_OP (0x80) register.RegisterDescription */
235#define VDD2_OP_CMD_MASK 0x80
236#define VDD2_OP_CMD_SHIFT 7
237#define VDD2_OP_SEL_MASK 0x7F
238#define VDD2_OP_SEL_SHIFT 0
239
240/*Register VDD2_SR (0x80) register.RegisterDescription */
241#define VDD2_SR_SEL_MASK 0x7F
242#define VDD2_SR_SEL_SHIFT 0
243
244
245/*Registers VDD1, VDD2 voltage values definitions */
246#define VDD1_2_NUM_VOLTS 73
247#define VDD1_2_MIN_VOLT 6000
248#define VDD1_2_OFFSET 125
249
250
251/*Register VDD3 (0x80) register.RegisterDescription */
252#define VDD3_CKINEN_MASK 0x04
253#define VDD3_CKINEN_SHIFT 2
254#define VDD3_ST_MASK 0x03
255#define VDD3_ST_SHIFT 0
256#define VDDCTRL_MIN_VOLT 6000
257#define VDDCTRL_OFFSET 125
258
259/*Registers VDIG (0x80) to VDAC register.RegisterDescription */
260#define LDO_SEL_MASK 0x0C
261#define LDO_SEL_SHIFT 2
262#define LDO_ST_MASK 0x03
263#define LDO_ST_SHIFT 0
264#define LDO_ST_ON_BIT 0x01
265#define LDO_ST_MODE_BIT 0x02
266
267
268/* Registers LDO1 to LDO8 in tps65910 */
269#define LDO1_SEL_MASK 0xFC
270#define LDO3_SEL_MASK 0x7C
271#define LDO_MIN_VOLT 1000
 272#define LDO_MAX_VOLT				3300
273
274
275/*Register VDIG1 (0x80) register.RegisterDescription */
276#define VDIG1_SEL_MASK 0x0C
277#define VDIG1_SEL_SHIFT 2
278#define VDIG1_ST_MASK 0x03
279#define VDIG1_ST_SHIFT 0
280
281
282/*Register VDIG2 (0x80) register.RegisterDescription */
283#define VDIG2_SEL_MASK 0x0C
284#define VDIG2_SEL_SHIFT 2
285#define VDIG2_ST_MASK 0x03
286#define VDIG2_ST_SHIFT 0
287
288
289/*Register VAUX1 (0x80) register.RegisterDescription */
290#define VAUX1_SEL_MASK 0x0C
291#define VAUX1_SEL_SHIFT 2
292#define VAUX1_ST_MASK 0x03
293#define VAUX1_ST_SHIFT 0
294
295
296/*Register VAUX2 (0x80) register.RegisterDescription */
297#define VAUX2_SEL_MASK 0x0C
298#define VAUX2_SEL_SHIFT 2
299#define VAUX2_ST_MASK 0x03
300#define VAUX2_ST_SHIFT 0
301
302
303/*Register VAUX33 (0x80) register.RegisterDescription */
304#define VAUX33_SEL_MASK 0x0C
305#define VAUX33_SEL_SHIFT 2
306#define VAUX33_ST_MASK 0x03
307#define VAUX33_ST_SHIFT 0
308
309
310/*Register VMMC (0x80) register.RegisterDescription */
311#define VMMC_SEL_MASK 0x0C
312#define VMMC_SEL_SHIFT 2
313#define VMMC_ST_MASK 0x03
314#define VMMC_ST_SHIFT 0
315
316
317/*Register VPLL (0x80) register.RegisterDescription */
318#define VPLL_SEL_MASK 0x0C
319#define VPLL_SEL_SHIFT 2
320#define VPLL_ST_MASK 0x03
321#define VPLL_ST_SHIFT 0
322
323
324/*Register VDAC (0x80) register.RegisterDescription */
325#define VDAC_SEL_MASK 0x0C
326#define VDAC_SEL_SHIFT 2
327#define VDAC_ST_MASK 0x03
328#define VDAC_ST_SHIFT 0
329
330
331/*Register THERM (0x80) register.RegisterDescription */
332#define THERM_THERM_HD_MASK 0x20
333#define THERM_THERM_HD_SHIFT 5
334#define THERM_THERM_TS_MASK 0x10
335#define THERM_THERM_TS_SHIFT 4
336#define THERM_THERM_HDSEL_MASK 0x0C
337#define THERM_THERM_HDSEL_SHIFT 2
338#define THERM_RSVD1_MASK 0x02
339#define THERM_RSVD1_SHIFT 1
340#define THERM_THERM_STATE_MASK 0x01
341#define THERM_THERM_STATE_SHIFT 0
342
343
344/*Register BBCH (0x80) register.RegisterDescription */
345#define BBCH_BBSEL_MASK 0x06
346#define BBCH_BBSEL_SHIFT 1
347#define BBCH_BBCHEN_MASK 0x01
348#define BBCH_BBCHEN_SHIFT 0
349
350
351/*Register DCDCCTRL (0x80) register.RegisterDescription */
352#define DCDCCTRL_VDD2_PSKIP_MASK 0x20
353#define DCDCCTRL_VDD2_PSKIP_SHIFT 5
354#define DCDCCTRL_VDD1_PSKIP_MASK 0x10
355#define DCDCCTRL_VDD1_PSKIP_SHIFT 4
356#define DCDCCTRL_VIO_PSKIP_MASK 0x08
357#define DCDCCTRL_VIO_PSKIP_SHIFT 3
358#define DCDCCTRL_DCDCCKEXT_MASK 0x04
359#define DCDCCTRL_DCDCCKEXT_SHIFT 2
360#define DCDCCTRL_DCDCCKSYNC_MASK 0x03
361#define DCDCCTRL_DCDCCKSYNC_SHIFT 0
362
363
364/*Register DEVCTRL (0x80) register.RegisterDescription */
365#define DEVCTRL_RTC_PWDN_MASK 0x40
366#define DEVCTRL_RTC_PWDN_SHIFT 6
367#define DEVCTRL_CK32K_CTRL_MASK 0x20
368#define DEVCTRL_CK32K_CTRL_SHIFT 5
369#define DEVCTRL_SR_CTL_I2C_SEL_MASK 0x10
370#define DEVCTRL_SR_CTL_I2C_SEL_SHIFT 4
371#define DEVCTRL_DEV_OFF_RST_MASK 0x08
372#define DEVCTRL_DEV_OFF_RST_SHIFT 3
373#define DEVCTRL_DEV_ON_MASK 0x04
374#define DEVCTRL_DEV_ON_SHIFT 2
375#define DEVCTRL_DEV_SLP_MASK 0x02
376#define DEVCTRL_DEV_SLP_SHIFT 1
377#define DEVCTRL_DEV_OFF_MASK 0x01
378#define DEVCTRL_DEV_OFF_SHIFT 0
379
380
381/*Register DEVCTRL2 (0x80) register.RegisterDescription */
382#define DEVCTRL2_TSLOT_LENGTH_MASK 0x30
383#define DEVCTRL2_TSLOT_LENGTH_SHIFT 4
384#define DEVCTRL2_SLEEPSIG_POL_MASK 0x08
385#define DEVCTRL2_SLEEPSIG_POL_SHIFT 3
386#define DEVCTRL2_PWON_LP_OFF_MASK 0x04
387#define DEVCTRL2_PWON_LP_OFF_SHIFT 2
388#define DEVCTRL2_PWON_LP_RST_MASK 0x02
389#define DEVCTRL2_PWON_LP_RST_SHIFT 1
390#define DEVCTRL2_IT_POL_MASK 0x01
391#define DEVCTRL2_IT_POL_SHIFT 0
392
393
394/*Register SLEEP_KEEP_LDO_ON (0x80) register.RegisterDescription */
395#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK 0x80
396#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT 7
397#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK 0x40
398#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT 6
399#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK 0x20
400#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT 5
401#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK 0x10
402#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT 4
403#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK 0x08
404#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT 3
405#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK 0x04
406#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT 2
407#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK 0x02
408#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT 1
409#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK 0x01
410#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT 0
411
412
413/*Register SLEEP_KEEP_RES_ON (0x80) register.RegisterDescription */
414#define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK 0x80
415#define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT 7
416#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK 0x40
417#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT 6
418#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK 0x20
419#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT 5
420#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK 0x10
421#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT 4
422#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK 0x08
423#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT 3
424#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK 0x04
425#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT 2
426#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK 0x02
427#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT 1
428#define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK 0x01
429#define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT 0
430
431
432/*Register SLEEP_SET_LDO_OFF (0x80) register.RegisterDescription */
433#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK 0x80
434#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT 7
435#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK 0x40
436#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT 6
437#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK 0x20
438#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT 5
439#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK 0x10
440#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT 4
441#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK 0x08
442#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT 3
443#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK 0x04
444#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT 2
445#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK 0x02
446#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT 1
447#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK 0x01
448#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT 0
449
450
451/*Register SLEEP_SET_RES_OFF (0x80) register.RegisterDescription */
452#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK 0x80
453#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT 7
454#define SLEEP_SET_RES_OFF_RSVD_MASK 0x60
455#define SLEEP_SET_RES_OFF_RSVD_SHIFT 5
456#define SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK 0x10
457#define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT 4
458#define SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK 0x08
459#define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT 3
460#define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK 0x04
461#define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT 2
462#define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK 0x02
463#define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT 1
464#define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK 0x01
465#define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT 0
466
467
468/*Register EN1_LDO_ASS (0x80) register.RegisterDescription */
469#define EN1_LDO_ASS_VDAC_EN1_MASK 0x80
470#define EN1_LDO_ASS_VDAC_EN1_SHIFT 7
471#define EN1_LDO_ASS_VPLL_EN1_MASK 0x40
472#define EN1_LDO_ASS_VPLL_EN1_SHIFT 6
473#define EN1_LDO_ASS_VAUX33_EN1_MASK 0x20
474#define EN1_LDO_ASS_VAUX33_EN1_SHIFT 5
475#define EN1_LDO_ASS_VAUX2_EN1_MASK 0x10
476#define EN1_LDO_ASS_VAUX2_EN1_SHIFT 4
477#define EN1_LDO_ASS_VAUX1_EN1_MASK 0x08
478#define EN1_LDO_ASS_VAUX1_EN1_SHIFT 3
479#define EN1_LDO_ASS_VDIG2_EN1_MASK 0x04
480#define EN1_LDO_ASS_VDIG2_EN1_SHIFT 2
481#define EN1_LDO_ASS_VDIG1_EN1_MASK 0x02
482#define EN1_LDO_ASS_VDIG1_EN1_SHIFT 1
483#define EN1_LDO_ASS_VMMC_EN1_MASK 0x01
484#define EN1_LDO_ASS_VMMC_EN1_SHIFT 0
485
486
487/*Register EN1_SMPS_ASS (0x80) register.RegisterDescription */
488#define EN1_SMPS_ASS_RSVD_MASK 0xE0
489#define EN1_SMPS_ASS_RSVD_SHIFT 5
490#define EN1_SMPS_ASS_SPARE_EN1_MASK 0x10
491#define EN1_SMPS_ASS_SPARE_EN1_SHIFT 4
492#define EN1_SMPS_ASS_VDD3_EN1_MASK 0x08
493#define EN1_SMPS_ASS_VDD3_EN1_SHIFT 3
494#define EN1_SMPS_ASS_VDD2_EN1_MASK 0x04
495#define EN1_SMPS_ASS_VDD2_EN1_SHIFT 2
496#define EN1_SMPS_ASS_VDD1_EN1_MASK 0x02
497#define EN1_SMPS_ASS_VDD1_EN1_SHIFT 1
498#define EN1_SMPS_ASS_VIO_EN1_MASK 0x01
499#define EN1_SMPS_ASS_VIO_EN1_SHIFT 0
500
501
502/*Register EN2_LDO_ASS (0x80) register.RegisterDescription */
503#define EN2_LDO_ASS_VDAC_EN2_MASK 0x80
504#define EN2_LDO_ASS_VDAC_EN2_SHIFT 7
505#define EN2_LDO_ASS_VPLL_EN2_MASK 0x40
506#define EN2_LDO_ASS_VPLL_EN2_SHIFT 6
507#define EN2_LDO_ASS_VAUX33_EN2_MASK 0x20
508#define EN2_LDO_ASS_VAUX33_EN2_SHIFT 5
509#define EN2_LDO_ASS_VAUX2_EN2_MASK 0x10
510#define EN2_LDO_ASS_VAUX2_EN2_SHIFT 4
511#define EN2_LDO_ASS_VAUX1_EN2_MASK 0x08
512#define EN2_LDO_ASS_VAUX1_EN2_SHIFT 3
513#define EN2_LDO_ASS_VDIG2_EN2_MASK 0x04
514#define EN2_LDO_ASS_VDIG2_EN2_SHIFT 2
515#define EN2_LDO_ASS_VDIG1_EN2_MASK 0x02
516#define EN2_LDO_ASS_VDIG1_EN2_SHIFT 1
517#define EN2_LDO_ASS_VMMC_EN2_MASK 0x01
518#define EN2_LDO_ASS_VMMC_EN2_SHIFT 0
519
520
521/*Register EN2_SMPS_ASS (0x80) register.RegisterDescription */
522#define EN2_SMPS_ASS_RSVD_MASK 0xE0
523#define EN2_SMPS_ASS_RSVD_SHIFT 5
524#define EN2_SMPS_ASS_SPARE_EN2_MASK 0x10
525#define EN2_SMPS_ASS_SPARE_EN2_SHIFT 4
526#define EN2_SMPS_ASS_VDD3_EN2_MASK 0x08
527#define EN2_SMPS_ASS_VDD3_EN2_SHIFT 3
528#define EN2_SMPS_ASS_VDD2_EN2_MASK 0x04
529#define EN2_SMPS_ASS_VDD2_EN2_SHIFT 2
530#define EN2_SMPS_ASS_VDD1_EN2_MASK 0x02
531#define EN2_SMPS_ASS_VDD1_EN2_SHIFT 1
532#define EN2_SMPS_ASS_VIO_EN2_MASK 0x01
533#define EN2_SMPS_ASS_VIO_EN2_SHIFT 0
534
535
536/*Register EN3_LDO_ASS (0x80) register.RegisterDescription */
537#define EN3_LDO_ASS_VDAC_EN3_MASK 0x80
538#define EN3_LDO_ASS_VDAC_EN3_SHIFT 7
539#define EN3_LDO_ASS_VPLL_EN3_MASK 0x40
540#define EN3_LDO_ASS_VPLL_EN3_SHIFT 6
541#define EN3_LDO_ASS_VAUX33_EN3_MASK 0x20
542#define EN3_LDO_ASS_VAUX33_EN3_SHIFT 5
543#define EN3_LDO_ASS_VAUX2_EN3_MASK 0x10
544#define EN3_LDO_ASS_VAUX2_EN3_SHIFT 4
545#define EN3_LDO_ASS_VAUX1_EN3_MASK 0x08
546#define EN3_LDO_ASS_VAUX1_EN3_SHIFT 3
547#define EN3_LDO_ASS_VDIG2_EN3_MASK 0x04
548#define EN3_LDO_ASS_VDIG2_EN3_SHIFT 2
549#define EN3_LDO_ASS_VDIG1_EN3_MASK 0x02
550#define EN3_LDO_ASS_VDIG1_EN3_SHIFT 1
551#define EN3_LDO_ASS_VMMC_EN3_MASK 0x01
552#define EN3_LDO_ASS_VMMC_EN3_SHIFT 0
553
554
555/*Register SPARE (0x80) register.RegisterDescription */
556#define SPARE_SPARE_MASK 0xFF
557#define SPARE_SPARE_SHIFT 0
558
559
560/*Register INT_STS (0x80) register.RegisterDescription */
561#define INT_STS_RTC_PERIOD_IT_MASK 0x80
562#define INT_STS_RTC_PERIOD_IT_SHIFT 7
563#define INT_STS_RTC_ALARM_IT_MASK 0x40
564#define INT_STS_RTC_ALARM_IT_SHIFT 6
565#define INT_STS_HOTDIE_IT_MASK 0x20
566#define INT_STS_HOTDIE_IT_SHIFT 5
567#define INT_STS_PWRHOLD_IT_MASK 0x10
568#define INT_STS_PWRHOLD_IT_SHIFT 4
569#define INT_STS_PWRON_LP_IT_MASK 0x08
570#define INT_STS_PWRON_LP_IT_SHIFT 3
571#define INT_STS_PWRON_IT_MASK 0x04
572#define INT_STS_PWRON_IT_SHIFT 2
573#define INT_STS_VMBHI_IT_MASK 0x02
574#define INT_STS_VMBHI_IT_SHIFT 1
575#define INT_STS_VMBDCH_IT_MASK 0x01
576#define INT_STS_VMBDCH_IT_SHIFT 0
577
578
579/*Register INT_MSK (0x80) register.RegisterDescription */
580#define INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80
581#define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7
582#define INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40
583#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6
584#define INT_MSK_HOTDIE_IT_MSK_MASK 0x20
585#define INT_MSK_HOTDIE_IT_MSK_SHIFT 5
586#define INT_MSK_PWRHOLD_IT_MSK_MASK 0x10
587#define INT_MSK_PWRHOLD_IT_MSK_SHIFT 4
588#define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08
589#define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3
590#define INT_MSK_PWRON_IT_MSK_MASK 0x04
591#define INT_MSK_PWRON_IT_MSK_SHIFT 2
592#define INT_MSK_VMBHI_IT_MSK_MASK 0x02
593#define INT_MSK_VMBHI_IT_MSK_SHIFT 1
594#define INT_MSK_VMBDCH_IT_MSK_MASK 0x01
595#define INT_MSK_VMBDCH_IT_MSK_SHIFT 0
596
597
598/*Register INT_STS2 (0x80) register.RegisterDescription */
599#define INT_STS2_GPIO3_F_IT_MASK 0x80
600#define INT_STS2_GPIO3_F_IT_SHIFT 7
601#define INT_STS2_GPIO3_R_IT_MASK 0x40
602#define INT_STS2_GPIO3_R_IT_SHIFT 6
603#define INT_STS2_GPIO2_F_IT_MASK 0x20
604#define INT_STS2_GPIO2_F_IT_SHIFT 5
605#define INT_STS2_GPIO2_R_IT_MASK 0x10
606#define INT_STS2_GPIO2_R_IT_SHIFT 4
607#define INT_STS2_GPIO1_F_IT_MASK 0x08
608#define INT_STS2_GPIO1_F_IT_SHIFT 3
609#define INT_STS2_GPIO1_R_IT_MASK 0x04
610#define INT_STS2_GPIO1_R_IT_SHIFT 2
611#define INT_STS2_GPIO0_F_IT_MASK 0x02
612#define INT_STS2_GPIO0_F_IT_SHIFT 1
613#define INT_STS2_GPIO0_R_IT_MASK 0x01
614#define INT_STS2_GPIO0_R_IT_SHIFT 0
615
616
617/*Register INT_MSK2 (0x80) register.RegisterDescription */
618#define INT_MSK2_GPIO3_F_IT_MSK_MASK 0x80
619#define INT_MSK2_GPIO3_F_IT_MSK_SHIFT 7
620#define INT_MSK2_GPIO3_R_IT_MSK_MASK 0x40
621#define INT_MSK2_GPIO3_R_IT_MSK_SHIFT 6
622#define INT_MSK2_GPIO2_F_IT_MSK_MASK 0x20
623#define INT_MSK2_GPIO2_F_IT_MSK_SHIFT 5
624#define INT_MSK2_GPIO2_R_IT_MSK_MASK 0x10
625#define INT_MSK2_GPIO2_R_IT_MSK_SHIFT 4
626#define INT_MSK2_GPIO1_F_IT_MSK_MASK 0x08
627#define INT_MSK2_GPIO1_F_IT_MSK_SHIFT 3
628#define INT_MSK2_GPIO1_R_IT_MSK_MASK 0x04
629#define INT_MSK2_GPIO1_R_IT_MSK_SHIFT 2
630#define INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02
631#define INT_MSK2_GPIO0_F_IT_MSK_SHIFT 1
632#define INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01
633#define INT_MSK2_GPIO0_R_IT_MSK_SHIFT 0
634
635
636/*Register INT_STS3 (0x80) register.RegisterDescription */
637#define INT_STS3_GPIO5_F_IT_MASK 0x08
638#define INT_STS3_GPIO5_F_IT_SHIFT 3
639#define INT_STS3_GPIO5_R_IT_MASK 0x04
640#define INT_STS3_GPIO5_R_IT_SHIFT 2
641#define INT_STS3_GPIO4_F_IT_MASK 0x02
642#define INT_STS3_GPIO4_F_IT_SHIFT 1
643#define INT_STS3_GPIO4_R_IT_MASK 0x01
644#define INT_STS3_GPIO4_R_IT_SHIFT 0
645
646
647/*Register INT_MSK3 (0x80) register.RegisterDescription */
648#define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08
649#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3
650#define INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04
651#define INT_MSK3_GPIO5_R_IT_MSK_SHIFT 2
652#define INT_MSK3_GPIO4_F_IT_MSK_MASK 0x02
653#define INT_MSK3_GPIO4_F_IT_MSK_SHIFT 1
654#define INT_MSK3_GPIO4_R_IT_MSK_MASK 0x01
655#define INT_MSK3_GPIO4_R_IT_MSK_SHIFT 0
656
657
658/*Register GPIO (0x80) register.RegisterDescription */
659#define GPIO_DEB_MASK 0x10
660#define GPIO_DEB_SHIFT 4
661#define GPIO_PUEN_MASK 0x08
662#define GPIO_PUEN_SHIFT 3
663#define GPIO_CFG_MASK 0x04
664#define GPIO_CFG_SHIFT 2
665#define GPIO_STS_MASK 0x02
666#define GPIO_STS_SHIFT 1
667#define GPIO_SET_MASK 0x01
668#define GPIO_SET_SHIFT 0
669
670
671/*Register JTAGVERNUM (0x80) register.RegisterDescription */
672#define JTAGVERNUM_VERNUM_MASK 0x0F
673#define JTAGVERNUM_VERNUM_SHIFT 0
674
675
676/* Register VDDCTRL (0x27) bit definitions */
677#define VDDCTRL_ST_MASK 0x03
678#define VDDCTRL_ST_SHIFT 0
679
680
 681/*Register VDDCTRL_OP (0x28) bit definitions */
682#define VDDCTRL_OP_CMD_MASK 0x80
683#define VDDCTRL_OP_CMD_SHIFT 7
684#define VDDCTRL_OP_SEL_MASK 0x7F
685#define VDDCTRL_OP_SEL_SHIFT 0
686
687
688/*Register VDDCTRL_SR (0x29) bit definitions */
689#define VDDCTRL_SR_SEL_MASK 0x7F
690#define VDDCTRL_SR_SEL_SHIFT 0
691
692
693/* IRQ Definitions */
694#define TPS65910_IRQ_VBAT_VMBDCH 0
695#define TPS65910_IRQ_VBAT_VMHI 1
696#define TPS65910_IRQ_PWRON 2
697#define TPS65910_IRQ_PWRON_LP 3
698#define TPS65910_IRQ_PWRHOLD 4
699#define TPS65910_IRQ_HOTDIE 5
700#define TPS65910_IRQ_RTC_ALARM 6
701#define TPS65910_IRQ_RTC_PERIOD 7
702#define TPS65910_IRQ_GPIO_R 8
703#define TPS65910_IRQ_GPIO_F 9
704#define TPS65910_NUM_IRQ 10
705
706#define TPS65911_IRQ_VBAT_VMBDCH 0
707#define TPS65911_IRQ_VBAT_VMBDCH2L 1
708#define TPS65911_IRQ_VBAT_VMBDCH2H 2
709#define TPS65911_IRQ_VBAT_VMHI 3
710#define TPS65911_IRQ_PWRON 4
711#define TPS65911_IRQ_PWRON_LP 5
712#define TPS65911_IRQ_PWRHOLD_F 6
713#define TPS65911_IRQ_PWRHOLD_R 7
714#define TPS65911_IRQ_HOTDIE 8
715#define TPS65911_IRQ_RTC_ALARM 9
716#define TPS65911_IRQ_RTC_PERIOD 10
717#define TPS65911_IRQ_GPIO0_R 11
718#define TPS65911_IRQ_GPIO0_F 12
719#define TPS65911_IRQ_GPIO1_R 13
720#define TPS65911_IRQ_GPIO1_F 14
721#define TPS65911_IRQ_GPIO2_R 15
722#define TPS65911_IRQ_GPIO2_F 16
723#define TPS65911_IRQ_GPIO3_R 17
724#define TPS65911_IRQ_GPIO3_F 18
725#define TPS65911_IRQ_GPIO4_R 19
726#define TPS65911_IRQ_GPIO4_F 20
727#define TPS65911_IRQ_GPIO5_R 21
728#define TPS65911_IRQ_GPIO5_F 22
729#define TPS65911_IRQ_WTCHDG 23
730#define TPS65911_IRQ_PWRDN 24
731
732#define TPS65911_NUM_IRQ 25
733
734
735/* GPIO Register Definitions */
736#define TPS65910_GPIO_DEB BIT(2)
737#define TPS65910_GPIO_PUEN BIT(3)
738#define TPS65910_GPIO_CFG BIT(2)
739#define TPS65910_GPIO_STS BIT(1)
740#define TPS65910_GPIO_SET BIT(0)
741
742/**
743 * struct tps65910_board
744 * Board platform data may be used to initialize regulators.
745 */
746
747struct tps65910_board {
748 int gpio_base;
749 int irq;
750 int irq_base;
751 int vmbch_threshold;
752 int vmbch2_threshold;
753 struct regulator_init_data *tps65910_pmic_init_data;
754};
755
756/**
757 * struct tps65910 - tps65910 sub-driver chip access routines
758 */
759
760struct tps65910 {
761 struct device *dev;
762 struct i2c_client *i2c_client;
763 struct mutex io_mutex;
764 unsigned int id;
765 int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
766 int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
767
768 /* Client devices */
769 struct tps65910_pmic *pmic;
770 struct tps65910_rtc *rtc;
771 struct tps65910_power *power;
772
773 /* GPIO Handling */
774 struct gpio_chip gpio;
775
776 /* IRQ Handling */
777 struct mutex irq_lock;
778 int chip_irq;
779 int irq_base;
780 int irq_num;
781 u32 irq_mask;
782};
783
784struct tps65910_platform_data {
785 int irq;
786 int irq_base;
787};
788
789int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
790int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
791void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
792int tps65910_irq_init(struct tps65910 *tps65910, int irq,
793 struct tps65910_platform_data *pdata);
794
795static inline int tps65910_chip_id(struct tps65910 *tps65910)
796{
797 return tps65910->id;
798}
799
800#endif /* __LINUX_MFD_TPS65910_H */
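
A short sketch of how a tps65910 sub-driver might combine the register map and helpers declared above, for example to switch the VMMC LDO on and to branch on the chip variant. Whether a single ST-bit write is the complete power-up sequence is an assumption; real code should follow the TPS6591x datasheet.

#include <linux/types.h>
#include <linux/mfd/tps65910.h>

/* Switch the VMMC LDO on by setting the ON bit of its ST field (sketch only). */
static int example_enable_vmmc(struct tps65910 *tps65910)
{
	return tps65910_set_bits(tps65910, TPS65910_VMMC, LDO_ST_ON_BIT);
}

/* Choose a register layout depending on which chip variant is present. */
static bool example_is_tps65911(struct tps65910 *tps65910)
{
	return tps65910_chip_id(tps65910) == TPS65911;
}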
diff --git a/include/linux/mfd/twl4030-codec.h b/include/linux/mfd/twl4030-codec.h
index 2ec317c68e59..5cc16bbd1da1 100644
--- a/include/linux/mfd/twl4030-codec.h
+++ b/include/linux/mfd/twl4030-codec.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * MFD driver for twl4030 codec submodule 2 * MFD driver for twl4030 codec submodule
3 * 3 *
4 * Author: Peter Ujfalusi <peter.ujfalusi@nokia.com> 4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 * 5 *
6 * Copyright: (C) 2009 Nokia Corporation 6 * Copyright: (C) 2009 Nokia Corporation
7 * 7 *
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 903280d21866..0d515ee1c247 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -301,30 +301,4 @@ int wm831x_device_suspend(struct wm831x *wm831x);
301int wm831x_irq_init(struct wm831x *wm831x, int irq); 301int wm831x_irq_init(struct wm831x *wm831x, int irq);
302void wm831x_irq_exit(struct wm831x *wm831x); 302void wm831x_irq_exit(struct wm831x *wm831x);
303 303
304static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
305 unsigned int irq,
306 irq_handler_t handler,
307 unsigned long flags,
308 const char *name,
309 void *dev)
310{
311 return request_threaded_irq(irq, NULL, handler, flags, name, dev);
312}
313
314static inline void wm831x_free_irq(struct wm831x *wm831x,
315 unsigned int irq, void *dev)
316{
317 free_irq(irq, dev);
318}
319
320static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
321{
322 disable_irq(irq);
323}
324
325static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
326{
327 enable_irq(irq);
328}
329
330#endif 304#endif
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 632d1567a1b6..ff42d700293f 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -105,6 +105,9 @@ struct wm831x_watchdog_pdata {
105#define WM831X_MAX_LDO 11 105#define WM831X_MAX_LDO 11
106#define WM831X_MAX_ISINK 2 106#define WM831X_MAX_ISINK 2
107 107
108#define WM831X_GPIO_CONFIGURE 0x10000
109#define WM831X_GPIO_NUM 16
110
108struct wm831x_pdata { 111struct wm831x_pdata {
109 /** Used to distinguish multiple WM831x chips */ 112 /** Used to distinguish multiple WM831x chips */
110 int wm831x_num; 113 int wm831x_num;
@@ -119,6 +122,7 @@ struct wm831x_pdata {
119 122
120 int irq_base; 123 int irq_base;
121 int gpio_base; 124 int gpio_base;
125 int gpio_defaults[WM831X_GPIO_NUM];
122 struct wm831x_backlight_pdata *backlight; 126 struct wm831x_backlight_pdata *backlight;
123 struct wm831x_backup_pdata *backup; 127 struct wm831x_backup_pdata *backup;
124 struct wm831x_battery_pdata *battery; 128 struct wm831x_battery_pdata *battery;
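
A guess at how a board file might populate the new gpio_defaults array. The assumption here is that an entry of zero leaves the GPIO at its hardware default, while WM831X_GPIO_CONFIGURE lets a board force the control register to zero; the raw value 0x8000 and the gpio_base below are hypothetical.

#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_pdata board_wm831x_pdata = {
	.wm831x_num	= 0,
	.gpio_base	= 200,			/* hypothetical Linux GPIO base */
	.gpio_defaults	= {
		/* GPIO1: write a raw control-register value (hypothetical) */
		[0] = 0x8000,
		/* GPIO4: force the control register to 0 rather than "no change" */
		[3] = WM831X_GPIO_CONFIGURE,
		/* remaining entries are 0: assumed to mean "keep hardware default" */
	},
};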
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6507dde38b16..9670f71d7be9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -153,6 +153,7 @@ extern pgprot_t protection_map[16];
153#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ 153#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
154#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ 154#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
155#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */ 155#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
156#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
156 157
157/* 158/*
158 * This interface is used by x86 PAT code to identify a pfn mapping that is 159 * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -164,12 +165,12 @@ extern pgprot_t protection_map[16];
164 */ 165 */
165static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) 166static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
166{ 167{
167 return (vma->vm_flags & VM_PFN_AT_MMAP); 168 return !!(vma->vm_flags & VM_PFN_AT_MMAP);
168} 169}
169 170
170static inline int is_pfn_mapping(struct vm_area_struct *vma) 171static inline int is_pfn_mapping(struct vm_area_struct *vma)
171{ 172{
172 return (vma->vm_flags & VM_PFNMAP); 173 return !!(vma->vm_flags & VM_PFNMAP);
173} 174}
174 175
175/* 176/*
@@ -604,10 +605,6 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
604#define NODE_NOT_IN_PAGE_FLAGS 605#define NODE_NOT_IN_PAGE_FLAGS
605#endif 606#endif
606 607
607#ifndef PFN_SECTION_SHIFT
608#define PFN_SECTION_SHIFT 0
609#endif
610
611/* 608/*
612 * Define the bit shifts to access each section. For non-existent 609 * Define the bit shifts to access each section. For non-existent
613 * sections we define the shift as 0; that plus a 0 mask ensures 610 * sections we define the shift as 0; that plus a 0 mask ensures
@@ -681,6 +678,12 @@ static inline struct zone *page_zone(struct page *page)
681} 678}
682 679
683#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 680#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
681static inline void set_page_section(struct page *page, unsigned long section)
682{
683 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
684 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
685}
686
684static inline unsigned long page_to_section(struct page *page) 687static inline unsigned long page_to_section(struct page *page)
685{ 688{
686 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 689 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -699,18 +702,14 @@ static inline void set_page_node(struct page *page, unsigned long node)
699 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; 702 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
700} 703}
701 704
702static inline void set_page_section(struct page *page, unsigned long section)
703{
704 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
705 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
706}
707
708static inline void set_page_links(struct page *page, enum zone_type zone, 705static inline void set_page_links(struct page *page, enum zone_type zone,
709 unsigned long node, unsigned long pfn) 706 unsigned long node, unsigned long pfn)
710{ 707{
711 set_page_zone(page, zone); 708 set_page_zone(page, zone);
712 set_page_node(page, node); 709 set_page_node(page, node);
710#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
713 set_page_section(page, pfn_to_section_nr(pfn)); 711 set_page_section(page, pfn_to_section_nr(pfn));
712#endif
714} 713}
715 714
716/* 715/*
@@ -862,26 +861,18 @@ extern void pagefault_out_of_memory(void);
862#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) 861#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
863 862
864/* 863/*
865 * Flags passed to show_mem() and __show_free_areas() to suppress output in 864 * Flags passed to show_mem() and show_free_areas() to suppress output in
866 * various contexts. 865 * various contexts.
867 */ 866 */
868#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */ 867#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
869 868
870extern void show_free_areas(void); 869extern void show_free_areas(unsigned int flags);
871extern void __show_free_areas(unsigned int flags); 870extern bool skip_free_areas_node(unsigned int flags, int nid);
872 871
873int shmem_lock(struct file *file, int lock, struct user_struct *user); 872int shmem_lock(struct file *file, int lock, struct user_struct *user);
874struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); 873struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
875int shmem_zero_setup(struct vm_area_struct *); 874int shmem_zero_setup(struct vm_area_struct *);
876 875
877#ifndef CONFIG_MMU
878extern unsigned long shmem_get_unmapped_area(struct file *file,
879 unsigned long addr,
880 unsigned long len,
881 unsigned long pgoff,
882 unsigned long flags);
883#endif
884
885extern int can_do_mlock(void); 876extern int can_do_mlock(void);
886extern int user_shm_lock(size_t, struct user_struct *); 877extern int user_shm_lock(size_t, struct user_struct *);
887extern void user_shm_unlock(size_t, struct user_struct *); 878extern void user_shm_unlock(size_t, struct user_struct *);
@@ -894,8 +885,6 @@ struct zap_details {
894 struct address_space *check_mapping; /* Check page->mapping if set */ 885 struct address_space *check_mapping; /* Check page->mapping if set */
895 pgoff_t first_index; /* Lowest page->index to unmap */ 886 pgoff_t first_index; /* Lowest page->index to unmap */
896 pgoff_t last_index; /* Highest page->index to unmap */ 887 pgoff_t last_index; /* Highest page->index to unmap */
897 spinlock_t *i_mmap_lock; /* For unmap_mapping_range: */
898 unsigned long truncate_count; /* Compare vm_truncate_count */
899}; 888};
900 889
901struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 890struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -905,7 +894,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
905 unsigned long size); 894 unsigned long size);
906unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, 895unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
907 unsigned long size, struct zap_details *); 896 unsigned long size, struct zap_details *);
908unsigned long unmap_vmas(struct mmu_gather **tlb, 897unsigned long unmap_vmas(struct mmu_gather *tlb,
909 struct vm_area_struct *start_vma, unsigned long start_addr, 898 struct vm_area_struct *start_vma, unsigned long start_addr,
910 unsigned long end_addr, unsigned long *nr_accounted, 899 unsigned long end_addr, unsigned long *nr_accounted,
911 struct zap_details *); 900 struct zap_details *);
@@ -1056,65 +1045,35 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1056/* 1045/*
1057 * per-process(per-mm_struct) statistics. 1046 * per-process(per-mm_struct) statistics.
1058 */ 1047 */
1059#if defined(SPLIT_RSS_COUNTING)
1060/*
1061 * The mm counters are not protected by its page_table_lock,
1062 * so must be incremented atomically.
1063 */
1064static inline void set_mm_counter(struct mm_struct *mm, int member, long value) 1048static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
1065{ 1049{
1066 atomic_long_set(&mm->rss_stat.count[member], value); 1050 atomic_long_set(&mm->rss_stat.count[member], value);
1067} 1051}
1068 1052
1053#if defined(SPLIT_RSS_COUNTING)
1069unsigned long get_mm_counter(struct mm_struct *mm, int member); 1054unsigned long get_mm_counter(struct mm_struct *mm, int member);
1070 1055#else
1071static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1072{
1073 atomic_long_add(value, &mm->rss_stat.count[member]);
1074}
1075
1076static inline void inc_mm_counter(struct mm_struct *mm, int member)
1077{
1078 atomic_long_inc(&mm->rss_stat.count[member]);
1079}
1080
1081static inline void dec_mm_counter(struct mm_struct *mm, int member)
1082{
1083 atomic_long_dec(&mm->rss_stat.count[member]);
1084}
1085
1086#else /* !USE_SPLIT_PTLOCKS */
1087/*
1088 * The mm counters are protected by its page_table_lock,
1089 * so can be incremented directly.
1090 */
1091static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
1092{
1093 mm->rss_stat.count[member] = value;
1094}
1095
1096static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) 1056static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1097{ 1057{
1098 return mm->rss_stat.count[member]; 1058 return atomic_long_read(&mm->rss_stat.count[member]);
1099} 1059}
1060#endif
1100 1061
1101static inline void add_mm_counter(struct mm_struct *mm, int member, long value) 1062static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1102{ 1063{
1103 mm->rss_stat.count[member] += value; 1064 atomic_long_add(value, &mm->rss_stat.count[member]);
1104} 1065}
1105 1066
1106static inline void inc_mm_counter(struct mm_struct *mm, int member) 1067static inline void inc_mm_counter(struct mm_struct *mm, int member)
1107{ 1068{
1108 mm->rss_stat.count[member]++; 1069 atomic_long_inc(&mm->rss_stat.count[member]);
1109} 1070}
1110 1071
1111static inline void dec_mm_counter(struct mm_struct *mm, int member) 1072static inline void dec_mm_counter(struct mm_struct *mm, int member)
1112{ 1073{
1113 mm->rss_stat.count[member]--; 1074 atomic_long_dec(&mm->rss_stat.count[member]);
1114} 1075}
1115 1076
1116#endif /* !USE_SPLIT_PTLOCKS */
1117
1118static inline unsigned long get_mm_rss(struct mm_struct *mm) 1077static inline unsigned long get_mm_rss(struct mm_struct *mm)
1119{ 1078{
1120 return get_mm_counter(mm, MM_FILEPAGES) + 1079 return get_mm_counter(mm, MM_FILEPAGES) +
@@ -1163,13 +1122,24 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
1163#endif 1122#endif
1164 1123
1165/* 1124/*
1125 * This struct is used to pass information from page reclaim to the shrinkers.
 1126 * We consolidate the values for easier extension later.
1127 */
1128struct shrink_control {
1129 gfp_t gfp_mask;
1130
1131 /* How many slab objects shrinker() should scan and try to reclaim */
1132 unsigned long nr_to_scan;
1133};
1134
1135/*
1166 * A callback you can register to apply pressure to ageable caches. 1136 * A callback you can register to apply pressure to ageable caches.
1167 * 1137 *
1168 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should 1138 * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
1169 * look through the least-recently-used 'nr_to_scan' entries and 1139 * and a 'gfpmask'. It should look through the least-recently-used
1170 * attempt to free them up. It should return the number of objects 1140 * 'nr_to_scan' entries and attempt to free them up. It should return
1171 * which remain in the cache. If it returns -1, it means it cannot do 1141 * the number of objects which remain in the cache. If it returns -1, it means
1172 * any scanning at this time (eg. there is a risk of deadlock). 1142 * it cannot do any scanning at this time (eg. there is a risk of deadlock).
1173 * 1143 *
1174 * The 'gfpmask' refers to the allocation we are currently trying to 1144 * The 'gfpmask' refers to the allocation we are currently trying to
1175 * fulfil. 1145 * fulfil.
@@ -1178,7 +1148,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
1178 * querying the cache size, so a fastpath for that case is appropriate. 1148 * querying the cache size, so a fastpath for that case is appropriate.
1179 */ 1149 */
1180struct shrinker { 1150struct shrinker {
1181 int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask); 1151 int (*shrink)(struct shrinker *, struct shrink_control *sc);
1182 int seeks; /* seeks to recreate an obj */ 1152 int seeks; /* seeks to recreate an obj */
1183 1153
1184 /* These are for internal use */ 1154 /* These are for internal use */
@@ -1380,7 +1350,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
1380extern void memmap_init_zone(unsigned long, int, unsigned long, 1350extern void memmap_init_zone(unsigned long, int, unsigned long,
1381 unsigned long, enum memmap_context); 1351 unsigned long, enum memmap_context);
1382extern void setup_per_zone_wmarks(void); 1352extern void setup_per_zone_wmarks(void);
1383extern void calculate_zone_inactive_ratio(struct zone *zone); 1353extern int __meminit init_per_zone_wmark_min(void);
1384extern void mem_init(void); 1354extern void mem_init(void);
1385extern void __init mmap_init(void); 1355extern void __init mmap_init(void);
1386extern void show_mem(unsigned int flags); 1356extern void show_mem(unsigned int flags);
@@ -1388,6 +1358,8 @@ extern void si_meminfo(struct sysinfo * val);
1388extern void si_meminfo_node(struct sysinfo *val, int nid); 1358extern void si_meminfo_node(struct sysinfo *val, int nid);
1389extern int after_bootmem; 1359extern int after_bootmem;
1390 1360
1361extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1362
1391extern void setup_per_cpu_pageset(void); 1363extern void setup_per_cpu_pageset(void);
1392 1364
1393extern void zone_pcp_update(struct zone *zone); 1365extern void zone_pcp_update(struct zone *zone);
@@ -1436,17 +1408,11 @@ extern void exit_mmap(struct mm_struct *);
1436extern int mm_take_all_locks(struct mm_struct *mm); 1408extern int mm_take_all_locks(struct mm_struct *mm);
1437extern void mm_drop_all_locks(struct mm_struct *mm); 1409extern void mm_drop_all_locks(struct mm_struct *mm);
1438 1410
1439#ifdef CONFIG_PROC_FS
1440/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ 1411/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
1441extern void added_exe_file_vma(struct mm_struct *mm); 1412extern void added_exe_file_vma(struct mm_struct *mm);
1442extern void removed_exe_file_vma(struct mm_struct *mm); 1413extern void removed_exe_file_vma(struct mm_struct *mm);
1443#else 1414extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1444static inline void added_exe_file_vma(struct mm_struct *mm) 1415extern struct file *get_mm_exe_file(struct mm_struct *mm);
1445{}
1446
1447static inline void removed_exe_file_vma(struct mm_struct *mm)
1448{}
1449#endif /* CONFIG_PROC_FS */
1450 1416
1451extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); 1417extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1452extern int install_special_mapping(struct mm_struct *mm, 1418extern int install_special_mapping(struct mm_struct *mm,
@@ -1460,7 +1426,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1460 unsigned long flag, unsigned long pgoff); 1426 unsigned long flag, unsigned long pgoff);
1461extern unsigned long mmap_region(struct file *file, unsigned long addr, 1427extern unsigned long mmap_region(struct file *file, unsigned long addr,
1462 unsigned long len, unsigned long flags, 1428 unsigned long len, unsigned long flags,
1463 unsigned int vm_flags, unsigned long pgoff); 1429 vm_flags_t vm_flags, unsigned long pgoff);
1464 1430
1465static inline unsigned long do_mmap(struct file *file, unsigned long addr, 1431static inline unsigned long do_mmap(struct file *file, unsigned long addr,
1466 unsigned long len, unsigned long prot, 1432 unsigned long len, unsigned long prot,
@@ -1517,15 +1483,17 @@ unsigned long ra_submit(struct file_ra_state *ra,
1517 struct address_space *mapping, 1483 struct address_space *mapping,
1518 struct file *filp); 1484 struct file *filp);
1519 1485
1520/* Do stack extension */ 1486/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
1521extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 1487extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1488
 1489/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
1490extern int expand_downwards(struct vm_area_struct *vma,
1491 unsigned long address);
1522#if VM_GROWSUP 1492#if VM_GROWSUP
1523extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1493extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1524#else 1494#else
1525 #define expand_upwards(vma, address) do { } while (0) 1495 #define expand_upwards(vma, address) do { } while (0)
1526#endif 1496#endif
1527extern int expand_stack_downwards(struct vm_area_struct *vma,
1528 unsigned long address);
1529 1497
1530/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 1498/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
1531extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 1499extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
@@ -1627,8 +1595,9 @@ int in_gate_area_no_mm(unsigned long addr);
1627 1595
1628int drop_caches_sysctl_handler(struct ctl_table *, int, 1596int drop_caches_sysctl_handler(struct ctl_table *, int,
1629 void __user *, size_t *, loff_t *); 1597 void __user *, size_t *, loff_t *);
1630unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, 1598unsigned long shrink_slab(struct shrink_control *shrink,
1631 unsigned long lru_pages); 1599 unsigned long nr_pages_scanned,
1600 unsigned long lru_pages);
1632 1601
1633#ifndef CONFIG_MMU 1602#ifndef CONFIG_MMU
1634#define randomize_va_space 0 1603#define randomize_va_space 0
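
A minimal sketch of a cache registering a shrinker against the new shrink_control-based callback shown above: a zero nr_to_scan is a size query, -1 declines to scan, and the return value is the number of objects left. The cache itself (demo_cache_objects, demo_cache_trim) is hypothetical.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

static int demo_cache_objects;			/* hypothetical object count */
static DEFINE_SPINLOCK(demo_cache_lock);

/* Hypothetical reclaim: a real cache would walk an LRU list here. */
static int demo_cache_trim(int nr)
{
	return min(nr, demo_cache_objects);
}

static int demo_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	int freed;

	if (!sc->nr_to_scan)			/* query pass: just report the cache size */
		return demo_cache_objects;

	if (!(sc->gfp_mask & __GFP_FS))		/* example policy: avoid FS recursion */
		return -1;

	spin_lock(&demo_cache_lock);
	freed = demo_cache_trim(sc->nr_to_scan);
	demo_cache_objects -= freed;
	spin_unlock(&demo_cache_lock);

	return demo_cache_objects;		/* objects remaining in the cache */
}

static struct shrinker demo_shrinker = {
	.shrink	= demo_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at init, unregister_shrinker() at exit. */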
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 02aa5619709b..2a78aae78c69 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -102,6 +102,8 @@ struct page {
102#endif 102#endif
103}; 103};
104 104
105typedef unsigned long __nocast vm_flags_t;
106
105/* 107/*
106 * A region containing a mapping of a non-memory backed file under NOMMU 108 * A region containing a mapping of a non-memory backed file under NOMMU
107 * conditions. These are held in a global tree and are pinned by the VMAs that 109 * conditions. These are held in a global tree and are pinned by the VMAs that
@@ -109,7 +111,7 @@ struct page {
109 */ 111 */
110struct vm_region { 112struct vm_region {
111 struct rb_node vm_rb; /* link in global region tree */ 113 struct rb_node vm_rb; /* link in global region tree */
112 unsigned long vm_flags; /* VMA vm_flags */ 114 vm_flags_t vm_flags; /* VMA vm_flags */
113 unsigned long vm_start; /* start address of region */ 115 unsigned long vm_start; /* start address of region */
114 unsigned long vm_end; /* region initialised to here */ 116 unsigned long vm_end; /* region initialised to here */
115 unsigned long vm_top; /* region allocated to here */ 117 unsigned long vm_top; /* region allocated to here */
@@ -175,7 +177,6 @@ struct vm_area_struct {
175 units, *not* PAGE_CACHE_SIZE */ 177 units, *not* PAGE_CACHE_SIZE */
176 struct file * vm_file; /* File we map to (can be NULL). */ 178 struct file * vm_file; /* File we map to (can be NULL). */
177 void * vm_private_data; /* was vm_pte (shared mem) */ 179 void * vm_private_data; /* was vm_pte (shared mem) */
178 unsigned long vm_truncate_count;/* truncate_count or restart_addr */
179 180
180#ifndef CONFIG_MMU 181#ifndef CONFIG_MMU
181 struct vm_region *vm_region; /* NOMMU mapping region */ 182 struct vm_region *vm_region; /* NOMMU mapping region */
@@ -205,19 +206,16 @@ enum {
205 206
206#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU) 207#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
207#define SPLIT_RSS_COUNTING 208#define SPLIT_RSS_COUNTING
208struct mm_rss_stat {
209 atomic_long_t count[NR_MM_COUNTERS];
210};
211/* per-thread cached information, */ 209/* per-thread cached information, */
212struct task_rss_stat { 210struct task_rss_stat {
213 int events; /* for synchronization threshold */ 211 int events; /* for synchronization threshold */
214 int count[NR_MM_COUNTERS]; 212 int count[NR_MM_COUNTERS];
215}; 213};
216#else /* !USE_SPLIT_PTLOCKS */ 214#endif /* USE_SPLIT_PTLOCKS */
215
217struct mm_rss_stat { 216struct mm_rss_stat {
218 unsigned long count[NR_MM_COUNTERS]; 217 atomic_long_t count[NR_MM_COUNTERS];
219}; 218};
220#endif /* !USE_SPLIT_PTLOCKS */
221 219
222struct mm_struct { 220struct mm_struct {
223 struct vm_area_struct * mmap; /* list of VMAs */ 221 struct vm_area_struct * mmap; /* list of VMAs */
@@ -266,8 +264,6 @@ struct mm_struct {
266 264
267 struct linux_binfmt *binfmt; 265 struct linux_binfmt *binfmt;
268 266
269 cpumask_t cpu_vm_mask;
270
271 /* Architecture-specific MM context */ 267 /* Architecture-specific MM context */
272 mm_context_t context; 268 mm_context_t context;
273 269
@@ -306,20 +302,23 @@ struct mm_struct {
306 struct task_struct __rcu *owner; 302 struct task_struct __rcu *owner;
307#endif 303#endif
308 304
309#ifdef CONFIG_PROC_FS
310 /* store ref to file /proc/<pid>/exe symlink points to */ 305 /* store ref to file /proc/<pid>/exe symlink points to */
311 struct file *exe_file; 306 struct file *exe_file;
312 unsigned long num_exe_file_vmas; 307 unsigned long num_exe_file_vmas;
313#endif
314#ifdef CONFIG_MMU_NOTIFIER 308#ifdef CONFIG_MMU_NOTIFIER
315 struct mmu_notifier_mm *mmu_notifier_mm; 309 struct mmu_notifier_mm *mmu_notifier_mm;
316#endif 310#endif
317#ifdef CONFIG_TRANSPARENT_HUGEPAGE 311#ifdef CONFIG_TRANSPARENT_HUGEPAGE
318 pgtable_t pmd_huge_pte; /* protected by page_table_lock */ 312 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
319#endif 313#endif
314
315 cpumask_var_t cpu_vm_mask_var;
320}; 316};
321 317
322/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ 318/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
323#define mm_cpumask(mm) (&(mm)->cpu_vm_mask) 319static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
320{
321 return mm->cpu_vm_mask_var;
322}
324 323
325#endif /* _LINUX_MM_TYPES_H */ 324#endif /* _LINUX_MM_TYPES_H */
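
For context, a minimal sketch (not part of this diff) of how callers keep using the accessor now that the open-coded cpu_vm_mask becomes cpu_vm_mask_var behind mm_cpumask(): architecture context-switch code continues to go through mm_cpumask() and only the storage behind it changes. The function and parameter names below are illustrative.

#include <linux/cpumask.h>
#include <linux/mm_types.h>

/* Illustrative switch_mm()-style helper: track which CPUs use "next". */
static inline void example_track_mm(struct mm_struct *prev,
				    struct mm_struct *next, unsigned int cpu)
{
	if (prev != next) {
		cpumask_set_cpu(cpu, mm_cpumask(next));
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
}
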
diff --git a/include/linux/mmc/Kbuild b/include/linux/mmc/Kbuild
new file mode 100644
index 000000000000..1fb26448faa9
--- /dev/null
+++ b/include/linux/mmc/Kbuild
@@ -0,0 +1 @@
header-y += ioctl.h
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index adb4888248be..c6927a4d157f 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -11,6 +11,7 @@
11#define LINUX_MMC_CARD_H 11#define LINUX_MMC_CARD_H
12 12
13#include <linux/mmc/core.h> 13#include <linux/mmc/core.h>
14#include <linux/mod_devicetable.h>
14 15
15struct mmc_cid { 16struct mmc_cid {
16 unsigned int manfid; 17 unsigned int manfid;
@@ -29,6 +30,7 @@ struct mmc_csd {
29 unsigned short cmdclass; 30 unsigned short cmdclass;
30 unsigned short tacc_clks; 31 unsigned short tacc_clks;
31 unsigned int tacc_ns; 32 unsigned int tacc_ns;
33 unsigned int c_size;
32 unsigned int r2w_factor; 34 unsigned int r2w_factor;
33 unsigned int max_dtr; 35 unsigned int max_dtr;
34 unsigned int erase_size; /* In sectors */ 36 unsigned int erase_size; /* In sectors */
@@ -45,6 +47,10 @@ struct mmc_ext_csd {
45 u8 rev; 47 u8 rev;
46 u8 erase_group_def; 48 u8 erase_group_def;
47 u8 sec_feature_support; 49 u8 sec_feature_support;
50 u8 rel_sectors;
51 u8 rel_param;
52 u8 part_config;
53 unsigned int part_time; /* Units: ms */
48 unsigned int sa_timeout; /* Units: 100ns */ 54 unsigned int sa_timeout; /* Units: 100ns */
49 unsigned int hs_max_dtr; 55 unsigned int hs_max_dtr;
50 unsigned int sectors; 56 unsigned int sectors;
@@ -57,13 +63,18 @@ struct mmc_ext_csd {
57 bool enhanced_area_en; /* enable bit */ 63 bool enhanced_area_en; /* enable bit */
58 unsigned long long enhanced_area_offset; /* Units: Byte */ 64 unsigned long long enhanced_area_offset; /* Units: Byte */
59 unsigned int enhanced_area_size; /* Units: KB */ 65 unsigned int enhanced_area_size; /* Units: KB */
66 unsigned int boot_size; /* in bytes */
60}; 67};
61 68
62struct sd_scr { 69struct sd_scr {
63 unsigned char sda_vsn; 70 unsigned char sda_vsn;
71 unsigned char sda_spec3;
64 unsigned char bus_widths; 72 unsigned char bus_widths;
65#define SD_SCR_BUS_WIDTH_1 (1<<0) 73#define SD_SCR_BUS_WIDTH_1 (1<<0)
66#define SD_SCR_BUS_WIDTH_4 (1<<2) 74#define SD_SCR_BUS_WIDTH_4 (1<<2)
75 unsigned char cmds;
76#define SD_SCR_CMD20_SUPPORT (1<<0)
77#define SD_SCR_CMD23_SUPPORT (1<<1)
67}; 78};
68 79
69struct sd_ssr { 80struct sd_ssr {
@@ -74,6 +85,39 @@ struct sd_ssr {
74 85
75struct sd_switch_caps { 86struct sd_switch_caps {
76 unsigned int hs_max_dtr; 87 unsigned int hs_max_dtr;
88 unsigned int uhs_max_dtr;
89#define UHS_SDR104_MAX_DTR 208000000
90#define UHS_SDR50_MAX_DTR 100000000
91#define UHS_DDR50_MAX_DTR 50000000
92#define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
93#define UHS_SDR12_MAX_DTR 25000000
94 unsigned int sd3_bus_mode;
95#define UHS_SDR12_BUS_SPEED 0
96#define UHS_SDR25_BUS_SPEED 1
97#define UHS_SDR50_BUS_SPEED 2
98#define UHS_SDR104_BUS_SPEED 3
99#define UHS_DDR50_BUS_SPEED 4
100
101#define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED)
102#define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED)
103#define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED)
104#define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED)
105#define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED)
106 unsigned int sd3_drv_type;
107#define SD_DRIVER_TYPE_B 0x01
108#define SD_DRIVER_TYPE_A 0x02
109#define SD_DRIVER_TYPE_C 0x04
110#define SD_DRIVER_TYPE_D 0x08
111 unsigned int sd3_curr_limit;
112#define SD_SET_CURRENT_LIMIT_200 0
113#define SD_SET_CURRENT_LIMIT_400 1
114#define SD_SET_CURRENT_LIMIT_600 2
115#define SD_SET_CURRENT_LIMIT_800 3
116
117#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200)
118#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
119#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600)
120#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
77}; 121};
78 122
79struct sdio_cccr { 123struct sdio_cccr {
@@ -118,6 +162,8 @@ struct mmc_card {
118#define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ 162#define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */
119#define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ 163#define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */
120#define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed mode */ 164#define MMC_STATE_HIGHSPEED_DDR (1<<4) /* card is in high speed mode */
165#define MMC_STATE_ULTRAHIGHSPEED (1<<5) /* card is in ultra high speed mode */
166#define MMC_CARD_SDXC (1<<6) /* card is SDXC */
121 unsigned int quirks; /* card quirks */ 167 unsigned int quirks; /* card quirks */
122#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ 168#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
123#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ 169#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -125,6 +171,10 @@ struct mmc_card {
125#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ 171#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
126 /* (missing CIA registers) */ 172 /* (missing CIA registers) */
127#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */ 173#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
174#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */
175#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */
176#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
177#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
128 178
129 unsigned int erase_size; /* erase size in sectors */ 179 unsigned int erase_size; /* erase size in sectors */
130 unsigned int erase_shift; /* if erase unit is power 2 */ 180 unsigned int erase_shift; /* if erase unit is power 2 */
@@ -145,14 +195,100 @@ struct mmc_card {
145 struct sdio_cccr cccr; /* common card info */ 195 struct sdio_cccr cccr; /* common card info */
146 struct sdio_cis cis; /* common tuple info */ 196 struct sdio_cis cis; /* common tuple info */
147 struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */ 197 struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
198 struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */
148 unsigned num_info; /* number of info strings */ 199 unsigned num_info; /* number of info strings */
149 const char **info; /* info strings */ 200 const char **info; /* info strings */
150 struct sdio_func_tuple *tuples; /* unknown common tuples */ 201 struct sdio_func_tuple *tuples; /* unknown common tuples */
151 202
203 unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
204
152 struct dentry *debugfs_root; 205 struct dentry *debugfs_root;
153}; 206};
154 207
155void mmc_fixup_device(struct mmc_card *dev); 208/*
209 * The world is not perfect and supplies us with broken mmc/sdio devices.
210 * For at least some of these bugs we need a work-around.
211 */
212
213struct mmc_fixup {
214 /* CID-specific fields. */
215 const char *name;
216
217 /* Valid revision range */
218 u64 rev_start, rev_end;
219
220 unsigned int manfid;
221 unsigned short oemid;
222
223 /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
224 u16 cis_vendor, cis_device;
225
226 void (*vendor_fixup)(struct mmc_card *card, int data);
227 int data;
228};
229
230#define CID_MANFID_ANY (-1u)
231#define CID_OEMID_ANY ((unsigned short) -1)
232#define CID_NAME_ANY (NULL)
233
234#define END_FIXUP { 0 }
235
236#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
237 _cis_vendor, _cis_device, \
238 _fixup, _data) \
239 { \
240 .name = (_name), \
241 .manfid = (_manfid), \
242 .oemid = (_oemid), \
243 .rev_start = (_rev_start), \
244 .rev_end = (_rev_end), \
245 .cis_vendor = (_cis_vendor), \
246 .cis_device = (_cis_device), \
247 .vendor_fixup = (_fixup), \
248 .data = (_data), \
249 }
250
251#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
252 _fixup, _data) \
253 _FIXUP_EXT(_name, _manfid, \
254 _oemid, _rev_start, _rev_end, \
255 SDIO_ANY_ID, SDIO_ANY_ID, \
256 _fixup, _data) \
257
258#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
259 MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
260
261#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
262 _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
263 CID_OEMID_ANY, 0, -1ull, \
264 _vendor, _device, \
265 _fixup, _data) \
266
267#define cid_rev(hwrev, fwrev, year, month) \
268 (((u64) hwrev) << 40 | \
269 ((u64) fwrev) << 32 | \
270 ((u64) year) << 16 | \
271 ((u64) month))
272
273#define cid_rev_card(card) \
274 cid_rev(card->cid.hwrev, \
275 card->cid.fwrev, \
276 card->cid.year, \
277 card->cid.month)
278
279/*
280 * Unconditionally quirk add/remove.
281 */
282
283static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
284{
285 card->quirks |= data;
286}
287
288static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
289{
290 card->quirks &= ~data;
291}
156 292
157#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) 293#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
158#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) 294#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
@@ -163,12 +299,50 @@ void mmc_fixup_device(struct mmc_card *dev);
163#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) 299#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
164#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) 300#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
165#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) 301#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
302#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
303#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
166 304
167#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 305#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
168#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 306#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
169#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) 307#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
170#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) 308#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
171#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) 309#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
310#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
311#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
312
313/*
314 * Quirk add/remove for MMC products.
315 */
316
317static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
318{
319 if (mmc_card_mmc(card))
320 card->quirks |= data;
321}
322
323static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
324 int data)
325{
326 if (mmc_card_mmc(card))
327 card->quirks &= ~data;
328}
329
330/*
331 * Quirk add/remove for SD products.
332 */
333
334static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
335{
336 if (mmc_card_sd(card))
337 card->quirks |= data;
338}
339
340static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
341 int data)
342{
343 if (mmc_card_sd(card))
344 card->quirks &= ~data;
345}
172 346
173static inline int mmc_card_lenient_fn0(const struct mmc_card *c) 347static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
174{ 348{
@@ -180,6 +354,16 @@ static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
180 return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; 354 return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
181} 355}
182 356
357static inline int mmc_card_disable_cd(const struct mmc_card *c)
358{
359 return c->quirks & MMC_QUIRK_DISABLE_CD;
360}
361
362static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
363{
364 return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
365}
366
183#define mmc_card_name(c) ((c)->cid.prod_name) 367#define mmc_card_name(c) ((c)->cid.prod_name)
184#define mmc_card_id(c) (dev_name(&(c)->dev)) 368#define mmc_card_id(c) (dev_name(&(c)->dev))
185 369
@@ -203,4 +387,7 @@ struct mmc_driver {
203extern int mmc_register_driver(struct mmc_driver *); 387extern int mmc_register_driver(struct mmc_driver *);
204extern void mmc_unregister_driver(struct mmc_driver *); 388extern void mmc_unregister_driver(struct mmc_driver *);
205 389
390extern void mmc_fixup_device(struct mmc_card *card,
391 const struct mmc_fixup *table);
392
206#endif 393#endif
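
A short sketch of how the new quirk machinery is intended to be used: a table of struct mmc_fixup entries built with the helper macros above, applied with mmc_fixup_device() once the card has been identified. The card name and the choice of quirk are made-up examples, not taken from this series.

#include <linux/mmc/card.h>

static const struct mmc_fixup example_fixups[] = {
	/* Hypothetical eMMC part that cannot handle CMD23 reliably. */
	MMC_FIXUP("EXMPL", CID_MANFID_ANY, CID_OEMID_ANY,
		  add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23),
	END_FIXUP
};

static void example_apply_quirks(struct mmc_card *card)
{
	mmc_fixup_device(card, example_fixups);
}
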
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 07f27af4dba5..b6718e549a51 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -92,7 +92,7 @@ struct mmc_command {
92 * actively failing requests 92 * actively failing requests
93 */ 93 */
94 94
95 unsigned int erase_timeout; /* in milliseconds */ 95 unsigned int cmd_timeout_ms; /* in milliseconds */
96 96
97 struct mmc_data *data; /* data segment associated with cmd */ 97 struct mmc_data *data; /* data segment associated with cmd */
98 struct mmc_request *mrq; /* associated request */ 98 struct mmc_request *mrq; /* associated request */
@@ -120,6 +120,7 @@ struct mmc_data {
120}; 120};
121 121
122struct mmc_request { 122struct mmc_request {
123 struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */
123 struct mmc_command *cmd; 124 struct mmc_command *cmd;
124 struct mmc_data *data; 125 struct mmc_data *data;
125 struct mmc_command *stop; 126 struct mmc_command *stop;
@@ -133,8 +134,10 @@ struct mmc_card;
133 134
134extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); 135extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
135extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); 136extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
137extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
136extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, 138extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
137 struct mmc_command *, int); 139 struct mmc_command *, int);
140extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
138 141
139#define MMC_ERASE_ARG 0x00000000 142#define MMC_ERASE_ARG 0x00000000
140#define MMC_SECURE_ERASE_ARG 0x80000000 143#define MMC_SECURE_ERASE_ARG 0x80000000
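
The new sbc member lets a request carry a SET_BLOCK_COUNT (CMD23) that the core issues before the data command, so no stop command is needed for a predefined-count transfer. Below is a rough sketch of packaging a multiblock write this way; error handling, the data timeout and scatterlist setup, and the host/card CMD23 capability checks are omitted, and the block address is a placeholder.

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

static void example_multiblock_write(struct mmc_host *host, unsigned int blocks)
{
	struct mmc_command sbc = {0}, cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {0};

	sbc.opcode = MMC_SET_BLOCK_COUNT;	/* CMD23 */
	sbc.arg = blocks;
	sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	cmd.arg = 0;				/* block address (placeholder) */
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = blocks;
	data.flags = MMC_DATA_WRITE;
	/* data.sg / data.sg_len and data timeout setup omitted */

	mrq.sbc = &sbc;				/* sent ahead of cmd by the core */
	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_wait_for_req(host, &mrq);
}
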
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bcb793ec7374..1ee4424462eb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -50,12 +50,30 @@ struct mmc_ios {
50#define MMC_TIMING_LEGACY 0 50#define MMC_TIMING_LEGACY 0
51#define MMC_TIMING_MMC_HS 1 51#define MMC_TIMING_MMC_HS 1
52#define MMC_TIMING_SD_HS 2 52#define MMC_TIMING_SD_HS 2
53#define MMC_TIMING_UHS_SDR12 MMC_TIMING_LEGACY
54#define MMC_TIMING_UHS_SDR25 MMC_TIMING_SD_HS
55#define MMC_TIMING_UHS_SDR50 3
56#define MMC_TIMING_UHS_SDR104 4
57#define MMC_TIMING_UHS_DDR50 5
53 58
54 unsigned char ddr; /* dual data rate used */ 59 unsigned char ddr; /* dual data rate used */
55 60
56#define MMC_SDR_MODE 0 61#define MMC_SDR_MODE 0
57#define MMC_1_2V_DDR_MODE 1 62#define MMC_1_2V_DDR_MODE 1
58#define MMC_1_8V_DDR_MODE 2 63#define MMC_1_8V_DDR_MODE 2
64
65 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
66
67#define MMC_SIGNAL_VOLTAGE_330 0
68#define MMC_SIGNAL_VOLTAGE_180 1
69#define MMC_SIGNAL_VOLTAGE_120 2
70
71 unsigned char drv_type; /* driver type (A, B, C, D) */
72
73#define MMC_SET_DRIVER_TYPE_B 0
74#define MMC_SET_DRIVER_TYPE_A 1
75#define MMC_SET_DRIVER_TYPE_C 2
76#define MMC_SET_DRIVER_TYPE_D 3
59}; 77};
60 78
61struct mmc_host_ops { 79struct mmc_host_ops {
@@ -117,6 +135,10 @@ struct mmc_host_ops {
117 135
118 /* optional callback for HC quirks */ 136 /* optional callback for HC quirks */
119 void (*init_card)(struct mmc_host *host, struct mmc_card *card); 137 void (*init_card)(struct mmc_host *host, struct mmc_card *card);
138
139 int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
140 int (*execute_tuning)(struct mmc_host *host);
141 void (*enable_preset_value)(struct mmc_host *host, bool enable);
120}; 142};
121 143
122struct mmc_card; 144struct mmc_card;
@@ -173,6 +195,22 @@ struct mmc_host {
173 /* DDR mode at 1.2V */ 195 /* DDR mode at 1.2V */
174#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */ 196#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
175#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */ 197#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
198#define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */
199#define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */
200#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
201#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
202#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
203#define MMC_CAP_SET_XPC_330 (1 << 20) /* Host supports >150mA current at 3.3V */
204#define MMC_CAP_SET_XPC_300 (1 << 21) /* Host supports >150mA current at 3.0V */
205#define MMC_CAP_SET_XPC_180 (1 << 22) /* Host supports >150mA current at 1.8V */
206#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
207#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
208#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
209#define MMC_CAP_MAX_CURRENT_200 (1 << 26) /* Host max current limit is 200mA */
210#define MMC_CAP_MAX_CURRENT_400 (1 << 27) /* Host max current limit is 400mA */
211#define MMC_CAP_MAX_CURRENT_600 (1 << 28) /* Host max current limit is 600mA */
212#define MMC_CAP_MAX_CURRENT_800 (1 << 29) /* Host max current limit is 800mA */
213#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
176 214
177 mmc_pm_flag_t pm_caps; /* supported pm features */ 215 mmc_pm_flag_t pm_caps; /* supported pm features */
178 216
@@ -321,10 +359,19 @@ static inline int mmc_card_is_removable(struct mmc_host *host)
321 return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable; 359 return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
322} 360}
323 361
324static inline int mmc_card_is_powered_resumed(struct mmc_host *host) 362static inline int mmc_card_keep_power(struct mmc_host *host)
325{ 363{
326 return host->pm_flags & MMC_PM_KEEP_POWER; 364 return host->pm_flags & MMC_PM_KEEP_POWER;
327} 365}
328 366
367static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
368{
369 return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
370}
371
372static inline int mmc_host_cmd23(struct mmc_host *host)
373{
374 return host->caps & MMC_CAP_CMD23;
375}
329#endif 376#endif
330 377
diff --git a/include/linux/mmc/ioctl.h b/include/linux/mmc/ioctl.h
new file mode 100644
index 000000000000..5baf2983a12f
--- /dev/null
+++ b/include/linux/mmc/ioctl.h
@@ -0,0 +1,54 @@
1#ifndef LINUX_MMC_IOCTL_H
2#define LINUX_MMC_IOCTL_H
3struct mmc_ioc_cmd {
4 /* Implies direction of data. true = write, false = read */
5 int write_flag;
6
7 /* Application-specific command. true = precede with CMD55 */
8 int is_acmd;
9
10 __u32 opcode;
11 __u32 arg;
12 __u32 response[4]; /* CMD response */
13 unsigned int flags;
14 unsigned int blksz;
15 unsigned int blocks;
16
17 /*
18 * Sleep at least postsleep_min_us useconds, and at most
19 * postsleep_max_us useconds *after* issuing command. Needed for
20 * some read commands for which cards have no other way of indicating
21 * they're ready for the next command (i.e. there is no equivalent of
22 * a "busy" indicator for read operations).
23 */
24 unsigned int postsleep_min_us;
25 unsigned int postsleep_max_us;
26
27 /*
28 * Override driver-computed timeouts. Note the difference in units!
29 */
30 unsigned int data_timeout_ns;
31 unsigned int cmd_timeout_ms;
32
33 /*
34 * For 64-bit machines, the next member, ``__u64 data_ptr``, wants to
35 * be 8-byte aligned. Make sure this struct is the same size when
36 * built for 32-bit.
37 */
38 __u32 __pad;
39
40 /* DAT buffer */
41 __u64 data_ptr;
42};
43#define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr
44
45#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
46
47/*
48 * Since this ioctl is only meant to enhance (and not replace) normal access
49 * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
50 * is enforced per ioctl call. For larger data transfers, use the normal
51 * block device operations.
52 */
53#define MMC_IOC_MAX_BYTES (512L * 256)
54#endif /* LINUX_MMC_IOCTL_H */
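
A hypothetical user-space sketch of the new pass-through ioctl: read an eMMC card's 512-byte EXT_CSD register through the block device node. The device path, the opcode (8, SEND_EXT_CSD) and the numeric flag values (an R1 response plus an ADTC data command, mirroring the kernel's MMC_RSP_*/MMC_CMD_* encoding, which is not exported to user space) are assumptions for illustration.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/major.h>		/* MMC_BLOCK_MAJOR, used by MMC_IOC_CMD */
#include <linux/mmc/ioctl.h>

static int read_ext_csd(int fd, unsigned char *buf)	/* buf holds 512 bytes */
{
	struct mmc_ioc_cmd ic;

	memset(&ic, 0, sizeof(ic));
	ic.write_flag = 0;		/* read */
	ic.opcode = 8;			/* SEND_EXT_CSD (assumed opcode) */
	ic.arg = 0;
	ic.flags = 0x15 | 0x20;		/* R1 response | ADTC data (assumed encoding) */
	ic.blksz = 512;
	ic.blocks = 1;
	mmc_ioc_cmd_set_data(ic, buf);

	return ioctl(fd, MMC_IOC_CMD, &ic);
}

The caller opens the block device (for example open("/dev/mmcblk0", O_RDWR)) and passes the descriptor in; requests moving more than MMC_IOC_MAX_BYTES are rejected by the kernel.
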
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 264ba5451e3b..ac26a685cca8 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -50,6 +50,7 @@
50#define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */ 50#define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */
51#define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */ 51#define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */
52#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */ 52#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */
53#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
53 54
54 /* class 3 */ 55 /* class 3 */
55#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ 56#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
@@ -82,6 +83,12 @@
82#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */ 83#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */
83#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ 84#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
84 85
86static inline bool mmc_op_multi(u32 opcode)
87{
88 return opcode == MMC_WRITE_MULTIPLE_BLOCK ||
89 opcode == MMC_READ_MULTIPLE_BLOCK;
90}
91
85/* 92/*
86 * MMC_SWITCH argument format: 93 * MMC_SWITCH argument format:
87 * 94 *
@@ -255,18 +262,23 @@ struct _mmc_csd {
255 262
256#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ 263#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
257#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ 264#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
265#define EXT_CSD_WR_REL_PARAM 166 /* RO */
258#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ 266#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
267#define EXT_CSD_PART_CONFIG 179 /* R/W */
259#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ 268#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
260#define EXT_CSD_BUS_WIDTH 183 /* R/W */ 269#define EXT_CSD_BUS_WIDTH 183 /* R/W */
261#define EXT_CSD_HS_TIMING 185 /* R/W */ 270#define EXT_CSD_HS_TIMING 185 /* R/W */
262#define EXT_CSD_REV 192 /* RO */ 271#define EXT_CSD_REV 192 /* RO */
263#define EXT_CSD_STRUCTURE 194 /* RO */ 272#define EXT_CSD_STRUCTURE 194 /* RO */
264#define EXT_CSD_CARD_TYPE 196 /* RO */ 273#define EXT_CSD_CARD_TYPE 196 /* RO */
274#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
265#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ 275#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */
266#define EXT_CSD_S_A_TIMEOUT 217 /* RO */ 276#define EXT_CSD_S_A_TIMEOUT 217 /* RO */
277#define EXT_CSD_REL_WR_SEC_C 222 /* RO */
267#define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */ 278#define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */
268#define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */ 279#define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */
269#define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */ 280#define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */
281#define EXT_CSD_BOOT_MULT 226 /* RO */
270#define EXT_CSD_SEC_TRIM_MULT 229 /* RO */ 282#define EXT_CSD_SEC_TRIM_MULT 229 /* RO */
271#define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ 283#define EXT_CSD_SEC_ERASE_MULT 230 /* RO */
272#define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ 284#define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */
@@ -276,6 +288,12 @@ struct _mmc_csd {
276 * EXT_CSD field definitions 288 * EXT_CSD field definitions
277 */ 289 */
278 290
291#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
292
293#define EXT_CSD_PART_CONFIG_ACC_MASK (0x7)
294#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1)
295#define EXT_CSD_PART_CONFIG_ACC_BOOT1 (0x2)
296
279#define EXT_CSD_CMD_SET_NORMAL (1<<0) 297#define EXT_CSD_CMD_SET_NORMAL (1<<0)
280#define EXT_CSD_CMD_SET_SECURE (1<<1) 298#define EXT_CSD_CMD_SET_SECURE (1<<1)
281#define EXT_CSD_CMD_SET_CPSECURE (1<<2) 299#define EXT_CSD_CMD_SET_CPSECURE (1<<2)
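
To show how the new EXT_CSD_PART_CONFIG definitions fit together with mmc_switch() (declared in core.h above as taking the card, a command set, an EXT_CSD index, a value and a timeout in milliseconds), here is a rough sketch of selecting the first boot partition. It assumes card->ext_csd.part_config and part_time were filled in when the EXT_CSD was parsed; locking and error handling are stripped.

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>

static int example_switch_to_boot0(struct mmc_card *card)
{
	u8 cfg = card->ext_csd.part_config;

	cfg &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
	cfg |= EXT_CSD_PART_CONFIG_ACC_BOOT0;

	return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
			  cfg, card->ext_csd.part_time);
}
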
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 3fd85e088cc3..7d35d52c3df3 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -17,6 +17,7 @@
17/* This is basically the same command as for MMC with some quirks. */ 17/* This is basically the same command as for MMC with some quirks. */
18#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ 18#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */
19#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ 19#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
20#define SD_SWITCH_VOLTAGE 11 /* ac R1 */
20 21
21 /* class 10 */ 22 /* class 10 */
22#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ 23#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
@@ -32,6 +33,12 @@
32#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ 33#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
33#define SD_APP_SEND_SCR 51 /* adtc R1 */ 34#define SD_APP_SEND_SCR 51 /* adtc R1 */
34 35
36/* OCR bit definitions */
37#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
38#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
39#define SD_OCR_XPC (1 << 28) /* SDXC power control */
40#define SD_OCR_CCS (1 << 30) /* Card Capacity Status */
41
35/* 42/*
36 * SD_SWITCH argument format: 43 * SD_SWITCH argument format:
37 * 44 *
@@ -59,7 +66,7 @@
59 66
60#define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */ 67#define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */
61#define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */ 68#define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */
62#define SCR_SPEC_VER_2 2 /* Implements system specification 2.00 */ 69#define SCR_SPEC_VER_2 2 /* Implements system specification 2.00-3.0X */
63 70
64/* 71/*
65 * SD bus widths 72 * SD bus widths
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 83bd9f76709a..6a68c4eb4e44 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -85,6 +85,8 @@ struct sdhci_host {
85#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) 85#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
86/* Controller treats ADMA descriptors with length 0000h incorrectly */ 86/* Controller treats ADMA descriptors with length 0000h incorrectly */
87#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30) 87#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
88/* The read-only detection via SDHCI_PRESENT_STATE register is unstable */
89#define SDHCI_QUIRK_UNSTABLE_RO_DETECT (1<<31)
88 90
89 int irq; /* Device IRQ */ 91 int irq; /* Device IRQ */
90 void __iomem *ioaddr; /* Mapped address */ 92 void __iomem *ioaddr; /* Mapped address */
@@ -109,11 +111,16 @@ struct sdhci_host {
109#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ 111#define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */
110#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ 112#define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
111#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ 113#define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
114#define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */
115#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */
116#define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */
117#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
112 118
113 unsigned int version; /* SDHCI spec. version */ 119 unsigned int version; /* SDHCI spec. version */
114 120
115 unsigned int max_clk; /* Max possible freq (MHz) */ 121 unsigned int max_clk; /* Max possible freq (MHz) */
116 unsigned int timeout_clk; /* Timeout freq (KHz) */ 122 unsigned int timeout_clk; /* Timeout freq (KHz) */
123 unsigned int clk_mul; /* Clock Multiplier value */
117 124
118 unsigned int clock; /* Current clock (MHz) */ 125 unsigned int clock; /* Current clock (MHz) */
119 u8 pwr; /* Current voltage */ 126 u8 pwr; /* Current voltage */
@@ -145,6 +152,14 @@ struct sdhci_host {
145 unsigned int ocr_avail_sd; 152 unsigned int ocr_avail_sd;
146 unsigned int ocr_avail_mmc; 153 unsigned int ocr_avail_mmc;
147 154
155 wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
156 unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
157
158 unsigned int tuning_count; /* Timer count for re-tuning */
159 unsigned int tuning_mode; /* Re-tuning mode supported by host */
160#define SDHCI_TUNING_MODE_1 0
161 struct timer_list tuning_timer; /* Timer for tuning */
162
148 unsigned long private[0] ____cacheline_aligned; 163 unsigned long private[0] ____cacheline_aligned;
149}; 164};
150#endif /* __SDHCI_H */ 165#endif /* __SDHCI_H */
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index c981b959760f..faf32b6ec185 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -3,12 +3,16 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct platform_device;
7struct tmio_mmc_data;
8
6struct sh_mobile_sdhi_info { 9struct sh_mobile_sdhi_info {
7 int dma_slave_tx; 10 int dma_slave_tx;
8 int dma_slave_rx; 11 int dma_slave_rx;
9 unsigned long tmio_flags; 12 unsigned long tmio_flags;
10 unsigned long tmio_caps; 13 unsigned long tmio_caps;
11 u32 tmio_ocr_mask; /* available MMC voltages */ 14 u32 tmio_ocr_mask; /* available MMC voltages */
15 struct tmio_mmc_data *pdata;
12 void (*set_pwr)(struct platform_device *pdev, int state); 16 void (*set_pwr)(struct platform_device *pdev, int state);
13 int (*get_cd)(struct platform_device *pdev); 17 int (*get_cd)(struct platform_device *pdev);
14}; 18};
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index cc2e7dfea9d7..1d1b1e13f79f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -150,7 +150,7 @@ struct mmu_notifier_ops {
150 * Therefore notifier chains can only be traversed when either 150 * Therefore notifier chains can only be traversed when either
151 * 151 *
152 * 1. mmap_sem is held. 152 * 1. mmap_sem is held.
153 * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock). 153 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex).
154 * 3. No other concurrent thread can access the list (release) 154 * 3. No other concurrent thread can access the list (release)
155 */ 155 */
156struct mmu_notifier { 156struct mmu_notifier {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e56f835274c9..29312bdf119f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -273,11 +273,6 @@ struct zone_reclaim_stat {
273 */ 273 */
274 unsigned long recent_rotated[2]; 274 unsigned long recent_rotated[2];
275 unsigned long recent_scanned[2]; 275 unsigned long recent_scanned[2];
276
277 /*
278 * accumulated for batching
279 */
280 unsigned long nr_saved_scan[NR_LRU_LISTS];
281}; 276};
282 277
283struct zone { 278struct zone {
@@ -928,9 +923,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
928#define pfn_to_nid(pfn) (0) 923#define pfn_to_nid(pfn) (0)
929#endif 924#endif
930 925
931#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
932#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
933
934#ifdef CONFIG_SPARSEMEM 926#ifdef CONFIG_SPARSEMEM
935 927
936/* 928/*
@@ -956,6 +948,12 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
956#error Allocator MAX_ORDER exceeds SECTION_SIZE 948#error Allocator MAX_ORDER exceeds SECTION_SIZE
957#endif 949#endif
958 950
951#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
952#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
953
954#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
955#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
956
959struct page; 957struct page;
960struct page_cgroup; 958struct page_cgroup;
961struct mem_section { 959struct mem_section {
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index c75471db576e..a940fe435aca 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -132,6 +132,7 @@ static inline int mutex_is_locked(struct mutex *lock)
132 */ 132 */
133#ifdef CONFIG_DEBUG_LOCK_ALLOC 133#ifdef CONFIG_DEBUG_LOCK_ALLOC
134extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 134extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
135extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
135extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, 136extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
136 unsigned int subclass); 137 unsigned int subclass);
137extern int __must_check mutex_lock_killable_nested(struct mutex *lock, 138extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -140,6 +141,13 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
140#define mutex_lock(lock) mutex_lock_nested(lock, 0) 141#define mutex_lock(lock) mutex_lock_nested(lock, 0)
141#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) 142#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
142#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) 143#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
144
145#define mutex_lock_nest_lock(lock, nest_lock) \
146do { \
147 typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
148 _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
149} while (0)
150
143#else 151#else
144extern void mutex_lock(struct mutex *lock); 152extern void mutex_lock(struct mutex *lock);
145extern int __must_check mutex_lock_interruptible(struct mutex *lock); 153extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -148,6 +156,7 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
148# define mutex_lock_nested(lock, subclass) mutex_lock(lock) 156# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
149# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) 157# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
150# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) 158# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
159# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
151#endif 160#endif
152 161
153/* 162/*
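
A small sketch of what mutex_lock_nest_lock() expresses: many mutexes sharing one lock class are taken while an outer lock already serializes all such takers, so lockdep is told about the outer lock instead of reporting recursive locking. The structures and names below are illustrative only; the unlock path is omitted.

#include <linux/list.h>
#include <linux/mutex.h>

struct example_bucket {
	struct mutex lock;		/* all bucket locks share a class */
	struct list_head node;
};

static void example_lock_all(struct mutex *outer, struct list_head *buckets)
{
	struct example_bucket *b;

	mutex_lock(outer);		/* excludes other "lock them all" callers */
	list_for_each_entry(b, buckets, node)
		mutex_lock_nest_lock(&b->lock, outer);
}
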
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 4c4ac3f3ce5a..a9dd89552f9c 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -24,6 +24,7 @@
24/* leave room for NETLINK_DM (DM Events) */ 24/* leave room for NETLINK_DM (DM Events) */
25#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ 25#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
26#define NETLINK_ECRYPTFS 19 26#define NETLINK_ECRYPTFS 19
27#define NETLINK_RDMA 20
27 28
28#define MAX_LINKS 32 29#define MAX_LINKS 32
29 30
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 7b370c7cfeff..50d20aba57d3 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -81,13 +81,4 @@ static inline void get_nsproxy(struct nsproxy *ns)
81 atomic_inc(&ns->count); 81 atomic_inc(&ns->count);
82} 82}
83 83
84#ifdef CONFIG_CGROUP_NS
85int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid);
86#else
87static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid)
88{
89 return 0;
90}
91#endif
92
93#endif 84#endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5e3aa8311c5e..4952fb874ad3 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -40,6 +40,8 @@ enum oom_constraint {
40 CONSTRAINT_MEMCG, 40 CONSTRAINT_MEMCG,
41}; 41};
42 42
43extern int test_set_oom_score_adj(int new_val);
44
43extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, 45extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
44 const nodemask_t *nodemask, unsigned long totalpages); 46 const nodemask_t *nodemask, unsigned long totalpages);
45extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); 47extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c11950652646..716875e53520 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -219,6 +219,12 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
219 return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); 219 return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
220} 220}
221 221
222static inline struct page *page_cache_alloc_readahead(struct address_space *x)
223{
224 return __page_cache_alloc(mapping_gfp_mask(x) |
225 __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
226}
227
222typedef int filler_t(void *, struct page *); 228typedef int filler_t(void *, struct page *);
223 229
224extern struct page * find_get_page(struct address_space *mapping, 230extern struct page * find_get_page(struct address_space *mapping,
@@ -357,6 +363,15 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
357 */ 363 */
358extern void wait_on_page_bit(struct page *page, int bit_nr); 364extern void wait_on_page_bit(struct page *page, int bit_nr);
359 365
366extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
367
368static inline int wait_on_page_locked_killable(struct page *page)
369{
370 if (PageLocked(page))
371 return wait_on_page_bit_killable(page, PG_locked);
372 return 0;
373}
374
360/* 375/*
361 * Wait for a page to be unlocked. 376 * Wait for a page to be unlocked.
362 * 377 *
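
As a quick illustration of the new killable variant (a sketch, not code from this series): a caller that previously blocked uninterruptibly on PG_locked can now back out when a fatal signal arrives; the -EINTR mapping below is this sketch's choice.

#include <linux/errno.h>
#include <linux/pagemap.h>

static int example_wait_for_page(struct page *page)
{
	/* Returns 0 once the page is unlocked, non-zero if interrupted. */
	if (wait_on_page_locked_killable(page))
		return -EINTR;
	return 0;
}
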
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 46f6ba56fa91..5edc9014263a 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -75,7 +75,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
75 barrier(); /* Prevent reloads of fbc->count */ 75 barrier(); /* Prevent reloads of fbc->count */
76 if (ret >= 0) 76 if (ret >= 0)
77 return ret; 77 return ret;
78 return 1; 78 return 0;
79} 79}
80 80
81static inline int percpu_counter_initialized(struct percpu_counter *fbc) 81static inline int percpu_counter_initialized(struct percpu_counter *fbc)
@@ -133,6 +133,10 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
133 return fbc->count; 133 return fbc->count;
134} 134}
135 135
136/*
137 * percpu_counter is intended to track positive numbers. In the UP case the
138 * number should never be negative.
139 */
136static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) 140static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
137{ 141{
138 return fbc->count; 142 return fbc->count;
diff --git a/include/linux/pid.h b/include/linux/pid.h
index cdced84261d7..b152d44fb181 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -105,7 +105,7 @@ extern struct pid_namespace init_pid_ns;
105 * or rcu_read_lock() held. 105 * or rcu_read_lock() held.
106 * 106 *
107 * find_pid_ns() finds the pid in the namespace specified 107 * find_pid_ns() finds the pid in the namespace specified
108 * find_vpid() finr the pid by its virtual id, i.e. in the current namespace 108 * find_vpid() finds the pid by its virtual id, i.e. in the current namespace
109 * 109 *
110 * see also find_task_by_vpid() set in include/linux/sched.h 110 * see also find_task_by_vpid() set in include/linux/sched.h
111 */ 111 */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 808227d40a64..959c14132f46 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -82,6 +82,7 @@ struct k_itimer {
82 unsigned long expires; 82 unsigned long expires;
83 } mmtimer; 83 } mmtimer;
84 struct alarm alarmtimer; 84 struct alarm alarmtimer;
85 struct rcu_head rcu;
85 } it; 86 } it;
86}; 87};
87 88
diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h
new file mode 100644
index 000000000000..68096a6aa2d7
--- /dev/null
+++ b/include/linux/power/isp1704_charger.h
@@ -0,0 +1,29 @@
1/*
2 * ISP1704 USB Charger Detection driver
3 *
4 * Copyright (C) 2011 Nokia Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21
22#ifndef __ISP1704_CHARGER_H
23#define __ISP1704_CHARGER_H
24
25struct isp1704_charger_data {
26 void (*set_power)(bool on);
27};
28
29#endif
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
new file mode 100644
index 000000000000..24f51db8a83f
--- /dev/null
+++ b/include/linux/power/max8903_charger.h
@@ -0,0 +1,57 @@
1/*
2 * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
3 *
4 * Copyright (C) 2011 Samsung Electronics
5 * MyungJoo Ham <myungjoo.ham@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef __MAX8903_CHARGER_H__
24#define __MAX8903_CHARGER_H__
25
26struct max8903_pdata {
27 /*
28 * GPIOs
29 * cen, chg, flt, and usus are optional.
30 * dok, dcm, and uok are not optional depending on the status of
31 * dc_valid and usb_valid.
32 */
33 int cen; /* Charger Enable input */
34 int dok; /* DC(Adapter) Power OK output */
35 int uok; /* USB Power OK output */
36 int chg; /* Charger status output */
37 int flt; /* Fault output */
38 int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
39 int usus; /* USB Suspend Input (1: suspended) */
40
41 /*
42 * DC(Adapter/TA) is wired
43 * When dc_valid is true,
44 * dok and dcm should be valid.
45 *
46 * At least one of dc_valid or usb_valid should be true.
47 */
48 bool dc_valid;
49 /*
50 * USB is wired
51 * When usb_valid is true,
52 * uok should be valid.
53 */
54 bool usb_valid;
55};
56
57#endif /* __MAX8903_CHARGER_H__ */
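
A sketch of how board code might describe a MAX8903 wired for DC charging only with the new platform data; the GPIO numbers and the "max8903-charger" platform device name are assumptions for illustration.

#include <linux/platform_device.h>
#include <linux/power/max8903_charger.h>
#include <linux/types.h>

static struct max8903_pdata example_charger_pdata = {
	.dok		= 42,		/* DC power OK (placeholder GPIO) */
	.dcm		= 43,		/* current-limit mode select */
	.cen		= 44,		/* charger enable */
	.chg		= 45,		/* charging status */
	.flt		= 46,		/* fault */
	.dc_valid	= true,
	.usb_valid	= false,	/* no USB input wired on this board */
};

static struct platform_device example_charger_device = {
	.name	= "max8903-charger",	/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &example_charger_pdata,
	},
};
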
diff --git a/include/linux/printk.h b/include/linux/printk.h
index ee048e77e1ae..0101d55d9651 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -1,6 +1,8 @@
1#ifndef __KERNEL_PRINTK__ 1#ifndef __KERNEL_PRINTK__
2#define __KERNEL_PRINTK__ 2#define __KERNEL_PRINTK__
3 3
4#include <linux/init.h>
5
4extern const char linux_banner[]; 6extern const char linux_banner[];
5extern const char linux_proc_banner[]; 7extern const char linux_proc_banner[];
6 8
@@ -113,6 +115,7 @@ extern int dmesg_restrict;
113extern int kptr_restrict; 115extern int kptr_restrict;
114 116
115void log_buf_kexec_setup(void); 117void log_buf_kexec_setup(void);
118void __init setup_log_buf(int early);
116#else 119#else
117static inline __attribute__ ((format (printf, 1, 0))) 120static inline __attribute__ ((format (printf, 1, 0)))
118int vprintk(const char *s, va_list args) 121int vprintk(const char *s, va_list args)
@@ -137,6 +140,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
137static inline void log_buf_kexec_setup(void) 140static inline void log_buf_kexec_setup(void)
138{ 141{
139} 142}
143
144static inline void setup_log_buf(int early)
145{
146}
140#endif 147#endif
141 148
142extern void dump_stack(void) __cold; 149extern void dump_stack(void) __cold;
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index eaf4350c0f90..e7576cf9e32d 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -173,11 +173,7 @@ extern void proc_net_remove(struct net *net, const char *name);
173extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, 173extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
174 struct proc_dir_entry *parent); 174 struct proc_dir_entry *parent);
175 175
176/* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are 176extern struct file *proc_ns_fget(int fd);
177 * only needed to implement /proc/<pid>|self/exe so we define them here. */
178extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
179extern struct file *get_mm_exe_file(struct mm_struct *mm);
180extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm);
181 177
182#else 178#else
183 179
@@ -228,19 +224,11 @@ static inline void pid_ns_release_proc(struct pid_namespace *ns)
228{ 224{
229} 225}
230 226
231static inline void set_mm_exe_file(struct mm_struct *mm, 227static inline struct file *proc_ns_fget(int fd)
232 struct file *new_exe_file)
233{}
234
235static inline struct file *get_mm_exe_file(struct mm_struct *mm)
236{ 228{
237 return NULL; 229 return ERR_PTR(-EINVAL);
238} 230}
239 231
240static inline void dup_mm_exe_file(struct mm_struct *oldmm,
241 struct mm_struct *newmm)
242{}
243
244#endif /* CONFIG_PROC_FS */ 232#endif /* CONFIG_PROC_FS */
245 233
246#if !defined(CONFIG_PROC_KCORE) 234#if !defined(CONFIG_PROC_KCORE)
@@ -252,6 +240,18 @@ kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
252extern void kclist_add(struct kcore_list *, void *, size_t, int type); 240extern void kclist_add(struct kcore_list *, void *, size_t, int type);
253#endif 241#endif
254 242
243struct nsproxy;
244struct proc_ns_operations {
245 const char *name;
246 int type;
247 void *(*get)(struct task_struct *task);
248 void (*put)(void *ns);
249 int (*install)(struct nsproxy *nsproxy, void *ns);
250};
251extern const struct proc_ns_operations netns_operations;
252extern const struct proc_ns_operations utsns_operations;
253extern const struct proc_ns_operations ipcns_operations;
254
255union proc_op { 255union proc_op {
256 int (*proc_get_link)(struct inode *, struct path *); 256 int (*proc_get_link)(struct inode *, struct path *);
257 int (*proc_read)(struct task_struct *task, char *page); 257 int (*proc_read)(struct task_struct *task, char *page);
@@ -270,6 +270,8 @@ struct proc_inode {
270 struct proc_dir_entry *pde; 270 struct proc_dir_entry *pde;
271 struct ctl_table_header *sysctl; 271 struct ctl_table_header *sysctl;
272 struct ctl_table *sysctl_entry; 272 struct ctl_table *sysctl_entry;
273 void *ns;
274 const struct proc_ns_operations *ns_ops;
273 struct inode vfs_inode; 275 struct inode vfs_inode;
274}; 276};
275 277
@@ -288,12 +290,4 @@ static inline struct net *PDE_NET(struct proc_dir_entry *pde)
288 return pde->parent->data; 290 return pde->parent->data;
289} 291}
290 292
291struct proc_maps_private {
292 struct pid *pid;
293 struct task_struct *task;
294#ifdef CONFIG_MMU
295 struct vm_area_struct *tail_vma;
296#endif
297};
298
299#endif /* _LINUX_PROC_FS_H */ 293#endif /* _LINUX_PROC_FS_H */
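
To show the shape of the new namespace hook, a hedged sketch of a proc_ns_operations instance for a hypothetical namespace type; the three helpers are placeholders for code that takes a reference on the task's namespace, drops it, and installs it into an nsproxy for setns()-style use.

#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>

static void *examplens_get(struct task_struct *task)
{
	/* Return a referenced pointer to the task's namespace, or NULL. */
	return NULL;
}

static void examplens_put(void *ns)
{
	/* Drop the reference taken in examplens_get(). */
}

static int examplens_install(struct nsproxy *nsproxy, void *ns)
{
	/* Make "ns" the active namespace in nsproxy; return 0 on success. */
	return -EINVAL;
}

const struct proc_ns_operations examplens_operations = {
	.name		= "example",
	.type		= 0,		/* normally a CLONE_NEW* flag */
	.get		= examplens_get,
	.put		= examplens_put,
	.install	= examplens_install,
};
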
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index 943a85ab0020..e07e2742a865 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/if_ether.h> 26#include <linux/if_ether.h>
27#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
28#include <linux/ip.h>
28#include <linux/filter.h> 29#include <linux/filter.h>
29#ifdef __KERNEL__ 30#ifdef __KERNEL__
30#include <linux/in.h> 31#include <linux/in.h>
@@ -58,6 +59,12 @@
58#define OFF_NEXT 6 59#define OFF_NEXT 6
59#define OFF_UDP_DST 2 60#define OFF_UDP_DST 2
60 61
62#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
63#define OFF_PTP_SEQUENCE_ID 30
64#define OFF_PTP_CONTROL 32 /* PTPv1 only */
65
66#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
67
61#define IP6_HLEN 40 68#define IP6_HLEN 40
62#define UDP_HLEN 8 69#define UDP_HLEN 8
63 70
diff --git a/include/linux/ptp_clock.h b/include/linux/ptp_clock.h
new file mode 100644
index 000000000000..94e981f810a2
--- /dev/null
+++ b/include/linux/ptp_clock.h
@@ -0,0 +1,84 @@
1/*
2 * PTP 1588 clock support - user space interface
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _PTP_CLOCK_H_
22#define _PTP_CLOCK_H_
23
24#include <linux/ioctl.h>
25#include <linux/types.h>
26
27/* PTP_xxx bits, for the flags field within the request structures. */
28#define PTP_ENABLE_FEATURE (1<<0)
29#define PTP_RISING_EDGE (1<<1)
30#define PTP_FALLING_EDGE (1<<2)
31
32/*
33 * struct ptp_clock_time - represents a time value
34 *
35 * The sign of the seconds field applies to the whole value. The
36 * nanoseconds field is always unsigned. The reserved field is
37 * included for sub-nanosecond resolution, should the demand for
38 * this ever appear.
39 *
40 */
41struct ptp_clock_time {
42 __s64 sec; /* seconds */
43 __u32 nsec; /* nanoseconds */
44 __u32 reserved;
45};
46
47struct ptp_clock_caps {
48 int max_adj; /* Maximum frequency adjustment in parts per billion. */
49 int n_alarm; /* Number of programmable alarms. */
50 int n_ext_ts; /* Number of external time stamp channels. */
51 int n_per_out; /* Number of programmable periodic signals. */
52 int pps; /* Whether the clock supports a PPS callback. */
53 int rsv[15]; /* Reserved for future use. */
54};
55
56struct ptp_extts_request {
57 unsigned int index; /* Which channel to configure. */
58 unsigned int flags; /* Bit field for PTP_xxx flags. */
59 unsigned int rsv[2]; /* Reserved for future use. */
60};
61
62struct ptp_perout_request {
63 struct ptp_clock_time start; /* Absolute start time. */
64 struct ptp_clock_time period; /* Desired period, zero means disable. */
65 unsigned int index; /* Which channel to configure. */
66 unsigned int flags; /* Reserved for future use. */
67 unsigned int rsv[4]; /* Reserved for future use. */
68};
69
70#define PTP_CLK_MAGIC '='
71
72#define PTP_CLOCK_GETCAPS _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
73#define PTP_EXTTS_REQUEST _IOW(PTP_CLK_MAGIC, 2, struct ptp_extts_request)
74#define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
75#define PTP_ENABLE_PPS _IOW(PTP_CLK_MAGIC, 4, int)
76
77struct ptp_extts_event {
78 struct ptp_clock_time t; /* Time event occurred. */
79 unsigned int index; /* Which channel produced the event. */
80 unsigned int flags; /* Reserved for future use. */
81 unsigned int rsv[2]; /* Reserved for future use. */
82};
83
84#endif
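
A hypothetical user-space sketch against this character-device ABI: open a PTP clock node and query its capabilities with PTP_CLOCK_GETCAPS. The /dev/ptp0 path is an assumption; the ioctl and structure come from this header.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_clock_caps caps;
	int fd = open("/dev/ptp0", O_RDWR);	/* device node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
		close(fd);
		return 1;
	}
	printf("max_adj=%d ppb ext_ts=%d per_out=%d pps=%d\n",
	       caps.max_adj, caps.n_ext_ts, caps.n_per_out, caps.pps);
	close(fd);
	return 0;
}
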
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
new file mode 100644
index 000000000000..dd2e44fba63e
--- /dev/null
+++ b/include/linux/ptp_clock_kernel.h
@@ -0,0 +1,139 @@
1/*
2 * PTP 1588 clock support
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _PTP_CLOCK_KERNEL_H_
22#define _PTP_CLOCK_KERNEL_H_
23
24#include <linux/ptp_clock.h>
25
26
27struct ptp_clock_request {
28 enum {
29 PTP_CLK_REQ_EXTTS,
30 PTP_CLK_REQ_PEROUT,
31 PTP_CLK_REQ_PPS,
32 } type;
33 union {
34 struct ptp_extts_request extts;
35 struct ptp_perout_request perout;
36 };
37};
38
39/**
40 * struct ptp_clock_info - describes a PTP hardware clock
41 *
42 * @owner: The clock driver should set this to THIS_MODULE.
43 * @name: A short name to identify the clock.
44 * @max_adj: The maximum possible frequency adjustment, in parts per billion.
45 * @n_alarm: The number of programmable alarms.
46 * @n_ext_ts: The number of external time stamp channels.
47 * @n_per_out: The number of programmable periodic signals.
48 * @pps: Indicates whether the clock supports a PPS callback.
49 *
50 * clock operations
51 *
52 * @adjfreq: Adjusts the frequency of the hardware clock.
53 * parameter delta: Desired period change in parts per billion.
54 *
55 * @adjtime: Shifts the time of the hardware clock.
56 * parameter delta: Desired change in nanoseconds.
57 *
58 * @gettime: Reads the current time from the hardware clock.
59 * parameter ts: Holds the result.
60 *
61 * @settime: Set the current time on the hardware clock.
62 * parameter ts: Time value to set.
63 *
64 * @enable: Request driver to enable or disable an ancillary feature.
65 * parameter request: Desired resource to enable or disable.
66 * parameter on: Caller passes one to enable or zero to disable.
67 *
68 * Drivers should embed their ptp_clock_info within a private
69 * structure, obtaining a reference to it using container_of().
70 *
71 * The callbacks must all return zero on success, non-zero otherwise.
72 */
73
74struct ptp_clock_info {
75 struct module *owner;
76 char name[16];
77 s32 max_adj;
78 int n_alarm;
79 int n_ext_ts;
80 int n_per_out;
81 int pps;
82 int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
83 int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
84 int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
85 int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
86 int (*enable)(struct ptp_clock_info *ptp,
87 struct ptp_clock_request *request, int on);
88};
89
90struct ptp_clock;
91
92/**
93 * ptp_clock_register() - register a PTP hardware clock driver
94 *
95 * @info: Structure describing the new clock.
96 */
97
98extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info);
99
100/**
101 * ptp_clock_unregister() - unregister a PTP hardware clock driver
102 *
103 * @ptp: The clock to remove from service.
104 */
105
106extern int ptp_clock_unregister(struct ptp_clock *ptp);
107
108
109enum ptp_clock_events {
110 PTP_CLOCK_ALARM,
111 PTP_CLOCK_EXTTS,
112 PTP_CLOCK_PPS,
113};
114
115/**
 116 * struct ptp_clock_event - describes a PTP hardware clock event
 117 *
 118 * @type: One of the ptp_clock_events enumeration values.
 119 * @index: Identifies the source of the event.
 120 * @timestamp: When the event occurred.
121 */
122
123struct ptp_clock_event {
124 int type;
125 int index;
126 u64 timestamp;
127};
128
129/**
130 * ptp_clock_event() - notify the PTP layer about an event
131 *
132 * @ptp: The clock obtained from ptp_clock_register().
133 * @event: Message structure describing the event.
134 */
135
136extern void ptp_clock_event(struct ptp_clock *ptp,
137 struct ptp_clock_event *event);
138
139#endif
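
The kernel-doc above spells out the driver contract without an example, so here is a minimal sketch of a clock driver wired to this API. All foo_* names, the capability values and the register comments are invented; the error handling also assumes an ERR_PTR-style return from ptp_clock_register(), which the header itself does not state.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

/* Hypothetical driver-private state embedding ptp_clock_info. */
struct foo_clock {
	struct ptp_clock_info caps;
	struct ptp_clock *clock;
	/* hardware registers, locks, ... */
};

static int foo_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct foo_clock *foo = container_of(ptp, struct foo_clock, caps);

	/* write delta (signed nanoseconds) to foo's offset register here */
	(void)foo;
	return 0;
}

static struct ptp_clock_info foo_caps = {
	.owner		= THIS_MODULE,
	.name		= "foo timer",
	.max_adj	= 512000,	/* at most +/- 512 ppm, stated in ppb */
	.n_ext_ts	= 2,
	.adjtime	= foo_adjtime,
	/* .adjfreq, .gettime, .settime and .enable would be filled in too */
};

static struct foo_clock foo_dev;

static int __init foo_probe(void)
{
	foo_dev.caps = foo_caps;
	foo_dev.clock = ptp_clock_register(&foo_dev.caps);
	if (IS_ERR(foo_dev.clock))
		return PTR_ERR(foo_dev.clock);
	/*
	 * An external-timestamp interrupt handler would later fill a
	 * struct ptp_clock_event with PTP_CLOCK_EXTTS, .index and the
	 * captured time, then call ptp_clock_event(foo_dev.clock, &event).
	 */
	return 0;
}
module_init(foo_probe);

static void __exit foo_remove(void)
{
	ptp_clock_unregister(foo_dev.clock);
}
module_exit(foo_remove);

MODULE_LICENSE("GPL");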
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
new file mode 100644
index 000000000000..612062313b68
--- /dev/null
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 *
6 * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
7 *
8 * Interface to power domain regulators on DB8500
9 */
10
11#ifndef __REGULATOR_H__
12#define __REGULATOR_H__
13
14/* Number of DB8500 regulators and regulator enumeration */
15enum db8500_regulator_id {
16 DB8500_REGULATOR_VAPE,
17 DB8500_REGULATOR_VARM,
18 DB8500_REGULATOR_VMODEM,
19 DB8500_REGULATOR_VPLL,
20 DB8500_REGULATOR_VSMPS1,
21 DB8500_REGULATOR_VSMPS2,
22 DB8500_REGULATOR_VSMPS3,
23 DB8500_REGULATOR_VRF1,
24 DB8500_REGULATOR_SWITCH_SVAMMDSP,
25 DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
26 DB8500_REGULATOR_SWITCH_SVAPIPE,
27 DB8500_REGULATOR_SWITCH_SIAMMDSP,
28 DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
29 DB8500_REGULATOR_SWITCH_SIAPIPE,
30 DB8500_REGULATOR_SWITCH_SGA,
31 DB8500_REGULATOR_SWITCH_B2R2_MCDE,
32 DB8500_REGULATOR_SWITCH_ESRAM12,
33 DB8500_REGULATOR_SWITCH_ESRAM12RET,
34 DB8500_REGULATOR_SWITCH_ESRAM34,
35 DB8500_REGULATOR_SWITCH_ESRAM34RET,
36 DB8500_NUM_REGULATORS
37};
38
39/*
40 * Exported interface for CPUIdle only. This function is called with all
41 * interrupts turned off.
42 */
43int power_state_active_is_enabled(void);
44
45#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index c4c4fc45f856..ce3127a75c88 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -68,6 +68,8 @@ struct regulator_state {
68 * 68 *
69 * @min_uV: Smallest voltage consumers may set. 69 * @min_uV: Smallest voltage consumers may set.
70 * @max_uV: Largest voltage consumers may set. 70 * @max_uV: Largest voltage consumers may set.
71 * @uV_offset: Offset applied to voltages from consumer to compensate for
72 * voltage drops.
71 * 73 *
72 * @min_uA: Smallest consumers consumers may set. 74 * @min_uA: Smallest consumers consumers may set.
73 * @max_uA: Largest current consumers may set. 75 * @max_uA: Largest current consumers may set.
@@ -99,6 +101,8 @@ struct regulation_constraints {
99 int min_uV; 101 int min_uV;
100 int max_uV; 102 int max_uV;
101 103
104 int uV_offset;
105
102 /* current output range (inclusive) - for current control */ 106 /* current output range (inclusive) - for current control */
103 int min_uA; 107 int min_uA;
104 int max_uA; 108 int max_uA;
@@ -160,8 +164,6 @@ struct regulator_consumer_supply {
160 * @supply_regulator: Parent regulator. Specified using the regulator name 164 * @supply_regulator: Parent regulator. Specified using the regulator name
161 * as it appears in the name field in sysfs, which can 165 * as it appears in the name field in sysfs, which can
162 * be explicitly set using the constraints field 'name'. 166 * be explicitly set using the constraints field 'name'.
163 * @supply_regulator_dev: Parent regulator (if any) - DEPRECATED in favour
164 * of supply_regulator.
165 * 167 *
166 * @constraints: Constraints. These must be specified for the regulator to 168 * @constraints: Constraints. These must be specified for the regulator to
167 * be usable. 169 * be usable.
@@ -173,7 +175,6 @@ struct regulator_consumer_supply {
173 */ 175 */
174struct regulator_init_data { 176struct regulator_init_data {
175 const char *supply_regulator; /* or NULL for system supply */ 177 const char *supply_regulator; /* or NULL for system supply */
176 struct device *supply_regulator_dev; /* or NULL for system supply */
177 178
178 struct regulation_constraints constraints; 179 struct regulation_constraints constraints;
179 180
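
The new uV_offset constraint is easiest to see from a board file. A hypothetical example follows; the foo_vmmc_init_data name and the 20 mV figure are placeholders for whatever drop a real board measures between the regulator and its load.

#include <linux/regulator/machine.h>

/* Hypothetical SD-card supply on a board with a measurable track drop. */
static struct regulator_init_data foo_vmmc_init_data = {
	.constraints = {
		.min_uV		= 2800000,
		.max_uV		= 3000000,
		.uV_offset	= 20000,	/* compensate ~20 mV of board-level drop */
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};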
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
new file mode 100644
index 000000000000..a175d0598033
--- /dev/null
+++ b/include/linux/rfkill-gpio.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (c) 2011, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19
20#ifndef __RFKILL_GPIO_H
21#define __RFKILL_GPIO_H
22
23#include <linux/types.h>
24#include <linux/rfkill.h>
25
26/**
 27 * struct rfkill_gpio_platform_data - platform data for the rfkill gpio device.
 28 * For unused GPIOs the expected value is -1.
 29 * @name: name for the gpio rfkill instance
 30 * @reset_gpio: GPIO used for resetting the rfkill switch
 31 * @shutdown_gpio: GPIO used for shutting down the rfkill switch
 32 * @power_clk_name: [optional] name of the clock to turn off while blocked
33 */
34
35struct rfkill_gpio_platform_data {
36 char *name;
37 int reset_gpio;
38 int shutdown_gpio;
39 const char *power_clk_name;
40 enum rfkill_type type;
41};
42
43#endif /* __RFKILL_GPIO_H */
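
For context, a board would typically pass this structure to the rfkill-gpio platform device roughly as sketched below; the instance name and GPIO numbers are placeholders, not values from any real board.

#include <linux/rfkill.h>
#include <linux/rfkill-gpio.h>

/* Hypothetical Bluetooth switch: only the reset line is wired up. */
static struct rfkill_gpio_platform_data foo_bt_rfkill_pdata = {
	.name		= "bt_rfkill",
	.reset_gpio	= 42,
	.shutdown_gpio	= -1,		/* unused, per the convention above */
	.power_clk_name	= NULL,
	.type		= RFKILL_TYPE_BLUETOOTH,
};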
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 830e65dc01ee..2148b122779b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
7#include <linux/list.h> 7#include <linux/list.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/spinlock.h> 10#include <linux/mutex.h>
11#include <linux/memcontrol.h> 11#include <linux/memcontrol.h>
12 12
13/* 13/*
@@ -26,7 +26,7 @@
26 */ 26 */
27struct anon_vma { 27struct anon_vma {
28 struct anon_vma *root; /* Root of this anon_vma tree */ 28 struct anon_vma *root; /* Root of this anon_vma tree */
29 spinlock_t lock; /* Serialize access to vma list */ 29 struct mutex mutex; /* Serialize access to vma list */
30 /* 30 /*
31 * The refcount is taken on an anon_vma when there is no 31 * The refcount is taken on an anon_vma when there is no
32 * guarantee that the vma of page tables will exist for 32 * guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
64 struct vm_area_struct *vma; 64 struct vm_area_struct *vma;
65 struct anon_vma *anon_vma; 65 struct anon_vma *anon_vma;
66 struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ 66 struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
67 struct list_head same_anon_vma; /* locked by anon_vma->lock */ 67 struct list_head same_anon_vma; /* locked by anon_vma->mutex */
68}; 68};
69 69
70#ifdef CONFIG_MMU 70#ifdef CONFIG_MMU
@@ -93,24 +93,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
93{ 93{
94 struct anon_vma *anon_vma = vma->anon_vma; 94 struct anon_vma *anon_vma = vma->anon_vma;
95 if (anon_vma) 95 if (anon_vma)
96 spin_lock(&anon_vma->root->lock); 96 mutex_lock(&anon_vma->root->mutex);
97} 97}
98 98
99static inline void vma_unlock_anon_vma(struct vm_area_struct *vma) 99static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
100{ 100{
101 struct anon_vma *anon_vma = vma->anon_vma; 101 struct anon_vma *anon_vma = vma->anon_vma;
102 if (anon_vma) 102 if (anon_vma)
103 spin_unlock(&anon_vma->root->lock); 103 mutex_unlock(&anon_vma->root->mutex);
104} 104}
105 105
106static inline void anon_vma_lock(struct anon_vma *anon_vma) 106static inline void anon_vma_lock(struct anon_vma *anon_vma)
107{ 107{
108 spin_lock(&anon_vma->root->lock); 108 mutex_lock(&anon_vma->root->mutex);
109} 109}
110 110
111static inline void anon_vma_unlock(struct anon_vma *anon_vma) 111static inline void anon_vma_unlock(struct anon_vma *anon_vma)
112{ 112{
113 spin_unlock(&anon_vma->root->lock); 113 mutex_unlock(&anon_vma->root->mutex);
114} 114}
115 115
116/* 116/*
@@ -218,20 +218,7 @@ int try_to_munlock(struct page *);
218/* 218/*
219 * Called by memory-failure.c to kill processes. 219 * Called by memory-failure.c to kill processes.
220 */ 220 */
221struct anon_vma *__page_lock_anon_vma(struct page *page); 221struct anon_vma *page_lock_anon_vma(struct page *page);
222
223static inline struct anon_vma *page_lock_anon_vma(struct page *page)
224{
225 struct anon_vma *anon_vma;
226
227 __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
228
229 /* (void) is needed to make gcc happy */
230 (void) __cond_lock(&anon_vma->root->lock, anon_vma);
231
232 return anon_vma;
233}
234
235void page_unlock_anon_vma(struct anon_vma *anon_vma); 222void page_unlock_anon_vma(struct anon_vma *anon_vma);
236int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); 223int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
237 224
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 877ece45426f..b27ebea25660 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -92,10 +92,10 @@ struct rtc_pll_info {
92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ 92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
93 93
94/* interrupt flags */ 94/* interrupt flags */
95#define RTC_IRQF 0x80 /* any of the following is active */ 95#define RTC_IRQF 0x80 /* Any of the following is active */
96#define RTC_PF 0x40 96#define RTC_PF 0x40 /* Periodic interrupt */
97#define RTC_AF 0x20 97#define RTC_AF 0x20 /* Alarm interrupt */
98#define RTC_UF 0x10 98#define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */
99 99
100#ifdef __KERNEL__ 100#ifdef __KERNEL__
101 101
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aaf71e08222c..dc8871295a5a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -513,6 +513,7 @@ struct thread_group_cputimer {
513 spinlock_t lock; 513 spinlock_t lock;
514}; 514};
515 515
516#include <linux/rwsem.h>
516struct autogroup; 517struct autogroup;
517 518
518/* 519/*
@@ -632,6 +633,16 @@ struct signal_struct {
632 unsigned audit_tty; 633 unsigned audit_tty;
633 struct tty_audit_buf *tty_audit_buf; 634 struct tty_audit_buf *tty_audit_buf;
634#endif 635#endif
636#ifdef CONFIG_CGROUPS
637 /*
638 * The threadgroup_fork_lock prevents threads from forking with
639 * CLONE_THREAD while held for writing. Use this for fork-sensitive
640 * threadgroup-wide operations. It's taken for reading in fork.c in
641 * copy_process().
642 * Currently only needed write-side by cgroups.
643 */
644 struct rw_semaphore threadgroup_fork_lock;
645#endif
635 646
636 int oom_adj; /* OOM kill score adjustment (bit shift) */ 647 int oom_adj; /* OOM kill score adjustment (bit shift) */
637 int oom_score_adj; /* OOM kill score adjustment */ 648 int oom_score_adj; /* OOM kill score adjustment */
@@ -1753,7 +1764,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1753#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1764#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1754#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1765#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1755#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1766#define PF_KSWAPD 0x00040000 /* I am kswapd */
1756#define PF_OOM_ORIGIN 0x00080000 /* Allocating much memory to others */
1757#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1767#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1758#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1768#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1759#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1769#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
@@ -2177,6 +2187,7 @@ static inline void mmdrop(struct mm_struct * mm)
2177 if (unlikely(atomic_dec_and_test(&mm->mm_count))) 2187 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2178 __mmdrop(mm); 2188 __mmdrop(mm);
2179} 2189}
2190extern int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm);
2180 2191
2181/* mmput gets rid of the mappings and all user-space */ 2192/* mmput gets rid of the mappings and all user-space */
2182extern void mmput(struct mm_struct *); 2193extern void mmput(struct mm_struct *);
@@ -2323,6 +2334,31 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
2323 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); 2334 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2324} 2335}
2325 2336
2337/* See the declaration of threadgroup_fork_lock in signal_struct. */
2338#ifdef CONFIG_CGROUPS
2339static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
2340{
2341 down_read(&tsk->signal->threadgroup_fork_lock);
2342}
2343static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
2344{
2345 up_read(&tsk->signal->threadgroup_fork_lock);
2346}
2347static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
2348{
2349 down_write(&tsk->signal->threadgroup_fork_lock);
2350}
2351static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
2352{
2353 up_write(&tsk->signal->threadgroup_fork_lock);
2354}
2355#else
2356static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
2357static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
2358static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
2359static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
2360#endif
2361
2326#ifndef __HAVE_THREAD_FUNCTIONS 2362#ifndef __HAVE_THREAD_FUNCTIONS
2327 2363
2328#define task_thread_info(task) ((struct thread_info *)(task)->stack) 2364#define task_thread_info(task) ((struct thread_info *)(task)->stack)
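
A rough sketch of the intended write-side use of the new helpers, for instance from a cgroup attach path that must not race with CLONE_THREAD. The foo_* naming and the per-thread body are placeholders, and the usual tasklist or RCU locking around the thread walk is left out for brevity.

#include <linux/sched.h>

static void foo_visit_threadgroup(struct task_struct *leader)
{
	struct task_struct *t = leader;

	threadgroup_fork_write_lock(leader);	/* no new CLONE_THREAD children now */
	do {
		/* operate on thread t here */
	} while_each_thread(leader, t);
	threadgroup_fork_write_unlock(leader);
}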
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 06d69648fc86..e9811892844f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -41,9 +41,6 @@ typedef struct {
41#define __SEQLOCK_UNLOCKED(lockname) \ 41#define __SEQLOCK_UNLOCKED(lockname) \
42 { 0, __SPIN_LOCK_UNLOCKED(lockname) } 42 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
43 43
44#define SEQLOCK_UNLOCKED \
45 __SEQLOCK_UNLOCKED(old_style_seqlock_init)
46
47#define seqlock_init(x) \ 44#define seqlock_init(x) \
48 do { \ 45 do { \
49 (x)->sequence = 0; \ 46 (x)->sequence = 0; \
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 399be5ad2f99..2b7fec840517 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -9,6 +9,8 @@
9 9
10#define SHMEM_NR_DIRECT 16 10#define SHMEM_NR_DIRECT 16
11 11
12#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
13
12struct shmem_inode_info { 14struct shmem_inode_info {
13 spinlock_t lock; 15 spinlock_t lock;
14 unsigned long flags; 16 unsigned long flags;
@@ -17,8 +19,12 @@ struct shmem_inode_info {
17 unsigned long next_index; /* highest alloced index + 1 */ 19 unsigned long next_index; /* highest alloced index + 1 */
18 struct shared_policy policy; /* NUMA memory alloc policy */ 20 struct shared_policy policy; /* NUMA memory alloc policy */
19 struct page *i_indirect; /* top indirect blocks page */ 21 struct page *i_indirect; /* top indirect blocks page */
20 swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* first blocks */ 22 union {
23 swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* first blocks */
24 char inline_symlink[SHMEM_SYMLINK_INLINE_LEN];
25 };
21 struct list_head swaplist; /* chain of maybes on swap */ 26 struct list_head swaplist; /* chain of maybes on swap */
27 struct list_head xattr_list; /* list of shmem_xattr */
22 struct inode vfs_inode; 28 struct inode vfs_inode;
23}; 29};
24 30
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 74243c86ba39..7ad824d510a2 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -98,16 +98,6 @@ void ipi_call_unlock_irq(void);
98 */ 98 */
99int on_each_cpu(smp_call_func_t func, void *info, int wait); 99int on_each_cpu(smp_call_func_t func, void *info, int wait);
100 100
101#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
102#define MSG_ALL 0x8001
103
104#define MSG_INVALIDATE_TLB 0x0001 /* Remote processor TLB invalidate */
105#define MSG_STOP_CPU 0x0002 /* Sent to shut down slave CPU's
106 * when rebooting
107 */
108#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/
109#define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */
110
111/* 101/*
112 * Mark the boot cpu "online" so that it can call console drivers in 102 * Mark the boot cpu "online" so that it can call console drivers in
113 * printk() and can access its per-cpu storage. 103 * printk() and can access its per-cpu storage.
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index b4d7710bc38d..bb4f5fbbbd8e 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -581,7 +581,7 @@ extern int spi_bus_unlock(struct spi_master *master);
581 * Callable only from contexts that can sleep. 581 * Callable only from contexts that can sleep.
582 */ 582 */
583static inline int 583static inline int
584spi_write(struct spi_device *spi, const u8 *buf, size_t len) 584spi_write(struct spi_device *spi, const void *buf, size_t len)
585{ 585{
586 struct spi_transfer t = { 586 struct spi_transfer t = {
587 .tx_buf = buf, 587 .tx_buf = buf,
@@ -605,7 +605,7 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len)
605 * Callable only from contexts that can sleep. 605 * Callable only from contexts that can sleep.
606 */ 606 */
607static inline int 607static inline int
608spi_read(struct spi_device *spi, u8 *buf, size_t len) 608spi_read(struct spi_device *spi, void *buf, size_t len)
609{ 609{
610 struct spi_transfer t = { 610 struct spi_transfer t = {
611 .rx_buf = buf, 611 .rx_buf = buf,
@@ -620,8 +620,8 @@ spi_read(struct spi_device *spi, u8 *buf, size_t len)
620 620
621/* this copies txbuf and rxbuf data; for small transfers only! */ 621/* this copies txbuf and rxbuf data; for small transfers only! */
622extern int spi_write_then_read(struct spi_device *spi, 622extern int spi_write_then_read(struct spi_device *spi,
623 const u8 *txbuf, unsigned n_tx, 623 const void *txbuf, unsigned n_tx,
624 u8 *rxbuf, unsigned n_rx); 624 void *rxbuf, unsigned n_rx);
625 625
626/** 626/**
627 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read 627 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
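
Relaxing the buffer types from u8 * to void * mostly pays off at the call sites, where structured command buffers no longer need casts. A hypothetical register reader, with an invented opcode and command layout:

#include <linux/spi/spi.h>

struct foo_read_cmd {
	u8 opcode;
	u8 addr_hi;
	u8 addr_lo;
} __packed;

static int foo_read_reg(struct spi_device *spi, u16 addr, u8 *val)
{
	struct foo_read_cmd cmd = {
		.opcode  = 0x0b,		/* hypothetical "read register" opcode */
		.addr_hi = addr >> 8,
		.addr_lo = addr & 0xff,
	};

	/* No (const u8 *) cast needed now that the parameters are void *. */
	return spi_write_then_read(spi, &cmd, sizeof(cmd), val, 1);
}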
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a5c6da5d8df8..384eb5fe530b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,7 +257,8 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
257extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 257extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
258 gfp_t gfp_mask, bool noswap, 258 gfp_t gfp_mask, bool noswap,
259 unsigned int swappiness, 259 unsigned int swappiness,
260 struct zone *zone); 260 struct zone *zone,
261 unsigned long *nr_scanned);
261extern int __isolate_lru_page(struct page *page, int mode, int file); 262extern int __isolate_lru_page(struct page *page, int mode, int file);
262extern unsigned long shrink_all_memory(unsigned long nr_pages); 263extern unsigned long shrink_all_memory(unsigned long nr_pages);
263extern int vm_swappiness; 264extern int vm_swappiness;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index ab71447d0c5a..8c03b98df5f9 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -846,4 +846,5 @@ asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
846asmlinkage long sys_open_by_handle_at(int mountdirfd, 846asmlinkage long sys_open_by_handle_at(int mountdirfd,
847 struct file_handle __user *handle, 847 struct file_handle __user *handle,
848 int flags); 848 int flags);
849asmlinkage long sys_setns(int fd, int nstype);
849#endif 850#endif
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
new file mode 100644
index 000000000000..03b90cdc1921
--- /dev/null
+++ b/include/linux/vm_event_item.h
@@ -0,0 +1,64 @@
1#ifndef VM_EVENT_ITEM_H_INCLUDED
2#define VM_EVENT_ITEM_H_INCLUDED
3
4#ifdef CONFIG_ZONE_DMA
5#define DMA_ZONE(xx) xx##_DMA,
6#else
7#define DMA_ZONE(xx)
8#endif
9
10#ifdef CONFIG_ZONE_DMA32
11#define DMA32_ZONE(xx) xx##_DMA32,
12#else
13#define DMA32_ZONE(xx)
14#endif
15
16#ifdef CONFIG_HIGHMEM
17#define HIGHMEM_ZONE(xx) , xx##_HIGH
18#else
19#define HIGHMEM_ZONE(xx)
20#endif
21
22#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
23
24enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
25 FOR_ALL_ZONES(PGALLOC),
26 PGFREE, PGACTIVATE, PGDEACTIVATE,
27 PGFAULT, PGMAJFAULT,
28 FOR_ALL_ZONES(PGREFILL),
29 FOR_ALL_ZONES(PGSTEAL),
30 FOR_ALL_ZONES(PGSCAN_KSWAPD),
31 FOR_ALL_ZONES(PGSCAN_DIRECT),
32#ifdef CONFIG_NUMA
33 PGSCAN_ZONE_RECLAIM_FAILED,
34#endif
35 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
36 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
37 KSWAPD_SKIP_CONGESTION_WAIT,
38 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
39#ifdef CONFIG_COMPACTION
40 COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
41 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
42#endif
43#ifdef CONFIG_HUGETLB_PAGE
44 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
45#endif
46 UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
47 UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
48 UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
49 UNEVICTABLE_PGMLOCKED,
50 UNEVICTABLE_PGMUNLOCKED,
51 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
52 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
53 UNEVICTABLE_MLOCKFREED,
54#ifdef CONFIG_TRANSPARENT_HUGEPAGE
55 THP_FAULT_ALLOC,
56 THP_FAULT_FALLBACK,
57 THP_COLLAPSE_ALLOC,
58 THP_COLLAPSE_ALLOC_FAILED,
59 THP_SPLIT,
60#endif
61 NR_VM_EVENT_ITEMS
62};
63
64#endif /* VM_EVENT_ITEM_H_INCLUDED */
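
Because the FOR_ALL_ZONES() expansion depends on three config options, one worked case may help; assuming CONFIG_ZONE_DMA=y, CONFIG_ZONE_DMA32=n and CONFIG_HIGHMEM=y, the PGALLOC entry above becomes:

/*
 * With CONFIG_ZONE_DMA=y, CONFIG_ZONE_DMA32=n and CONFIG_HIGHMEM=y,
 * FOR_ALL_ZONES(PGALLOC) expands to:
 *
 *	PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 */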
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2b3831b58aa4..bcd942fa611c 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -5,69 +5,9 @@
5#include <linux/percpu.h> 5#include <linux/percpu.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/mmzone.h> 7#include <linux/mmzone.h>
8#include <linux/vm_event_item.h>
8#include <asm/atomic.h> 9#include <asm/atomic.h>
9 10
10#ifdef CONFIG_ZONE_DMA
11#define DMA_ZONE(xx) xx##_DMA,
12#else
13#define DMA_ZONE(xx)
14#endif
15
16#ifdef CONFIG_ZONE_DMA32
17#define DMA32_ZONE(xx) xx##_DMA32,
18#else
19#define DMA32_ZONE(xx)
20#endif
21
22#ifdef CONFIG_HIGHMEM
23#define HIGHMEM_ZONE(xx) , xx##_HIGH
24#else
25#define HIGHMEM_ZONE(xx)
26#endif
27
28
29#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
30
31enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
32 FOR_ALL_ZONES(PGALLOC),
33 PGFREE, PGACTIVATE, PGDEACTIVATE,
34 PGFAULT, PGMAJFAULT,
35 FOR_ALL_ZONES(PGREFILL),
36 FOR_ALL_ZONES(PGSTEAL),
37 FOR_ALL_ZONES(PGSCAN_KSWAPD),
38 FOR_ALL_ZONES(PGSCAN_DIRECT),
39#ifdef CONFIG_NUMA
40 PGSCAN_ZONE_RECLAIM_FAILED,
41#endif
42 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
43 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
44 KSWAPD_SKIP_CONGESTION_WAIT,
45 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
46#ifdef CONFIG_COMPACTION
47 COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
48 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
49#endif
50#ifdef CONFIG_HUGETLB_PAGE
51 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
52#endif
53 UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
54 UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
55 UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
56 UNEVICTABLE_PGMLOCKED,
57 UNEVICTABLE_PGMUNLOCKED,
58 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
59 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
60 UNEVICTABLE_MLOCKFREED,
61#ifdef CONFIG_TRANSPARENT_HUGEPAGE
62 THP_FAULT_ALLOC,
63 THP_FAULT_FALLBACK,
64 THP_COLLAPSE_ALLOC,
65 THP_COLLAPSE_ALLOC_FAILED,
66 THP_SPLIT,
67#endif
68 NR_VM_EVENT_ITEMS
69};
70
71extern int sysctl_stat_interval; 11extern int sysctl_stat_interval;
72 12
73#ifdef CONFIG_VM_EVENT_COUNTERS 13#ifdef CONFIG_VM_EVENT_COUNTERS
@@ -261,6 +201,7 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
261extern void __dec_zone_state(struct zone *, enum zone_stat_item); 201extern void __dec_zone_state(struct zone *, enum zone_stat_item);
262 202
263void refresh_cpu_vm_stats(int); 203void refresh_cpu_vm_stats(int);
204void refresh_zone_stat_thresholds(void);
264 205
265int calculate_pressure_threshold(struct zone *zone); 206int calculate_pressure_threshold(struct zone *zone);
266int calculate_normal_threshold(struct zone *zone); 207int calculate_normal_threshold(struct zone *zone);
@@ -313,6 +254,10 @@ static inline void __dec_zone_page_state(struct page *page,
313#define set_pgdat_percpu_threshold(pgdat, callback) { } 254#define set_pgdat_percpu_threshold(pgdat, callback) { }
314 255
315static inline void refresh_cpu_vm_stats(int cpu) { } 256static inline void refresh_cpu_vm_stats(int cpu) { }
316#endif 257static inline void refresh_zone_stat_thresholds(void) { }
258
259#endif /* CONFIG_SMP */
260
261extern const char * const vmstat_text[];
317 262
318#endif /* _LINUX_VMSTAT_H */ 263#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 6050783005bd..aed54c50aa66 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -13,10 +13,6 @@
13#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ 13#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
14#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ 14#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
15 15
16#ifdef __KERNEL__
17
18#include <linux/types.h>
19
20/* Namespaces */ 16/* Namespaces */
21#define XATTR_OS2_PREFIX "os2." 17#define XATTR_OS2_PREFIX "os2."
22#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1) 18#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
@@ -53,6 +49,10 @@
53#define XATTR_CAPS_SUFFIX "capability" 49#define XATTR_CAPS_SUFFIX "capability"
54#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX 50#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
55 51
52#ifdef __KERNEL__
53
54#include <linux/types.h>
55
56struct inode; 56struct inode;
57struct dentry; 57struct dentry;
58 58
diff --git a/include/media/m5mols.h b/include/media/m5mols.h
new file mode 100644
index 000000000000..2d7e7ca2313d
--- /dev/null
+++ b/include/media/m5mols.h
@@ -0,0 +1,35 @@
1/*
2 * Driver header for M-5MOLS 8M Pixel camera sensor with ISP
3 *
4 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
5 * Author: HeungJun Kim, riverful.kim@samsung.com
6 *
7 * Copyright (C) 2009 Samsung Electronics Co., Ltd.
8 * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef MEDIA_M5MOLS_H
17#define MEDIA_M5MOLS_H
18
19/**
20 * struct m5mols_platform_data - platform data for M-5MOLS driver
 21 * @irq: GPIO used as the interrupt pin of the M-5MOLS
 22 * @gpio_reset: GPIO driving the reset pin of the M-5MOLS
 23 * @reset_polarity: active state of the gpio_reset pin, 0 or 1
24 * @set_power: an additional callback to the board setup code
25 * to be called after enabling and before disabling
26 * the sensor's supply regulators
27 */
28struct m5mols_platform_data {
29 int irq;
30 int gpio_reset;
31 u8 reset_polarity;
32 int (*set_power)(struct device *dev, int on);
33};
34
35#endif /* MEDIA_M5MOLS_H */
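
A board file would feed this structure to the sensor's I2C board info roughly as follows; the numbers and the power callback are purely illustrative.

#include <linux/device.h>
#include <media/m5mols.h>

static int foo_m5mols_set_power(struct device *dev, int on)
{
	/* toggle board-specific sensor supplies here */
	return 0;
}

static struct m5mols_platform_data foo_m5mols_pdata = {
	.irq		= 123,			/* placeholder interrupt GPIO/IRQ */
	.gpio_reset	= 45,			/* placeholder reset GPIO */
	.reset_polarity	= 0,			/* reset is active low on this board */
	.set_power	= foo_m5mols_set_power,
};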
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index 07cf4b9d0a65..bf365721d6b0 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -4,6 +4,9 @@
4#include <dvb_net.h> 4#include <dvb_net.h>
5#include <dvb_frontend.h> 5#include <dvb_frontend.h>
6 6
7#ifndef _VIDEOBUF_DVB_H_
8#define _VIDEOBUF_DVB_H_
9
7struct videobuf_dvb { 10struct videobuf_dvb {
8 /* filling that the job of the driver */ 11 /* filling that the job of the driver */
9 char *name; 12 char *name;
@@ -54,6 +57,7 @@ void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f);
54struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id); 57struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id);
55int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p); 58int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
56 59
60#endif /* _VIDEOBUF_DVB_H_ */
57 61
58/* 62/*
59 * Local variables: 63 * Local variables:
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index d2df55b0c213..008711e8e78f 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -241,10 +241,10 @@ enum p9_open_mode_t {
241 241
242/** 242/**
243 * enum p9_perm_t - 9P permissions 243 * enum p9_perm_t - 9P permissions
244 * @P9_DMDIR: mode bite for directories 244 * @P9_DMDIR: mode bit for directories
 245 * @P9_DMAPPEND: mode bit for append-only files 245 * @P9_DMAPPEND: mode bit for append-only files
 246 * @P9_DMEXCL: mode bit for exclusive use (only one open handle allowed) 246 * @P9_DMEXCL: mode bit for exclusive use (only one open handle allowed)
247 * @P9_DMMOUNT: mode bite for mount points 247 * @P9_DMMOUNT: mode bit for mount points
248 * @P9_DMAUTH: mode bit for authentication file 248 * @P9_DMAUTH: mode bit for authentication file
249 * @P9_DMTMP: mode bit for non-backed-up files 249 * @P9_DMTMP: mode bit for non-backed-up files
250 * @P9_DMSYMLINK: mode bit for symbolic links (9P2000.u) 250 * @P9_DMSYMLINK: mode bit for symbolic links (9P2000.u)
@@ -362,7 +362,7 @@ struct p9_qid {
362}; 362};
363 363
364/** 364/**
365 * struct p9_stat - file system metadata information 365 * struct p9_wstat - file system metadata information
366 * @size: length prefix for this stat structure instance 366 * @size: length prefix for this stat structure instance
367 * @type: the type of the server (equivalent to a major number) 367 * @type: the type of the server (equivalent to a major number)
368 * @dev: the sub-type of the server (equivalent to a minor number) 368 * @dev: the sub-type of the server (equivalent to a minor number)
@@ -687,10 +687,10 @@ struct p9_rwstat {
687 * @size: prefixed length of the structure 687 * @size: prefixed length of the structure
688 * @id: protocol operating identifier of type &p9_msg_t 688 * @id: protocol operating identifier of type &p9_msg_t
689 * @tag: transaction id of the request 689 * @tag: transaction id of the request
690 * @offset: used by marshalling routines to track currentposition in buffer 690 * @offset: used by marshalling routines to track current position in buffer
691 * @capacity: used by marshalling routines to track total malloc'd capacity 691 * @capacity: used by marshalling routines to track total malloc'd capacity
692 * @pubuf: Payload user buffer given by the caller 692 * @pubuf: Payload user buffer given by the caller
693 * @pubuf: Payload kernel buffer given by the caller 693 * @pkbuf: Payload kernel buffer given by the caller
694 * @pbuf_size: pubuf/pkbuf(only one will be !NULL) size to be read/write. 694 * @pbuf_size: pubuf/pkbuf(only one will be !NULL) size to be read/write.
695 * @private: For transport layer's use. 695 * @private: For transport layer's use.
696 * @sdata: payload 696 * @sdata: payload
@@ -714,7 +714,7 @@ struct p9_fcall {
714 size_t pbuf_size; 714 size_t pbuf_size;
715 void *private; 715 void *private;
716 716
717 uint8_t *sdata; 717 u8 *sdata;
718}; 718};
719 719
720struct p9_idpool; 720struct p9_idpool;
@@ -728,7 +728,6 @@ void p9_idpool_put(int id, struct p9_idpool *p);
728int p9_idpool_check(int id, struct p9_idpool *p); 728int p9_idpool_check(int id, struct p9_idpool *p);
729 729
730int p9_error_init(void); 730int p9_error_init(void);
731int p9_errstr2errno(char *, int);
732int p9_trans_fd_init(void); 731int p9_trans_fd_init(void);
733void p9_trans_fd_exit(void); 732void p9_trans_fd_exit(void);
734#endif /* NET_9P_H */ 733#endif /* NET_9P_H */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 051a99f79769..d26d5e98a173 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -60,7 +60,7 @@ enum p9_trans_status {
60}; 60};
61 61
62/** 62/**
63 * enum p9_req_status_t - virtio request status 63 * enum p9_req_status_t - status of a request
64 * @REQ_STATUS_IDLE: request slot unused 64 * @REQ_STATUS_IDLE: request slot unused
65 * @REQ_STATUS_ALLOC: request has been allocated but not sent 65 * @REQ_STATUS_ALLOC: request has been allocated but not sent
66 * @REQ_STATUS_UNSENT: request waiting to be sent 66 * @REQ_STATUS_UNSENT: request waiting to be sent
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 8f08c736c4c3..d8549fb9c742 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -41,6 +41,7 @@
41 * @pref: Preferences of this transport 41 * @pref: Preferences of this transport
42 * @def: set if this transport should be considered the default 42 * @def: set if this transport should be considered the default
43 * @create: member function to create a new connection on this transport 43 * @create: member function to create a new connection on this transport
44 * @close: member function to discard a connection on this transport
44 * @request: member function to issue a request to the transport 45 * @request: member function to issue a request to the transport
45 * @cancel: member function to cancel a request (if it hasn't been sent) 46 * @cancel: member function to cancel a request (if it hasn't been sent)
46 * 47 *
@@ -48,7 +49,7 @@
48 * transport module with the 9P core network module and used by the client 49 * transport module with the 9P core network module and used by the client
49 * to instantiate a new connection on a transport. 50 * to instantiate a new connection on a transport.
50 * 51 *
51 * BUGS: the transport module list isn't protected. 52 * The transport module list is protected by v9fs_trans_lock.
52 */ 53 */
53 54
54struct p9_trans_module { 55struct p9_trans_module {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bfd6557946be..0589f554788a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -531,6 +531,7 @@ struct sta_bss_parameters {
531 * @tx_retries: cumulative retry counts 531 * @tx_retries: cumulative retry counts
532 * @tx_failed: number of failed transmissions (retries exceeded, no ACK) 532 * @tx_failed: number of failed transmissions (retries exceeded, no ACK)
533 * @rx_dropped_misc: Dropped for un-specified reason. 533 * @rx_dropped_misc: Dropped for un-specified reason.
534 * @bss_param: current BSS parameters
534 * @generation: generation number for nl80211 dumps. 535 * @generation: generation number for nl80211 dumps.
535 * This number should increase every time the list of stations 536 * This number should increase every time the list of stations
536 * changes, i.e. when a station is added or removed, so that 537 * changes, i.e. when a station is added or removed, so that
@@ -1537,7 +1538,7 @@ struct cfg80211_ops {
1537 * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. 1538 * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
1538 * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing 1539 * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
1539 * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH. 1540 * auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
1540 * @WIPHY_FLAG_SCHED_SCAN: The device supports scheduled scans. 1541 * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
1541 */ 1542 */
1542enum wiphy_flags { 1543enum wiphy_flags {
1543 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), 1544 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@@ -2878,6 +2879,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
2878 * cfg80211_roamed - notify cfg80211 of roaming 2879 * cfg80211_roamed - notify cfg80211 of roaming
2879 * 2880 *
2880 * @dev: network device 2881 * @dev: network device
2882 * @channel: the channel of the new AP
2881 * @bssid: the BSSID of the new AP 2883 * @bssid: the BSSID of the new AP
 2882 * @req_ie: association request IEs (may be %NULL) 2884 * @req_ie: association request IEs (may be %NULL)
2883 * @req_ie_len: association request IEs length 2885 * @req_ie_len: association request IEs length
@@ -2888,7 +2890,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
2888 * It should be called by the underlying driver whenever it roamed 2890 * It should be called by the underlying driver whenever it roamed
2889 * from one AP to another while connected. 2891 * from one AP to another while connected.
2890 */ 2892 */
2891void cfg80211_roamed(struct net_device *dev, const u8 *bssid, 2893void cfg80211_roamed(struct net_device *dev,
2894 struct ieee80211_channel *channel,
2895 const u8 *bssid,
2892 const u8 *req_ie, size_t req_ie_len, 2896 const u8 *req_ie, size_t req_ie_len,
2893 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp); 2897 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
2894 2898
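
With the added channel argument, a driver's roam notification ends up looking like the sketch below; every parameter comes from the driver's own state and the wrapper name is invented.

#include <net/cfg80211.h>

static void foo_notify_roam(struct net_device *dev,
			    struct ieee80211_channel *chan, const u8 *bssid,
			    const u8 *req_ie, size_t req_ie_len,
			    const u8 *resp_ie, size_t resp_ie_len)
{
	cfg80211_roamed(dev, chan, bssid, req_ie, req_ie_len,
			resp_ie, resp_ie_len, GFP_KERNEL);
}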
diff --git a/include/net/dst.h b/include/net/dst.h
index 07a0402c52e6..7d15d238b6ec 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -111,6 +111,8 @@ static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
111{ 111{
112 unsigned long p = dst->_metrics; 112 unsigned long p = dst->_metrics;
113 113
114 BUG_ON(!p);
115
114 if (p & DST_METRICS_READ_ONLY) 116 if (p & DST_METRICS_READ_ONLY)
115 return dst->ops->cow_metrics(dst, p); 117 return dst->ops->cow_metrics(dst, p);
116 return __DST_METRICS_PTR(p); 118 return __DST_METRICS_PTR(p);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 3ae491932bc8..dcc8f5749d3f 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -119,6 +119,7 @@ static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns)
119extern struct list_head net_namespace_list; 119extern struct list_head net_namespace_list;
120 120
121extern struct net *get_net_ns_by_pid(pid_t pid); 121extern struct net *get_net_ns_by_pid(pid_t pid);
 122extern struct net *get_net_ns_by_fd(int fd);
122 123
123#ifdef CONFIG_NET_NS 124#ifdef CONFIG_NET_NS
124extern void __put_net(struct net *net); 125extern void __put_net(struct net *net);
diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild
index e7c043216558..ea56f76c0c22 100644
--- a/include/rdma/Kbuild
+++ b/include/rdma/Kbuild
@@ -1 +1,6 @@
1header-y += ib_user_cm.h
1header-y += ib_user_mad.h 2header-y += ib_user_mad.h
3header-y += ib_user_sa.h
4header-y += ib_user_verbs.h
5header-y += rdma_netlink.h
6header-y += rdma_user_cm.h
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h
index bd3d380781e0..f79014aa28f9 100644
--- a/include/rdma/ib_user_cm.h
+++ b/include/rdma/ib_user_cm.h
@@ -34,6 +34,7 @@
34#ifndef IB_USER_CM_H 34#ifndef IB_USER_CM_H
35#define IB_USER_CM_H 35#define IB_USER_CM_H
36 36
37#include <linux/types.h>
37#include <rdma/ib_user_sa.h> 38#include <rdma/ib_user_sa.h>
38 39
39#define IB_USER_CM_ABI_VERSION 5 40#define IB_USER_CM_ABI_VERSION 5
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 169f7a53fb0c..26977c149c41 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -111,6 +111,20 @@ struct rdma_cm_event {
111 } param; 111 } param;
112}; 112};
113 113
114enum rdma_cm_state {
115 RDMA_CM_IDLE,
116 RDMA_CM_ADDR_QUERY,
117 RDMA_CM_ADDR_RESOLVED,
118 RDMA_CM_ROUTE_QUERY,
119 RDMA_CM_ROUTE_RESOLVED,
120 RDMA_CM_CONNECT,
121 RDMA_CM_DISCONNECT,
122 RDMA_CM_ADDR_BOUND,
123 RDMA_CM_LISTEN,
124 RDMA_CM_DEVICE_REMOVAL,
125 RDMA_CM_DESTROYING
126};
127
114struct rdma_cm_id; 128struct rdma_cm_id;
115 129
116/** 130/**
@@ -130,6 +144,7 @@ struct rdma_cm_id {
130 rdma_cm_event_handler event_handler; 144 rdma_cm_event_handler event_handler;
131 struct rdma_route route; 145 struct rdma_route route;
132 enum rdma_port_space ps; 146 enum rdma_port_space ps;
147 enum ib_qp_type qp_type;
133 u8 port_num; 148 u8 port_num;
134}; 149};
135 150
@@ -140,9 +155,11 @@ struct rdma_cm_id {
140 * returned rdma_id. 155 * returned rdma_id.
141 * @context: User specified context associated with the id. 156 * @context: User specified context associated with the id.
142 * @ps: RDMA port space. 157 * @ps: RDMA port space.
158 * @qp_type: type of queue pair associated with the id.
143 */ 159 */
144struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 160struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
145 void *context, enum rdma_port_space ps); 161 void *context, enum rdma_port_space ps,
162 enum ib_qp_type qp_type);
146 163
147/** 164/**
148 * rdma_destroy_id - Destroys an RDMA identifier. 165 * rdma_destroy_id - Destroys an RDMA identifier.
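
Callers of rdma_create_id() now choose the queue pair type explicitly. A hedged example, with RC over the TCP port space picked only as a typical combination; the wrapper name is invented.

#include <rdma/rdma_cm.h>

static struct rdma_cm_id *foo_create_cm_id(rdma_cm_event_handler handler,
					   void *context)
{
	/* RC queue pair over the TCP port space, as a typical choice. */
	return rdma_create_id(handler, context, RDMA_PS_TCP, IB_QPT_RC);
}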
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
new file mode 100644
index 000000000000..3c5363ab867b
--- /dev/null
+++ b/include/rdma/rdma_netlink.h
@@ -0,0 +1,92 @@
1#ifndef _RDMA_NETLINK_H
2#define _RDMA_NETLINK_H
3
4#include <linux/types.h>
5
6enum {
7 RDMA_NL_RDMA_CM = 1
8};
9
10#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
11#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
12#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
13
14enum {
15 RDMA_NL_RDMA_CM_ID_STATS = 0,
16 RDMA_NL_RDMA_CM_NUM_OPS
17};
18
19enum {
20 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
21 RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
22 RDMA_NL_RDMA_CM_NUM_ATTR,
23};
24
25struct rdma_cm_id_stats {
26 __u32 qp_num;
27 __u32 bound_dev_if;
28 __u32 port_space;
29 __s32 pid;
30 __u8 cm_state;
31 __u8 node_type;
32 __u8 port_num;
33 __u8 qp_type;
34};
35
36#ifdef __KERNEL__
37
38#include <linux/netlink.h>
39
40struct ibnl_client_cbs {
41 int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
42};
43
44int ibnl_init(void);
45void ibnl_cleanup(void);
46
47/**
 48 * Add a client to the list of IB netlink exporters.
49 * @index: Index of the added client
50 * @nops: Number of supported ops by the added client.
51 * @cb_table: A table for op->callback
52 *
53 * Returns 0 on success or a negative error code.
54 */
55int ibnl_add_client(int index, int nops,
56 const struct ibnl_client_cbs cb_table[]);
57
58/**
59 * Remove a client from IB netlink.
60 * @index: Index of the removed IB client.
61 *
62 * Returns 0 on success or a negative error code.
63 */
64int ibnl_remove_client(int index);
65
66/**
67 * Put a new message in a supplied skb.
68 * @skb: The netlink skb.
69 * @nlh: Pointer to put the header of the new netlink message.
70 * @seq: The message sequence number.
71 * @len: The requested message length to allocate.
72 * @client: Calling IB netlink client.
73 * @op: message content op.
74 * Returns the allocated buffer on success and NULL on failure.
75 */
76void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
77 int len, int client, int op);
78/**
79 * Put a new attribute in a supplied skb.
80 * @skb: The netlink skb.
81 * @nlh: Header of the netlink message to append the attribute to.
82 * @len: The length of the attribute data.
83 * @data: The attribute data to put.
84 * @type: The attribute type.
 85 * Returns 0 on success and a negative error code on failure.
86 */
87int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
88 int len, void *data, int type);
89
90#endif /* __KERNEL__ */
91
92#endif /* _RDMA_NETLINK_H */
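
The GET_TYPE/GET_CLIENT/GET_OP macros pack a 6-bit client index above a 10-bit op in the netlink message type; a quick worked example:

/*
 * RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_ID_STATS)
 *	== (1 << 10) + 0 == 1024
 *
 * and decoding that value again:
 *	RDMA_NL_GET_CLIENT(1024) == 1	(RDMA_NL_RDMA_CM)
 *	RDMA_NL_GET_OP(1024)     == 0	(RDMA_NL_RDMA_CM_ID_STATS)
 */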
diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h
new file mode 100644
index 000000000000..927a8ad9e51b
--- /dev/null
+++ b/include/trace/events/gpio.h
@@ -0,0 +1,56 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM gpio
3
4#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_GPIO_H
6
7#include <linux/tracepoint.h>
8
9TRACE_EVENT(gpio_direction,
10
11 TP_PROTO(unsigned gpio, int in, int err),
12
13 TP_ARGS(gpio, in, err),
14
15 TP_STRUCT__entry(
16 __field(unsigned, gpio)
17 __field(int, in)
18 __field(int, err)
19 ),
20
21 TP_fast_assign(
22 __entry->gpio = gpio;
23 __entry->in = in;
24 __entry->err = err;
25 ),
26
27 TP_printk("%u %3s (%d)", __entry->gpio,
28 __entry->in ? "in" : "out", __entry->err)
29);
30
31TRACE_EVENT(gpio_value,
32
33 TP_PROTO(unsigned gpio, int get, int value),
34
35 TP_ARGS(gpio, get, value),
36
37 TP_STRUCT__entry(
38 __field(unsigned, gpio)
39 __field(int, get)
40 __field(int, value)
41 ),
42
43 TP_fast_assign(
44 __entry->gpio = gpio;
45 __entry->get = get;
46 __entry->value = value;
47 ),
48
49 TP_printk("%u %3s %d", __entry->gpio,
50 __entry->get ? "get" : "set", __entry->value)
51);
52
53#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */
54
55/* This part must be outside protection */
56#include <trace/define_trace.h>
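
The TRACE_EVENT() definitions above generate trace_gpio_direction() and trace_gpio_value() hooks. A sketch of a caller, with the hardware access elided and the function name invented:

#define CREATE_TRACE_POINTS		/* define the tracepoints in exactly one .c file */
#include <trace/events/gpio.h>

static int foo_gpio_direction_output(unsigned gpio, int value)
{
	int err = 0;

	/* program the controller for output and drive the initial value here */
	trace_gpio_value(gpio, 0, value);	/* 0 == "set" */
	trace_gpio_direction(gpio, 0, err);	/* 0 == "out" */
	return err;
}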
diff --git a/arch/arm/plat-omap/include/plat/panel-generic-dpi.h b/include/video/omap-panel-generic-dpi.h
index 790619734bcd..127e3f20328e 100644
--- a/arch/arm/plat-omap/include/plat/panel-generic-dpi.h
+++ b/include/video/omap-panel-generic-dpi.h
@@ -17,10 +17,10 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#ifndef __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H 20#ifndef __OMAP_PANEL_GENERIC_DPI_H
21#define __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H 21#define __OMAP_PANEL_GENERIC_DPI_H
22 22
23#include "display.h" 23struct omap_dss_device;
24 24
25/** 25/**
26 * struct panel_generic_dpi_data - panel driver configuration data 26 * struct panel_generic_dpi_data - panel driver configuration data
@@ -34,4 +34,4 @@ struct panel_generic_dpi_data {
34 void (*platform_disable)(struct omap_dss_device *dssdev); 34 void (*platform_disable)(struct omap_dss_device *dssdev);
35}; 35};
36 36
37#endif /* __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H */ 37#endif /* __OMAP_PANEL_GENERIC_DPI_H */
diff --git a/arch/arm/plat-omap/include/plat/nokia-dsi-panel.h b/include/video/omap-panel-nokia-dsi.h
index 01ab6572ccbb..921ae9327228 100644
--- a/arch/arm/plat-omap/include/plat/nokia-dsi-panel.h
+++ b/include/video/omap-panel-nokia-dsi.h
@@ -1,14 +1,15 @@
1#ifndef __ARCH_ARM_PLAT_OMAP_NOKIA_DSI_PANEL_H 1#ifndef __OMAP_NOKIA_DSI_PANEL_H
2#define __ARCH_ARM_PLAT_OMAP_NOKIA_DSI_PANEL_H 2#define __OMAP_NOKIA_DSI_PANEL_H
3 3
4#include "display.h" 4struct omap_dss_device;
5 5
6/** 6/**
7 * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration 7 * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration
8 * @name: panel name 8 * @name: panel name
9 * @use_ext_te: use external TE 9 * @use_ext_te: use external TE
10 * @ext_te_gpio: external TE GPIO 10 * @ext_te_gpio: external TE GPIO
11 * @use_esd_check: perform ESD checks 11 * @esd_interval: interval of ESD checks, 0 = disabled (ms)
12 * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
12 * @max_backlight_level: maximum backlight level 13 * @max_backlight_level: maximum backlight level
13 * @set_backlight: pointer to backlight set function 14 * @set_backlight: pointer to backlight set function
14 * @get_backlight: pointer to backlight get function 15 * @get_backlight: pointer to backlight get function
@@ -21,11 +22,12 @@ struct nokia_dsi_panel_data {
21 bool use_ext_te; 22 bool use_ext_te;
22 int ext_te_gpio; 23 int ext_te_gpio;
23 24
24 bool use_esd_check; 25 unsigned esd_interval;
26 unsigned ulps_timeout;
25 27
26 int max_backlight_level; 28 int max_backlight_level;
27 int (*set_backlight)(struct omap_dss_device *dssdev, int level); 29 int (*set_backlight)(struct omap_dss_device *dssdev, int level);
28 int (*get_backlight)(struct omap_dss_device *dssdev); 30 int (*get_backlight)(struct omap_dss_device *dssdev);
29}; 31};
30 32
31#endif /* __ARCH_ARM_PLAT_OMAP_NOKIA_DSI_PANEL_H */ 33#endif /* __OMAP_NOKIA_DSI_PANEL_H */
diff --git a/arch/arm/plat-omap/include/plat/display.h b/include/video/omapdss.h
index 5e04ddc18fa8..892b97f8e157 100644
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/include/video/omapdss.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/include/asm-arm/arch-omap/display.h
3 *
4 * Copyright (C) 2008 Nokia Corporation 2 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> 3 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 * 4 *
@@ -17,8 +15,8 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 16 */
19 17
20#ifndef __ASM_ARCH_OMAP_DISPLAY_H 18#ifndef __OMAP_OMAPDSS_H
21#define __ASM_ARCH_OMAP_DISPLAY_H 19#define __OMAP_OMAPDSS_H
22 20
23#include <linux/list.h> 21#include <linux/list.h>
24#include <linux/kobject.h> 22#include <linux/kobject.h>
@@ -88,6 +86,11 @@ enum omap_color_mode {
88 OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */ 86 OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
89 OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */ 87 OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
90 OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */ 88 OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
89 OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
90 OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */
91 OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */
92 OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */
93 OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
91}; 94};
92 95
93enum omap_lcd_display_type { 96enum omap_lcd_display_type {
@@ -174,6 +177,17 @@ enum omap_overlay_manager_caps {
174 OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0, 177 OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0,
175}; 178};
176 179
180enum omap_dss_clk_source {
181 OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
182 * OMAP4: DSS_FCLK */
183 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
184 * OMAP4: PLL1_CLK1 */
185 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
186 * OMAP4: PLL1_CLK2 */
187 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
188 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
189};
190
177/* RFBI */ 191/* RFBI */
178 192
179struct rfbi_timings { 193struct rfbi_timings {
@@ -205,20 +219,30 @@ int omap_rfbi_enable_te(bool enable, unsigned line);
205int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode, 219int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
206 unsigned hs_pulse_time, unsigned vs_pulse_time, 220 unsigned hs_pulse_time, unsigned vs_pulse_time,
207 int hs_pol_inv, int vs_pol_inv, int extif_div); 221 int hs_pol_inv, int vs_pol_inv, int extif_div);
222void rfbi_bus_lock(void);
223void rfbi_bus_unlock(void);
208 224
209/* DSI */ 225/* DSI */
210void dsi_bus_lock(void); 226void dsi_bus_lock(struct omap_dss_device *dssdev);
211void dsi_bus_unlock(void); 227void dsi_bus_unlock(struct omap_dss_device *dssdev);
212int dsi_vc_dcs_write(int channel, u8 *data, int len); 228int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
213int dsi_vc_dcs_write_0(int channel, u8 dcs_cmd); 229 int len);
214int dsi_vc_dcs_write_1(int channel, u8 dcs_cmd, u8 param); 230int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel,
215int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len); 231 u8 dcs_cmd);
216int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen); 232int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
217int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data); 233 u8 param);
218int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2); 234int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
219int dsi_vc_set_max_rx_packet_size(int channel, u16 len); 235 u8 *data, int len);
220int dsi_vc_send_null(int channel); 236int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
221int dsi_vc_send_bta_sync(int channel); 237 u8 *buf, int buflen);
238int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
239 u8 *data);
240int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
241 u8 *data1, u8 *data2);
242int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
243 u16 len);
244int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
245int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel);
222 246
223/* Board specific data */ 247/* Board specific data */
224struct omap_dss_board_info { 248struct omap_dss_board_info {
@@ -226,6 +250,7 @@ struct omap_dss_board_info {
226 int num_devices; 250 int num_devices;
227 struct omap_dss_device **devices; 251 struct omap_dss_device **devices;
228 struct omap_dss_device *default_device; 252 struct omap_dss_device *default_device;
253 void (*dsi_mux_pads)(bool enable);
229}; 254};
230 255
231#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS) 256#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
@@ -280,6 +305,7 @@ struct omap_overlay_info {
280 305
281 u32 paddr; 306 u32 paddr;
282 void __iomem *vaddr; 307 void __iomem *vaddr;
308 u32 p_uv_addr; /* for NV12 format */
283 u16 screen_width; 309 u16 screen_width;
284 u16 width; 310 u16 width;
285 u16 height; 311 u16 height;
@@ -400,18 +426,12 @@ struct omap_dss_device {
400 u8 data1_pol; 426 u8 data1_pol;
401 u8 data2_lane; 427 u8 data2_lane;
402 u8 data2_pol; 428 u8 data2_pol;
429 u8 data3_lane;
430 u8 data3_pol;
431 u8 data4_lane;
432 u8 data4_pol;
403 433
404 struct { 434 int module;
405 u16 regn;
406 u16 regm;
407 u16 regm_dispc;
408 u16 regm_dsi;
409
410 u16 lp_clk_div;
411
412 u16 lck_div;
413 u16 pck_div;
414 } div;
415 435
416 bool ext_te; 436 bool ext_te;
417 u8 ext_te_gpio; 437 u8 ext_te_gpio;
@@ -424,6 +444,33 @@ struct omap_dss_device {
424 } phy; 444 } phy;
425 445
426 struct { 446 struct {
447 struct {
448 struct {
449 u16 lck_div;
450 u16 pck_div;
451 enum omap_dss_clk_source lcd_clk_src;
452 } channel;
453
454 enum omap_dss_clk_source dispc_fclk_src;
455 } dispc;
456
457 struct {
458 u16 regn;
459 u16 regm;
460 u16 regm_dispc;
461 u16 regm_dsi;
462
463 u16 lp_clk_div;
464 enum omap_dss_clk_source dsi_fclk_src;
465 } dsi;
466
467 struct {
468 u16 regn;
469 u16 regm2;
470 } hdmi;
471 } clocks;
472
473 struct {
427 struct omap_video_timings timings; 474 struct omap_video_timings timings;
428 475
429 int acbi; /* ac-bias pin transitions per interrupt */ 476 int acbi; /* ac-bias pin transitions per interrupt */
@@ -503,6 +550,8 @@ struct omap_dss_driver {
503 550
504 void (*get_resolution)(struct omap_dss_device *dssdev, 551 void (*get_resolution)(struct omap_dss_device *dssdev,
505 u16 *xres, u16 *yres); 552 u16 *xres, u16 *yres);
553 void (*get_dimensions)(struct omap_dss_device *dssdev,
554 u32 *width, u32 *height);
506 int (*get_recommended_bpp)(struct omap_dss_device *dssdev); 555 int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
507 556
508 int (*check_timings)(struct omap_dss_device *dssdev, 557 int (*check_timings)(struct omap_dss_device *dssdev,
@@ -519,9 +568,6 @@ struct omap_dss_driver {
519int omap_dss_register_driver(struct omap_dss_driver *); 568int omap_dss_register_driver(struct omap_dss_driver *);
520void omap_dss_unregister_driver(struct omap_dss_driver *); 569void omap_dss_unregister_driver(struct omap_dss_driver *);
521 570
522int omap_dss_register_device(struct omap_dss_device *);
523void omap_dss_unregister_device(struct omap_dss_device *);
524
525void omap_dss_get_device(struct omap_dss_device *dssdev); 571void omap_dss_get_device(struct omap_dss_device *dssdev);
526void omap_dss_put_device(struct omap_dss_device *dssdev); 572void omap_dss_put_device(struct omap_dss_device *dssdev);
527#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) 573#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
@@ -553,7 +599,8 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
553#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver) 599#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
554#define to_dss_device(x) container_of((x), struct omap_dss_device, dev) 600#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
555 601
556void omapdss_dsi_vc_enable_hs(int channel, bool enable); 602void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
603 bool enable);
557int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable); 604int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable);
558 605
559int omap_dsi_prepare_update(struct omap_dss_device *dssdev, 606int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
@@ -568,7 +615,8 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id);
568void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel); 615void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel);
569 616
570int omapdss_dsi_display_enable(struct omap_dss_device *dssdev); 617int omapdss_dsi_display_enable(struct omap_dss_device *dssdev);
571void omapdss_dsi_display_disable(struct omap_dss_device *dssdev); 618void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
619 bool disconnect_lanes, bool enter_ulps);
572 620
573int omapdss_dpi_display_enable(struct omap_dss_device *dssdev); 621int omapdss_dpi_display_enable(struct omap_dss_device *dssdev);
574void omapdss_dpi_display_disable(struct omap_dss_device *dssdev); 622void omapdss_dpi_display_disable(struct omap_dss_device *dssdev);
@@ -587,5 +635,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
587int omap_rfbi_update(struct omap_dss_device *dssdev, 635int omap_rfbi_update(struct omap_dss_device *dssdev,
588 u16 x, u16 y, u16 w, u16 h, 636 u16 x, u16 y, u16 w, u16 h,
589 void (*callback)(void *), void *data); 637 void (*callback)(void *), void *data);
638int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size,
639 int data_lines);
590 640
591#endif 641#endif
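
The hunks above extend the OMAP DSS platform interface: extra DSI data lanes, a per-output clocks block that replaces the old flat div struct, a get_dimensions() driver op, and a dsi_mux_pads hook so a board can mux the DSI pins only while the interface is in use. A minimal sketch of a board file using the new hook follows; board_dsi_mux_pads, board_lcd_device and the device table are invented for the example, and the actual pad muxing is board specific.

/* Sketch only: board data using the new dsi_mux_pads hook.  Only the
 * omap_dss_board_info layout comes from the header above. */
static void board_dsi_mux_pads(bool enable)
{
	/* Board-specific pad setup: route the DSI lanes when enable is
	 * true, park them in a safe off mode otherwise. */
}

static struct omap_dss_device *board_dss_devices[] = {
	&board_lcd_device,		/* defined elsewhere in the board file */
};

static struct omap_dss_board_info board_dss_data = {
	.num_devices	= ARRAY_SIZE(board_dss_devices),
	.devices	= board_dss_devices,
	.default_device	= &board_lcd_device,
	.dsi_mux_pads	= board_dsi_mux_pads,
};

The DSI code calls the hook around enable and disable, so boards no longer have to leave the pads permanently muxed for DSI.
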
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 2c8d369190b3..d964e68fc61d 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -2,6 +2,7 @@
2#define __ASM_SH_MOBILE_LCDC_H__ 2#define __ASM_SH_MOBILE_LCDC_H__
3 3
4#include <linux/fb.h> 4#include <linux/fb.h>
5#include <video/sh_mobile_meram.h>
5 6
6enum { 7enum {
7 RGB8, /* 24bpp, 8:8:8 */ 8 RGB8, /* 24bpp, 8:8:8 */
@@ -87,11 +88,13 @@ struct sh_mobile_lcdc_chan_cfg {
87 struct sh_mobile_lcdc_bl_info bl_info; 88 struct sh_mobile_lcdc_bl_info bl_info;
88 struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */ 89 struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */
89 int nonstd; 90 int nonstd;
91 struct sh_mobile_meram_cfg *meram_cfg;
90}; 92};
91 93
92struct sh_mobile_lcdc_info { 94struct sh_mobile_lcdc_info {
93 int clock_source; 95 int clock_source;
94 struct sh_mobile_lcdc_chan_cfg ch[2]; 96 struct sh_mobile_lcdc_chan_cfg ch[2];
97 struct sh_mobile_meram_info *meram_dev;
95}; 98};
96 99
97#endif /* __ASM_SH_MOBILE_LCDC_H__ */ 100#endif /* __ASM_SH_MOBILE_LCDC_H__ */
diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h
new file mode 100644
index 000000000000..af602d602b28
--- /dev/null
+++ b/include/video/sh_mobile_meram.h
@@ -0,0 +1,68 @@
1#ifndef __VIDEO_SH_MOBILE_MERAM_H__
2#define __VIDEO_SH_MOBILE_MERAM_H__
3
4/* For sh_mobile_meram_info.addr_mode */
5enum {
6 SH_MOBILE_MERAM_MODE0 = 0,
7 SH_MOBILE_MERAM_MODE1
8};
9
10enum {
11 SH_MOBILE_MERAM_PF_NV = 0,
12 SH_MOBILE_MERAM_PF_RGB,
13 SH_MOBILE_MERAM_PF_NV24
14};
15
16
17struct sh_mobile_meram_priv;
18struct sh_mobile_meram_ops;
19
20struct sh_mobile_meram_info {
21 int addr_mode;
22 struct sh_mobile_meram_ops *ops;
23 struct sh_mobile_meram_priv *priv;
24 struct platform_device *pdev;
25};
26
27/* icb config */
28struct sh_mobile_meram_icb {
29 int marker_icb; /* ICB # for Marker ICB */
30 int cache_icb; /* ICB # for Cache ICB */
31 int meram_offset; /* MERAM Buffer Offset to use */
32 int meram_size; /* MERAM Buffer Size to use */
33
34 int cache_unit; /* bytes to cache per ICB */
35};
36
37struct sh_mobile_meram_cfg {
38 struct sh_mobile_meram_icb icb[2];
39 int pixelformat;
40 int current_reg;
41};
42
43struct module;
44struct sh_mobile_meram_ops {
45 struct module *module;
46 /* register usage of meram */
47 int (*meram_register)(struct sh_mobile_meram_info *meram_dev,
48 struct sh_mobile_meram_cfg *cfg,
49 int xres, int yres, int pixelformat,
50 unsigned long base_addr_y,
51 unsigned long base_addr_c,
52 unsigned long *icb_addr_y,
53 unsigned long *icb_addr_c, int *pitch);
54
55 /* unregister usage of meram */
56 int (*meram_unregister)(struct sh_mobile_meram_info *meram_dev,
57 struct sh_mobile_meram_cfg *cfg);
58
59 /* update meram settings */
60 int (*meram_update)(struct sh_mobile_meram_info *meram_dev,
61 struct sh_mobile_meram_cfg *cfg,
62 unsigned long base_addr_y,
63 unsigned long base_addr_c,
64 unsigned long *icb_addr_y,
65 unsigned long *icb_addr_c);
66};
67
68#endif /* __VIDEO_SH_MOBILE_MERAM_H__ */
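
The new header gives the LCDC driver an ops table (register, unregister, update) for routing frame buffer reads through the MERAM block, and the sh_mobile_lcdc.h hunk above adds the meram_cfg/meram_dev pointers that carry the configuration in from platform data. A board-data sketch is below; the ICB numbers, offsets and sizes are placeholders, and real values depend on how the SoC's MERAM is partitioned between its users.

/* Sketch only: handing a MERAM configuration to the LCDC driver. */
static struct sh_mobile_meram_info board_meram_info = {
	.addr_mode	= SH_MOBILE_MERAM_MODE1,
};

static struct sh_mobile_meram_cfg lcd_meram_cfg = {
	.icb[0] = {
		.marker_icb	= 28,
		.cache_icb	= 24,
		.meram_offset	= 0x0,
		.meram_size	= 0x40,
	},
	.icb[1] = {
		.marker_icb	= 29,
		.cache_icb	= 25,
		.meram_offset	= 0x40,
		.meram_size	= 0x40,
	},
};

static struct sh_mobile_lcdc_info lcdc_info = {
	.meram_dev	= &board_meram_info,
	.ch[0] = {
		.meram_cfg	= &lcd_meram_cfg,
		/* chan, clocks, lcd_cfg and the rest as before */
	},
};

The MERAM driver is expected to fill in .ops, .priv and .pdev when it probes; the board only describes which ICBs and how much buffer space the LCDC channel may use.
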
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 61e523af3c46..3d5d6db864fe 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -45,6 +45,19 @@ typedef uint64_t blkif_sector_t;
45#define BLKIF_OP_WRITE_BARRIER 2 45#define BLKIF_OP_WRITE_BARRIER 2
46 46
47/* 47/*
48 * Recognised if "feature-flush-cache" is present in backend xenbus
49 * info. A flush will ask the underlying storage hardware to flush its
50 * non-volatile caches as appropriate. The "feature-flush-cache" node
51 * contains a boolean indicating whether flush requests are likely to
52 * succeed or fail. Either way, a flush request may fail at any time
53 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
54 * block-device hardware. The boolean simply indicates whether or not it
55 * is worthwhile for the frontend to attempt flushes. If a backend does
56 * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
57 * "feature-flush-cache" node!
58 */
59#define BLKIF_OP_FLUSH_DISKCACHE 3
60/*
48 * Maximum scatter/gather segments per request. 61 * Maximum scatter/gather segments per request.
49 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. 62 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
50 * NB. This could be 12 if the ring indexes weren't stored in the same page. 63 * NB. This could be 12 if the ring indexes weren't stored in the same page.
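
The comment spells out the negotiation for the new opcode: the backend advertises "feature-flush-cache" in xenbus, but a flush may still come back as BLKIF_RSP_EOPNOTSUPP and the frontend has to degrade gracefully. A frontend-side sketch is below; the info structure is invented and stands in for the real blkfront state, and BLKIF_RSP_EOPNOTSUPP is defined elsewhere in this header.

/* Sketch only: frontend policy around BLKIF_OP_FLUSH_DISKCACHE. */
struct sketch_blkfront_info {
	int feature_flush;	/* set if "feature-flush-cache" was present */
};

static unsigned int sketch_pick_cache_op(struct sketch_blkfront_info *info)
{
	/* Prefer a cache flush when advertised, fall back to the older
	 * write barrier otherwise. */
	return info->feature_flush ? BLKIF_OP_FLUSH_DISKCACHE
				   : BLKIF_OP_WRITE_BARRIER;
}

static void sketch_handle_response(struct sketch_blkfront_info *info, int status)
{
	/* As the comment says, a flush can still fail at any time; stop
	 * issuing flushes once the backend refuses one. */
	if (status == BLKIF_RSP_EOPNOTSUPP)
		info->feature_flush = 0;
}
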
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index b33257bc7e83..70213b4515eb 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -58,6 +58,7 @@
58#define __HYPERVISOR_event_channel_op 32 58#define __HYPERVISOR_event_channel_op 32
59#define __HYPERVISOR_physdev_op 33 59#define __HYPERVISOR_physdev_op 33
60#define __HYPERVISOR_hvm_op 34 60#define __HYPERVISOR_hvm_op 34
61#define __HYPERVISOR_tmem_op 38
61 62
62/* Architecture-specific hypercall definitions. */ 63/* Architecture-specific hypercall definitions. */
63#define __HYPERVISOR_arch_0 48 64#define __HYPERVISOR_arch_0 48
@@ -461,6 +462,27 @@ typedef uint8_t xen_domain_handle_t[16];
461#define __mk_unsigned_long(x) x ## UL 462#define __mk_unsigned_long(x) x ## UL
462#define mk_unsigned_long(x) __mk_unsigned_long(x) 463#define mk_unsigned_long(x) __mk_unsigned_long(x)
463 464
465#define TMEM_SPEC_VERSION 1
466
467struct tmem_op {
468 uint32_t cmd;
469 int32_t pool_id;
470 union {
471 struct { /* for cmd == TMEM_NEW_POOL */
472 uint64_t uuid[2];
473 uint32_t flags;
474 } new;
475 struct {
476 uint64_t oid[3];
477 uint32_t index;
478 uint32_t tmem_offset;
479 uint32_t pfn_offset;
480 uint32_t len;
481 GUEST_HANDLE(void) gmfn; /* guest machine page frame */
482 } gen;
483 } u;
484};
485
464#else /* __ASSEMBLY__ */ 486#else /* __ASSEMBLY__ */
465 487
466/* In assembly code we cannot use C numeric constant suffixes. */ 488/* In assembly code we cannot use C numeric constant suffixes. */
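
struct tmem_op is the argument block for the new __HYPERVISOR_tmem_op hypercall used by the transcendent-memory (cleancache) frontends; the 'new' variant creates a pool, the 'gen' variant carries per-page put/get/flush requests. A sketch of building a pool-creation call follows; the command value and the HYPERVISOR_tmem_op() wrapper are not part of this hunk and are shown as assumptions.

/* Sketch only: creating a tmem pool.  TMEM_NEW_POOL_SKETCH is a
 * placeholder for the real command number. */
#define TMEM_NEW_POOL_SKETCH	1

static int sketch_tmem_new_pool(uint64_t uuid_lo, uint64_t uuid_hi,
				uint32_t flags)
{
	struct tmem_op op = {
		.cmd	 = TMEM_NEW_POOL_SKETCH,
		.pool_id = 0,			/* chosen by the hypervisor */
	};

	op.u.new.uuid[0] = uuid_lo;
	op.u.new.uuid[1] = uuid_hi;
	op.u.new.flags	 = flags;		/* pool flags per the tmem ABI */

	return HYPERVISOR_tmem_op(&op);		/* assumed wrapper for hypercall 38 */
}
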
diff --git a/init/Kconfig b/init/Kconfig
index 332aac649966..ebafac4231ee 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -589,14 +589,6 @@ config CGROUP_DEBUG
589 589
590 Say N if unsure. 590 Say N if unsure.
591 591
592config CGROUP_NS
593 bool "Namespace cgroup subsystem"
594 help
595 Provides a simple namespace cgroup subsystem to
596 provide hierarchical naming of sets of namespaces,
597 for instance virtual servers and checkpoint/restart
598 jobs.
599
600config CGROUP_FREEZER 592config CGROUP_FREEZER
601 bool "Freezer cgroup subsystem" 593 bool "Freezer cgroup subsystem"
602 help 594 help
diff --git a/init/calibrate.c b/init/calibrate.c
index 76ac9194cbc4..cfd7000c9d71 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -38,6 +38,9 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
38 unsigned long timer_rate_min, timer_rate_max; 38 unsigned long timer_rate_min, timer_rate_max;
39 unsigned long good_timer_sum = 0; 39 unsigned long good_timer_sum = 0;
40 unsigned long good_timer_count = 0; 40 unsigned long good_timer_count = 0;
41 unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
42 int max = -1; /* index of measured_times with max/min values or not set */
43 int min = -1;
41 int i; 44 int i;
42 45
43 if (read_current_timer(&pre_start) < 0 ) 46 if (read_current_timer(&pre_start) < 0 )
@@ -90,18 +93,78 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
90 * If the upper limit and lower limit of the timer_rate is 93 * If the upper limit and lower limit of the timer_rate is
91 * >= 12.5% apart, redo calibration. 94 * >= 12.5% apart, redo calibration.
92 */ 95 */
93 if (pre_start != 0 && pre_end != 0 && 96 printk(KERN_DEBUG "calibrate_delay_direct() timer_rate_max=%lu "
97 "timer_rate_min=%lu pre_start=%lu pre_end=%lu\n",
98 timer_rate_max, timer_rate_min, pre_start, pre_end);
99 if (start >= post_end)
100 printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
101 "timer_rate as we had a TSC wrap around"
102 " start=%lu >=post_end=%lu\n",
103 start, post_end);
104 if (start < post_end && pre_start != 0 && pre_end != 0 &&
94 (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) { 105 (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
95 good_timer_count++; 106 good_timer_count++;
96 good_timer_sum += timer_rate_max; 107 good_timer_sum += timer_rate_max;
97 } 108 measured_times[i] = timer_rate_max;
109 if (max < 0 || timer_rate_max > measured_times[max])
110 max = i;
111 if (min < 0 || timer_rate_max < measured_times[min])
112 min = i;
113 } else
114 measured_times[i] = 0;
115
98 } 116 }
99 117
100 if (good_timer_count) 118 /*
101 return (good_timer_sum/good_timer_count); 119 * Find the maximum & minimum - if they differ too much throw out the
120 * one with the largest difference from the mean and try again...
121 */
122 while (good_timer_count > 1) {
123 unsigned long estimate;
124 unsigned long maxdiff;
125
126 /* compute the estimate */
127 estimate = (good_timer_sum/good_timer_count);
128 maxdiff = estimate >> 3;
129
 130	/* if range is within 12.5% let's take it */
131 if ((measured_times[max] - measured_times[min]) < maxdiff)
132 return estimate;
133
134 /* ok - drop the worse value and try again... */
135 good_timer_sum = 0;
136 good_timer_count = 0;
137 if ((measured_times[max] - estimate) <
138 (estimate - measured_times[min])) {
139 printk(KERN_NOTICE "calibrate_delay_direct() dropping "
140 "min bogoMips estimate %d = %lu\n",
141 min, measured_times[min]);
142 measured_times[min] = 0;
143 min = max;
144 } else {
145 printk(KERN_NOTICE "calibrate_delay_direct() dropping "
146 "max bogoMips estimate %d = %lu\n",
147 max, measured_times[max]);
148 measured_times[max] = 0;
149 max = min;
150 }
151
152 for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
153 if (measured_times[i] == 0)
154 continue;
155 good_timer_count++;
156 good_timer_sum += measured_times[i];
157 if (measured_times[i] < measured_times[min])
158 min = i;
159 if (measured_times[i] > measured_times[max])
160 max = i;
161 }
162
163 }
102 164
103 printk(KERN_WARNING "calibrate_delay_direct() failed to get a good " 165 printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
104 "estimate for loops_per_jiffy.\nProbably due to long platform interrupts. Consider using \"lpj=\" boot option.\n"); 166 "estimate for loops_per_jiffy.\nProbably due to long platform "
167 "interrupts. Consider using \"lpj=\" boot option.\n");
105 return 0; 168 return 0;
106} 169}
107#else 170#else
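
The rewritten loop keeps every per-attempt timer rate and, whenever the spread between the fastest and slowest samples exceeds one eighth of the running average (the 12.5% bound mentioned above), throws away whichever extreme sits farther from the mean and recomputes. The same filter is easy to try out in user space; the sample values below are arbitrary.

/* Stand-alone rendering of the outlier filter; build with: cc -o filter filter.c */
#include <stdio.h>

#define NSAMPLES 5

int main(void)
{
	unsigned long t[NSAMPLES] = { 4900000, 4980000, 5010000, 5030000, 9800000 };
	unsigned long sum, estimate;
	int i, min, max, count;

	for (;;) {
		sum = 0; count = 0; min = -1; max = -1;
		for (i = 0; i < NSAMPLES; i++) {
			if (!t[i])		/* dropped sample */
				continue;
			sum += t[i];
			count++;
			if (min < 0 || t[i] < t[min])
				min = i;
			if (max < 0 || t[i] > t[max])
				max = i;
		}
		if (!count)
			return 1;
		estimate = sum / count;
		if (count == 1 || (t[max] - t[min]) < (estimate >> 3))
			break;
		/* drop whichever extreme sits farther from the mean */
		if ((t[max] - estimate) < (estimate - t[min]))
			t[min] = 0;
		else
			t[max] = 0;
	}
	printf("estimate: %lu\n", estimate);
	return 0;
}

With the values above the 9.8M outlier is discarded on the first pass and the estimate settles at 4.98M, which is what the kernel loop would then return as the timer rate.
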
diff --git a/init/main.c b/init/main.c
index 48df882d51d2..d2f1e086bf33 100644
--- a/init/main.c
+++ b/init/main.c
@@ -504,11 +504,14 @@ asmlinkage void __init start_kernel(void)
504 * These use large bootmem allocations and must precede 504 * These use large bootmem allocations and must precede
505 * kmem_cache_init() 505 * kmem_cache_init()
506 */ 506 */
507 setup_log_buf(0);
507 pidhash_init(); 508 pidhash_init();
508 vfs_caches_init_early(); 509 vfs_caches_init_early();
509 sort_main_extable(); 510 sort_main_extable();
510 trap_init(); 511 trap_init();
511 mm_init(); 512 mm_init();
513 BUG_ON(mm_init_cpumask(&init_mm, 0));
514
512 /* 515 /*
513 * Set up the scheduler prior starting any interrupts (such as the 516 * Set up the scheduler prior starting any interrupts (such as the
514 * timer interrupt). Full topology setup happens at smp_init() 517 * timer interrupt). Full topology setup happens at smp_init()
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 8054c8e5faf1..ce0a647869b1 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -12,6 +12,7 @@
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/mount.h> 13#include <linux/mount.h>
14#include <linux/user_namespace.h> 14#include <linux/user_namespace.h>
15#include <linux/proc_fs.h>
15 16
16#include "util.h" 17#include "util.h"
17 18
@@ -140,3 +141,39 @@ void put_ipc_ns(struct ipc_namespace *ns)
140 free_ipc_ns(ns); 141 free_ipc_ns(ns);
141 } 142 }
142} 143}
144
145static void *ipcns_get(struct task_struct *task)
146{
147 struct ipc_namespace *ns = NULL;
148 struct nsproxy *nsproxy;
149
150 rcu_read_lock();
151 nsproxy = task_nsproxy(task);
152 if (nsproxy)
153 ns = get_ipc_ns(nsproxy->ipc_ns);
154 rcu_read_unlock();
155
156 return ns;
157}
158
159static void ipcns_put(void *ns)
160{
161 return put_ipc_ns(ns);
162}
163
164static int ipcns_install(struct nsproxy *nsproxy, void *ns)
165{
166 /* Ditch state from the old ipc namespace */
167 exit_sem(current);
168 put_ipc_ns(nsproxy->ipc_ns);
169 nsproxy->ipc_ns = get_ipc_ns(ns);
170 return 0;
171}
172
173const struct proc_ns_operations ipcns_operations = {
174 .name = "ipc",
175 .type = CLONE_NEWIPC,
176 .get = ipcns_get,
177 .put = ipcns_put,
178 .install = ipcns_install,
179};
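
Registering ipcns_operations is what makes the IPC namespace show up as /proc/<pid>/ns/ipc and lets a task be moved into another namespace; the .install hook drops the caller's SysV semaphore undo state before switching nsproxy->ipc_ns. From user space the end result looks roughly like the sketch below (the setns() wrapper and the /proc ns directory come from other patches in this series, not from this hunk).

/* Sketch only: joining another process's IPC namespace via the new
 * "ipc" proc entry.  Requires CAP_SYS_ADMIN and a setns-capable libc. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static int join_ipc_ns(pid_t pid)
{
	char path[64];
	int fd, ret;

	snprintf(path, sizeof(path), "/proc/%d/ns/ipc", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	ret = setns(fd, CLONE_NEWIPC);	/* 0: caller now uses pid's IPC namespace */
	close(fd);
	return ret;
}
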
diff --git a/ipc/shm.c b/ipc/shm.c
index 729acb7e3148..ab3385a21b27 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -347,7 +347,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
347 struct file * file; 347 struct file * file;
348 char name[13]; 348 char name[13];
349 int id; 349 int id;
350 int acctflag = 0; 350 vm_flags_t acctflag = 0;
351 351
352 if (size < SHMMIN || size > ns->shm_ctlmax) 352 if (size < SHMMIN || size > ns->shm_ctlmax)
353 return -EINVAL; 353 return -EINVAL;
diff --git a/kernel/Makefile b/kernel/Makefile
index e9cf19155b46..2d64cfcc8b42 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -61,7 +61,6 @@ obj-$(CONFIG_COMPAT) += compat.o
61obj-$(CONFIG_CGROUPS) += cgroup.o 61obj-$(CONFIG_CGROUPS) += cgroup.o
62obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o 62obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
63obj-$(CONFIG_CPUSETS) += cpuset.o 63obj-$(CONFIG_CPUSETS) += cpuset.o
64obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
65obj-$(CONFIG_UTS_NS) += utsname.o 64obj-$(CONFIG_UTS_NS) += utsname.o
66obj-$(CONFIG_USER_NS) += user_namespace.o 65obj-$(CONFIG_USER_NS) += user_namespace.o
67obj-$(CONFIG_PID_NS) += pid_namespace.o 66obj-$(CONFIG_PID_NS) += pid_namespace.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 909a35510af5..2731d115d725 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -57,6 +57,7 @@
57#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ 57#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
58#include <linux/eventfd.h> 58#include <linux/eventfd.h>
59#include <linux/poll.h> 59#include <linux/poll.h>
60#include <linux/flex_array.h> /* used in cgroup_attach_proc */
60 61
61#include <asm/atomic.h> 62#include <asm/atomic.h>
62 63
@@ -1735,6 +1736,76 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1735} 1736}
1736EXPORT_SYMBOL_GPL(cgroup_path); 1737EXPORT_SYMBOL_GPL(cgroup_path);
1737 1738
1739/*
1740 * cgroup_task_migrate - move a task from one cgroup to another.
1741 *
1742 * 'guarantee' is set if the caller promises that a new css_set for the task
1743 * will already exist. If not set, this function might sleep, and can fail with
1744 * -ENOMEM. Otherwise, it can only fail with -ESRCH.
1745 */
1746static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
1747 struct task_struct *tsk, bool guarantee)
1748{
1749 struct css_set *oldcg;
1750 struct css_set *newcg;
1751
1752 /*
1753 * get old css_set. we need to take task_lock and refcount it, because
1754 * an exiting task can change its css_set to init_css_set and drop its
1755 * old one without taking cgroup_mutex.
1756 */
1757 task_lock(tsk);
1758 oldcg = tsk->cgroups;
1759 get_css_set(oldcg);
1760 task_unlock(tsk);
1761
1762 /* locate or allocate a new css_set for this task. */
1763 if (guarantee) {
1764 /* we know the css_set we want already exists. */
1765 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
1766 read_lock(&css_set_lock);
1767 newcg = find_existing_css_set(oldcg, cgrp, template);
1768 BUG_ON(!newcg);
1769 get_css_set(newcg);
1770 read_unlock(&css_set_lock);
1771 } else {
1772 might_sleep();
1773 /* find_css_set will give us newcg already referenced. */
1774 newcg = find_css_set(oldcg, cgrp);
1775 if (!newcg) {
1776 put_css_set(oldcg);
1777 return -ENOMEM;
1778 }
1779 }
1780 put_css_set(oldcg);
1781
1782 /* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
1783 task_lock(tsk);
1784 if (tsk->flags & PF_EXITING) {
1785 task_unlock(tsk);
1786 put_css_set(newcg);
1787 return -ESRCH;
1788 }
1789 rcu_assign_pointer(tsk->cgroups, newcg);
1790 task_unlock(tsk);
1791
1792 /* Update the css_set linked lists if we're using them */
1793 write_lock(&css_set_lock);
1794 if (!list_empty(&tsk->cg_list))
1795 list_move(&tsk->cg_list, &newcg->tasks);
1796 write_unlock(&css_set_lock);
1797
1798 /*
1799 * We just gained a reference on oldcg by taking it from the task. As
1800 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
1801 * it here; it will be freed under RCU.
1802 */
1803 put_css_set(oldcg);
1804
1805 set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
1806 return 0;
1807}
1808
1738/** 1809/**
1739 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp' 1810 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
1740 * @cgrp: the cgroup the task is attaching to 1811 * @cgrp: the cgroup the task is attaching to
@@ -1745,11 +1816,9 @@ EXPORT_SYMBOL_GPL(cgroup_path);
1745 */ 1816 */
1746int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) 1817int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1747{ 1818{
1748 int retval = 0; 1819 int retval;
1749 struct cgroup_subsys *ss, *failed_ss = NULL; 1820 struct cgroup_subsys *ss, *failed_ss = NULL;
1750 struct cgroup *oldcgrp; 1821 struct cgroup *oldcgrp;
1751 struct css_set *cg;
1752 struct css_set *newcg;
1753 struct cgroupfs_root *root = cgrp->root; 1822 struct cgroupfs_root *root = cgrp->root;
1754 1823
1755 /* Nothing to do if the task is already in that cgroup */ 1824 /* Nothing to do if the task is already in that cgroup */
@@ -1759,7 +1828,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1759 1828
1760 for_each_subsys(root, ss) { 1829 for_each_subsys(root, ss) {
1761 if (ss->can_attach) { 1830 if (ss->can_attach) {
1762 retval = ss->can_attach(ss, cgrp, tsk, false); 1831 retval = ss->can_attach(ss, cgrp, tsk);
1763 if (retval) { 1832 if (retval) {
1764 /* 1833 /*
1765 * Remember on which subsystem the can_attach() 1834 * Remember on which subsystem the can_attach()
@@ -1771,46 +1840,29 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1771 goto out; 1840 goto out;
1772 } 1841 }
1773 } 1842 }
1843 if (ss->can_attach_task) {
1844 retval = ss->can_attach_task(cgrp, tsk);
1845 if (retval) {
1846 failed_ss = ss;
1847 goto out;
1848 }
1849 }
1774 } 1850 }
1775 1851
1776 task_lock(tsk); 1852 retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
1777 cg = tsk->cgroups; 1853 if (retval)
1778 get_css_set(cg);
1779 task_unlock(tsk);
1780 /*
1781 * Locate or allocate a new css_set for this task,
1782 * based on its final set of cgroups
1783 */
1784 newcg = find_css_set(cg, cgrp);
1785 put_css_set(cg);
1786 if (!newcg) {
1787 retval = -ENOMEM;
1788 goto out;
1789 }
1790
1791 task_lock(tsk);
1792 if (tsk->flags & PF_EXITING) {
1793 task_unlock(tsk);
1794 put_css_set(newcg);
1795 retval = -ESRCH;
1796 goto out; 1854 goto out;
1797 }
1798 rcu_assign_pointer(tsk->cgroups, newcg);
1799 task_unlock(tsk);
1800
1801 /* Update the css_set linked lists if we're using them */
1802 write_lock(&css_set_lock);
1803 if (!list_empty(&tsk->cg_list))
1804 list_move(&tsk->cg_list, &newcg->tasks);
1805 write_unlock(&css_set_lock);
1806 1855
1807 for_each_subsys(root, ss) { 1856 for_each_subsys(root, ss) {
1857 if (ss->pre_attach)
1858 ss->pre_attach(cgrp);
1859 if (ss->attach_task)
1860 ss->attach_task(cgrp, tsk);
1808 if (ss->attach) 1861 if (ss->attach)
1809 ss->attach(ss, cgrp, oldcgrp, tsk, false); 1862 ss->attach(ss, cgrp, oldcgrp, tsk);
1810 } 1863 }
1811 set_bit(CGRP_RELEASABLE, &oldcgrp->flags); 1864
1812 synchronize_rcu(); 1865 synchronize_rcu();
1813 put_css_set(cg);
1814 1866
1815 /* 1867 /*
1816 * wake up rmdir() waiter. the rmdir should fail since the cgroup 1868 * wake up rmdir() waiter. the rmdir should fail since the cgroup
@@ -1829,7 +1881,7 @@ out:
1829 */ 1881 */
1830 break; 1882 break;
1831 if (ss->cancel_attach) 1883 if (ss->cancel_attach)
1832 ss->cancel_attach(ss, cgrp, tsk, false); 1884 ss->cancel_attach(ss, cgrp, tsk);
1833 } 1885 }
1834 } 1886 }
1835 return retval; 1887 return retval;
@@ -1860,49 +1912,370 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
1860EXPORT_SYMBOL_GPL(cgroup_attach_task_all); 1912EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
1861 1913
1862/* 1914/*
1863 * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex 1915 * cgroup_attach_proc works in two stages, the first of which prefetches all
1864 * held. May take task_lock of task 1916 * new css_sets needed (to make sure we have enough memory before committing
1917 * to the move) and stores them in a list of entries of the following type.
1918 * TODO: possible optimization: use css_set->rcu_head for chaining instead
1919 */
1920struct cg_list_entry {
1921 struct css_set *cg;
1922 struct list_head links;
1923};
1924
1925static bool css_set_check_fetched(struct cgroup *cgrp,
1926 struct task_struct *tsk, struct css_set *cg,
1927 struct list_head *newcg_list)
1928{
1929 struct css_set *newcg;
1930 struct cg_list_entry *cg_entry;
1931 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
1932
1933 read_lock(&css_set_lock);
1934 newcg = find_existing_css_set(cg, cgrp, template);
1935 if (newcg)
1936 get_css_set(newcg);
1937 read_unlock(&css_set_lock);
1938
1939 /* doesn't exist at all? */
1940 if (!newcg)
1941 return false;
1942 /* see if it's already in the list */
1943 list_for_each_entry(cg_entry, newcg_list, links) {
1944 if (cg_entry->cg == newcg) {
1945 put_css_set(newcg);
1946 return true;
1947 }
1948 }
1949
1950 /* not found */
1951 put_css_set(newcg);
1952 return false;
1953}
1954
1955/*
1956 * Find the new css_set and store it in the list in preparation for moving the
1957 * given task to the given cgroup. Returns 0 or -ENOMEM.
1958 */
1959static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
1960 struct list_head *newcg_list)
1961{
1962 struct css_set *newcg;
1963 struct cg_list_entry *cg_entry;
1964
1965 /* ensure a new css_set will exist for this thread */
1966 newcg = find_css_set(cg, cgrp);
1967 if (!newcg)
1968 return -ENOMEM;
1969 /* add it to the list */
1970 cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL);
1971 if (!cg_entry) {
1972 put_css_set(newcg);
1973 return -ENOMEM;
1974 }
1975 cg_entry->cg = newcg;
1976 list_add(&cg_entry->links, newcg_list);
1977 return 0;
1978}
1979
1980/**
1981 * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
1982 * @cgrp: the cgroup to attach to
1983 * @leader: the threadgroup leader task_struct of the group to be attached
1984 *
1985 * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
1986 * take task_lock of each thread in leader's threadgroup individually in turn.
1987 */
1988int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
1989{
1990 int retval, i, group_size;
1991 struct cgroup_subsys *ss, *failed_ss = NULL;
1992 bool cancel_failed_ss = false;
1993 /* guaranteed to be initialized later, but the compiler needs this */
1994 struct cgroup *oldcgrp = NULL;
1995 struct css_set *oldcg;
1996 struct cgroupfs_root *root = cgrp->root;
1997 /* threadgroup list cursor and array */
1998 struct task_struct *tsk;
1999 struct flex_array *group;
2000 /*
2001 * we need to make sure we have css_sets for all the tasks we're
2002 * going to move -before- we actually start moving them, so that in
2003 * case we get an ENOMEM we can bail out before making any changes.
2004 */
2005 struct list_head newcg_list;
2006 struct cg_list_entry *cg_entry, *temp_nobe;
2007
2008 /*
2009 * step 0: in order to do expensive, possibly blocking operations for
2010 * every thread, we cannot iterate the thread group list, since it needs
2011 * rcu or tasklist locked. instead, build an array of all threads in the
2012 * group - threadgroup_fork_lock prevents new threads from appearing,
2013 * and if threads exit, this will just be an over-estimate.
2014 */
2015 group_size = get_nr_threads(leader);
2016 /* flex_array supports very large thread-groups better than kmalloc. */
2017 group = flex_array_alloc(sizeof(struct task_struct *), group_size,
2018 GFP_KERNEL);
2019 if (!group)
2020 return -ENOMEM;
2021 /* pre-allocate to guarantee space while iterating in rcu read-side. */
2022 retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
2023 if (retval)
2024 goto out_free_group_list;
2025
2026 /* prevent changes to the threadgroup list while we take a snapshot. */
2027 rcu_read_lock();
2028 if (!thread_group_leader(leader)) {
2029 /*
2030 * a race with de_thread from another thread's exec() may strip
2031 * us of our leadership, making while_each_thread unsafe to use
2032 * on this task. if this happens, there is no choice but to
2033 * throw this task away and try again (from cgroup_procs_write);
2034 * this is "double-double-toil-and-trouble-check locking".
2035 */
2036 rcu_read_unlock();
2037 retval = -EAGAIN;
2038 goto out_free_group_list;
2039 }
2040 /* take a reference on each task in the group to go in the array. */
2041 tsk = leader;
2042 i = 0;
2043 do {
2044 /* as per above, nr_threads may decrease, but not increase. */
2045 BUG_ON(i >= group_size);
2046 get_task_struct(tsk);
2047 /*
2048 * saying GFP_ATOMIC has no effect here because we did prealloc
2049 * earlier, but it's good form to communicate our expectations.
2050 */
2051 retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
2052 BUG_ON(retval != 0);
2053 i++;
2054 } while_each_thread(leader, tsk);
2055 /* remember the number of threads in the array for later. */
2056 group_size = i;
2057 rcu_read_unlock();
2058
2059 /*
2060 * step 1: check that we can legitimately attach to the cgroup.
2061 */
2062 for_each_subsys(root, ss) {
2063 if (ss->can_attach) {
2064 retval = ss->can_attach(ss, cgrp, leader);
2065 if (retval) {
2066 failed_ss = ss;
2067 goto out_cancel_attach;
2068 }
2069 }
2070 /* a callback to be run on every thread in the threadgroup. */
2071 if (ss->can_attach_task) {
2072 /* run on each task in the threadgroup. */
2073 for (i = 0; i < group_size; i++) {
2074 tsk = flex_array_get_ptr(group, i);
2075 retval = ss->can_attach_task(cgrp, tsk);
2076 if (retval) {
2077 failed_ss = ss;
2078 cancel_failed_ss = true;
2079 goto out_cancel_attach;
2080 }
2081 }
2082 }
2083 }
2084
2085 /*
2086 * step 2: make sure css_sets exist for all threads to be migrated.
2087 * we use find_css_set, which allocates a new one if necessary.
2088 */
2089 INIT_LIST_HEAD(&newcg_list);
2090 for (i = 0; i < group_size; i++) {
2091 tsk = flex_array_get_ptr(group, i);
2092 /* nothing to do if this task is already in the cgroup */
2093 oldcgrp = task_cgroup_from_root(tsk, root);
2094 if (cgrp == oldcgrp)
2095 continue;
2096 /* get old css_set pointer */
2097 task_lock(tsk);
2098 if (tsk->flags & PF_EXITING) {
2099 /* ignore this task if it's going away */
2100 task_unlock(tsk);
2101 continue;
2102 }
2103 oldcg = tsk->cgroups;
2104 get_css_set(oldcg);
2105 task_unlock(tsk);
2106 /* see if the new one for us is already in the list? */
2107 if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) {
2108 /* was already there, nothing to do. */
2109 put_css_set(oldcg);
2110 } else {
2111 /* we don't already have it. get new one. */
2112 retval = css_set_prefetch(cgrp, oldcg, &newcg_list);
2113 put_css_set(oldcg);
2114 if (retval)
2115 goto out_list_teardown;
2116 }
2117 }
2118
2119 /*
2120 * step 3: now that we're guaranteed success wrt the css_sets, proceed
2121 * to move all tasks to the new cgroup, calling ss->attach_task for each
2122 * one along the way. there are no failure cases after here, so this is
2123 * the commit point.
2124 */
2125 for_each_subsys(root, ss) {
2126 if (ss->pre_attach)
2127 ss->pre_attach(cgrp);
2128 }
2129 for (i = 0; i < group_size; i++) {
2130 tsk = flex_array_get_ptr(group, i);
2131 /* leave current thread as it is if it's already there */
2132 oldcgrp = task_cgroup_from_root(tsk, root);
2133 if (cgrp == oldcgrp)
2134 continue;
2135 /* attach each task to each subsystem */
2136 for_each_subsys(root, ss) {
2137 if (ss->attach_task)
2138 ss->attach_task(cgrp, tsk);
2139 }
2140 /* if the thread is PF_EXITING, it can just get skipped. */
2141 retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
2142 BUG_ON(retval != 0 && retval != -ESRCH);
2143 }
2144 /* nothing is sensitive to fork() after this point. */
2145
2146 /*
2147 * step 4: do expensive, non-thread-specific subsystem callbacks.
2148 * TODO: if ever a subsystem needs to know the oldcgrp for each task
2149 * being moved, this call will need to be reworked to communicate that.
2150 */
2151 for_each_subsys(root, ss) {
2152 if (ss->attach)
2153 ss->attach(ss, cgrp, oldcgrp, leader);
2154 }
2155
2156 /*
2157 * step 5: success! and cleanup
2158 */
2159 synchronize_rcu();
2160 cgroup_wakeup_rmdir_waiter(cgrp);
2161 retval = 0;
2162out_list_teardown:
2163 /* clean up the list of prefetched css_sets. */
2164 list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) {
2165 list_del(&cg_entry->links);
2166 put_css_set(cg_entry->cg);
2167 kfree(cg_entry);
2168 }
2169out_cancel_attach:
2170 /* same deal as in cgroup_attach_task */
2171 if (retval) {
2172 for_each_subsys(root, ss) {
2173 if (ss == failed_ss) {
2174 if (cancel_failed_ss && ss->cancel_attach)
2175 ss->cancel_attach(ss, cgrp, leader);
2176 break;
2177 }
2178 if (ss->cancel_attach)
2179 ss->cancel_attach(ss, cgrp, leader);
2180 }
2181 }
2182 /* clean up the array of referenced threads in the group. */
2183 for (i = 0; i < group_size; i++) {
2184 tsk = flex_array_get_ptr(group, i);
2185 put_task_struct(tsk);
2186 }
2187out_free_group_list:
2188 flex_array_free(group);
2189 return retval;
2190}
2191
2192/*
2193 * Find the task_struct of the task to attach by vpid and pass it along to the
2194 * function to attach either it or all tasks in its threadgroup. Will take
2195 * cgroup_mutex; may take task_lock of task.
1865 */ 2196 */
1866static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) 2197static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
1867{ 2198{
1868 struct task_struct *tsk; 2199 struct task_struct *tsk;
1869 const struct cred *cred = current_cred(), *tcred; 2200 const struct cred *cred = current_cred(), *tcred;
1870 int ret; 2201 int ret;
1871 2202
2203 if (!cgroup_lock_live_group(cgrp))
2204 return -ENODEV;
2205
1872 if (pid) { 2206 if (pid) {
1873 rcu_read_lock(); 2207 rcu_read_lock();
1874 tsk = find_task_by_vpid(pid); 2208 tsk = find_task_by_vpid(pid);
1875 if (!tsk || tsk->flags & PF_EXITING) { 2209 if (!tsk) {
1876 rcu_read_unlock(); 2210 rcu_read_unlock();
2211 cgroup_unlock();
2212 return -ESRCH;
2213 }
2214 if (threadgroup) {
2215 /*
2216 * RCU protects this access, since tsk was found in the
2217 * tid map. a race with de_thread may cause group_leader
2218 * to stop being the leader, but cgroup_attach_proc will
2219 * detect it later.
2220 */
2221 tsk = tsk->group_leader;
2222 } else if (tsk->flags & PF_EXITING) {
2223 /* optimization for the single-task-only case */
2224 rcu_read_unlock();
2225 cgroup_unlock();
1877 return -ESRCH; 2226 return -ESRCH;
1878 } 2227 }
1879 2228
2229 /*
2230 * even if we're attaching all tasks in the thread group, we
2231 * only need to check permissions on one of them.
2232 */
1880 tcred = __task_cred(tsk); 2233 tcred = __task_cred(tsk);
1881 if (cred->euid && 2234 if (cred->euid &&
1882 cred->euid != tcred->uid && 2235 cred->euid != tcred->uid &&
1883 cred->euid != tcred->suid) { 2236 cred->euid != tcred->suid) {
1884 rcu_read_unlock(); 2237 rcu_read_unlock();
2238 cgroup_unlock();
1885 return -EACCES; 2239 return -EACCES;
1886 } 2240 }
1887 get_task_struct(tsk); 2241 get_task_struct(tsk);
1888 rcu_read_unlock(); 2242 rcu_read_unlock();
1889 } else { 2243 } else {
1890 tsk = current; 2244 if (threadgroup)
2245 tsk = current->group_leader;
2246 else
2247 tsk = current;
1891 get_task_struct(tsk); 2248 get_task_struct(tsk);
1892 } 2249 }
1893 2250
1894 ret = cgroup_attach_task(cgrp, tsk); 2251 if (threadgroup) {
2252 threadgroup_fork_write_lock(tsk);
2253 ret = cgroup_attach_proc(cgrp, tsk);
2254 threadgroup_fork_write_unlock(tsk);
2255 } else {
2256 ret = cgroup_attach_task(cgrp, tsk);
2257 }
1895 put_task_struct(tsk); 2258 put_task_struct(tsk);
2259 cgroup_unlock();
1896 return ret; 2260 return ret;
1897} 2261}
1898 2262
1899static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid) 2263static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
1900{ 2264{
2265 return attach_task_by_pid(cgrp, pid, false);
2266}
2267
2268static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
2269{
1901 int ret; 2270 int ret;
1902 if (!cgroup_lock_live_group(cgrp)) 2271 do {
1903 return -ENODEV; 2272 /*
1904 ret = attach_task_by_pid(cgrp, pid); 2273 * attach_proc fails with -EAGAIN if threadgroup leadership
1905 cgroup_unlock(); 2274 * changes in the middle of the operation, in which case we need
2275 * to find the task_struct for the new leader and start over.
2276 */
2277 ret = attach_task_by_pid(cgrp, tgid, true);
2278 } while (ret == -EAGAIN);
1906 return ret; 2279 return ret;
1907} 2280}
1908 2281
@@ -3259,9 +3632,9 @@ static struct cftype files[] = {
3259 { 3632 {
3260 .name = CGROUP_FILE_GENERIC_PREFIX "procs", 3633 .name = CGROUP_FILE_GENERIC_PREFIX "procs",
3261 .open = cgroup_procs_open, 3634 .open = cgroup_procs_open,
3262 /* .write_u64 = cgroup_procs_write, TODO */ 3635 .write_u64 = cgroup_procs_write,
3263 .release = cgroup_pidlist_release, 3636 .release = cgroup_pidlist_release,
3264 .mode = S_IRUGO, 3637 .mode = S_IRUGO | S_IWUSR,
3265 }, 3638 },
3266 { 3639 {
3267 .name = "notify_on_release", 3640 .name = "notify_on_release",
@@ -4257,122 +4630,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
4257} 4630}
4258 4631
4259/** 4632/**
4260 * cgroup_clone - clone the cgroup the given subsystem is attached to
4261 * @tsk: the task to be moved
4262 * @subsys: the given subsystem
4263 * @nodename: the name for the new cgroup
4264 *
4265 * Duplicate the current cgroup in the hierarchy that the given
4266 * subsystem is attached to, and move this task into the new
4267 * child.
4268 */
4269int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
4270 char *nodename)
4271{
4272 struct dentry *dentry;
4273 int ret = 0;
4274 struct cgroup *parent, *child;
4275 struct inode *inode;
4276 struct css_set *cg;
4277 struct cgroupfs_root *root;
4278 struct cgroup_subsys *ss;
4279
4280 /* We shouldn't be called by an unregistered subsystem */
4281 BUG_ON(!subsys->active);
4282
4283 /* First figure out what hierarchy and cgroup we're dealing
4284 * with, and pin them so we can drop cgroup_mutex */
4285 mutex_lock(&cgroup_mutex);
4286 again:
4287 root = subsys->root;
4288 if (root == &rootnode) {
4289 mutex_unlock(&cgroup_mutex);
4290 return 0;
4291 }
4292
4293 /* Pin the hierarchy */
4294 if (!atomic_inc_not_zero(&root->sb->s_active)) {
4295 /* We race with the final deactivate_super() */
4296 mutex_unlock(&cgroup_mutex);
4297 return 0;
4298 }
4299
4300 /* Keep the cgroup alive */
4301 task_lock(tsk);
4302 parent = task_cgroup(tsk, subsys->subsys_id);
4303 cg = tsk->cgroups;
4304 get_css_set(cg);
4305 task_unlock(tsk);
4306
4307 mutex_unlock(&cgroup_mutex);
4308
4309 /* Now do the VFS work to create a cgroup */
4310 inode = parent->dentry->d_inode;
4311
4312 /* Hold the parent directory mutex across this operation to
4313 * stop anyone else deleting the new cgroup */
4314 mutex_lock(&inode->i_mutex);
4315 dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
4316 if (IS_ERR(dentry)) {
4317 printk(KERN_INFO
4318 "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
4319 PTR_ERR(dentry));
4320 ret = PTR_ERR(dentry);
4321 goto out_release;
4322 }
4323
4324 /* Create the cgroup directory, which also creates the cgroup */
4325 ret = vfs_mkdir(inode, dentry, 0755);
4326 child = __d_cgrp(dentry);
4327 dput(dentry);
4328 if (ret) {
4329 printk(KERN_INFO
4330 "Failed to create cgroup %s: %d\n", nodename,
4331 ret);
4332 goto out_release;
4333 }
4334
4335 /* The cgroup now exists. Retake cgroup_mutex and check
4336 * that we're still in the same state that we thought we
4337 * were. */
4338 mutex_lock(&cgroup_mutex);
4339 if ((root != subsys->root) ||
4340 (parent != task_cgroup(tsk, subsys->subsys_id))) {
4341 /* Aargh, we raced ... */
4342 mutex_unlock(&inode->i_mutex);
4343 put_css_set(cg);
4344
4345 deactivate_super(root->sb);
4346 /* The cgroup is still accessible in the VFS, but
4347 * we're not going to try to rmdir() it at this
4348 * point. */
4349 printk(KERN_INFO
4350 "Race in cgroup_clone() - leaking cgroup %s\n",
4351 nodename);
4352 goto again;
4353 }
4354
4355 /* do any required auto-setup */
4356 for_each_subsys(root, ss) {
4357 if (ss->post_clone)
4358 ss->post_clone(ss, child);
4359 }
4360
4361 /* All seems fine. Finish by moving the task into the new cgroup */
4362 ret = cgroup_attach_task(child, tsk);
4363 mutex_unlock(&cgroup_mutex);
4364
4365 out_release:
4366 mutex_unlock(&inode->i_mutex);
4367
4368 mutex_lock(&cgroup_mutex);
4369 put_css_set(cg);
4370 mutex_unlock(&cgroup_mutex);
4371 deactivate_super(root->sb);
4372 return ret;
4373}
4374
4375/**
4376 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp 4633 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
4377 * @cgrp: the cgroup in question 4634 * @cgrp: the cgroup in question
4378 * @task: the task in question 4635 * @task: the task in question
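
Taken together, these hunks split the attach path into group-wide and per-thread callbacks and finally make cgroup.procs writable: writing a TGID runs cgroup_attach_proc() over every thread in the group, retrying if the group leader changes mid-operation. The sketch below shows the callback set a controller fills in under the new scheme; the subsystem and its bodies are invented, only the signatures and field names come from the code above.

/* Sketch only: a controller under the split attach interface. */
static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct task_struct *tsk)
{
	/* group-wide admission check, called once per attach (tsk is
	 * the leader when a whole threadgroup is moved) */
	return 0;
}

static int example_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	/* per-thread check; a failure aborts the whole attach */
	return 0;
}

static void example_pre_attach(struct cgroup *cgrp)
{
	/* one-time setup before the attach_task() calls; runs under
	 * cgroup_mutex, so static scratch state is safe here */
}

static void example_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	/* per-thread work; returns void, so it must not fail */
}

static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup *old_cgrp, struct task_struct *tsk)
{
	/* expensive once-per-attach work, e.g. mm-wide updates */
}

struct cgroup_subsys example_subsys = {
	.name			= "example",
	.can_attach		= example_can_attach,
	.can_attach_task	= example_can_attach_task,
	.pre_attach		= example_pre_attach,
	.attach_task		= example_attach_task,
	.attach			= example_attach,
	/* .create, .destroy, .populate, .subsys_id as usual */
};

From user space the visible change is that writing a thread-group id to cgroup.procs now moves all of its threads at once; previously only the per-task tasks file accepted writes.
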
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e7bebb7c6c38..e691818d7e45 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -160,7 +160,7 @@ static void freezer_destroy(struct cgroup_subsys *ss,
160 */ 160 */
161static int freezer_can_attach(struct cgroup_subsys *ss, 161static int freezer_can_attach(struct cgroup_subsys *ss,
162 struct cgroup *new_cgroup, 162 struct cgroup *new_cgroup,
163 struct task_struct *task, bool threadgroup) 163 struct task_struct *task)
164{ 164{
165 struct freezer *freezer; 165 struct freezer *freezer;
166 166
@@ -172,26 +172,17 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
172 if (freezer->state != CGROUP_THAWED) 172 if (freezer->state != CGROUP_THAWED)
173 return -EBUSY; 173 return -EBUSY;
174 174
175 return 0;
176}
177
178static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
179{
175 rcu_read_lock(); 180 rcu_read_lock();
176 if (__cgroup_freezing_or_frozen(task)) { 181 if (__cgroup_freezing_or_frozen(tsk)) {
177 rcu_read_unlock(); 182 rcu_read_unlock();
178 return -EBUSY; 183 return -EBUSY;
179 } 184 }
180 rcu_read_unlock(); 185 rcu_read_unlock();
181
182 if (threadgroup) {
183 struct task_struct *c;
184
185 rcu_read_lock();
186 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
187 if (__cgroup_freezing_or_frozen(c)) {
188 rcu_read_unlock();
189 return -EBUSY;
190 }
191 }
192 rcu_read_unlock();
193 }
194
195 return 0; 186 return 0;
196} 187}
197 188
@@ -390,6 +381,9 @@ struct cgroup_subsys freezer_subsys = {
390 .populate = freezer_populate, 381 .populate = freezer_populate,
391 .subsys_id = freezer_subsys_id, 382 .subsys_id = freezer_subsys_id,
392 .can_attach = freezer_can_attach, 383 .can_attach = freezer_can_attach,
384 .can_attach_task = freezer_can_attach_task,
385 .pre_attach = NULL,
386 .attach_task = NULL,
393 .attach = NULL, 387 .attach = NULL,
394 .fork = freezer_fork, 388 .fork = freezer_fork,
395 .exit = NULL, 389 .exit = NULL,
diff --git a/kernel/compat.c b/kernel/compat.c
index 9214dcd087b7..fc9eb093acd5 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -293,6 +293,8 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
293 return compat_jiffies_to_clock_t(jiffies); 293 return compat_jiffies_to_clock_t(jiffies);
294} 294}
295 295
296#ifdef __ARCH_WANT_SYS_SIGPENDING
297
296/* 298/*
297 * Assumption: old_sigset_t and compat_old_sigset_t are both 299 * Assumption: old_sigset_t and compat_old_sigset_t are both
298 * types that can be passed to put_user()/get_user(). 300 * types that can be passed to put_user()/get_user().
@@ -312,6 +314,10 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
312 return ret; 314 return ret;
313} 315}
314 316
317#endif
318
319#ifdef __ARCH_WANT_SYS_SIGPROCMASK
320
315asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, 321asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
316 compat_old_sigset_t __user *oset) 322 compat_old_sigset_t __user *oset)
317{ 323{
@@ -333,6 +339,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
333 return ret; 339 return ret;
334} 340}
335 341
342#endif
343
336asmlinkage long compat_sys_setrlimit(unsigned int resource, 344asmlinkage long compat_sys_setrlimit(unsigned int resource,
337 struct compat_rlimit __user *rlim) 345 struct compat_rlimit __user *rlim)
338{ 346{
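
The new guards mean compat_sys_sigpending() and compat_sys_sigprocmask() are only built for architectures that actually wire up those legacy syscalls. An architecture opts in from its asm/unistd.h, roughly as below; which of the two macros a given architecture defines is arch-specific, this is just the shape of the opt-in.

/* Sketch only: arch opt-in, typically in arch/<arch>/include/asm/unistd.h */
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
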
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2bb8c2e98fff..1ceeb049c827 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1367,14 +1367,10 @@ static int fmeter_getrate(struct fmeter *fmp)
1367 return val; 1367 return val;
1368} 1368}
1369 1369
1370/* Protected by cgroup_lock */
1371static cpumask_var_t cpus_attach;
1372
1373/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ 1370/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1374static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, 1371static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1375 struct task_struct *tsk, bool threadgroup) 1372 struct task_struct *tsk)
1376{ 1373{
1377 int ret;
1378 struct cpuset *cs = cgroup_cs(cont); 1374 struct cpuset *cs = cgroup_cs(cont);
1379 1375
1380 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) 1376 if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1391,29 +1387,42 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1391 if (tsk->flags & PF_THREAD_BOUND) 1387 if (tsk->flags & PF_THREAD_BOUND)
1392 return -EINVAL; 1388 return -EINVAL;
1393 1389
1394 ret = security_task_setscheduler(tsk);
1395 if (ret)
1396 return ret;
1397 if (threadgroup) {
1398 struct task_struct *c;
1399
1400 rcu_read_lock();
1401 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1402 ret = security_task_setscheduler(c);
1403 if (ret) {
1404 rcu_read_unlock();
1405 return ret;
1406 }
1407 }
1408 rcu_read_unlock();
1409 }
1410 return 0; 1390 return 0;
1411} 1391}
1412 1392
1413static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to, 1393static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
1414 struct cpuset *cs) 1394{
1395 return security_task_setscheduler(task);
1396}
1397
1398/*
1399 * Protected by cgroup_lock. The nodemasks must be stored globally because
1400 * dynamically allocating them is not allowed in pre_attach, and they must
1401 * persist among pre_attach, attach_task, and attach.
1402 */
1403static cpumask_var_t cpus_attach;
1404static nodemask_t cpuset_attach_nodemask_from;
1405static nodemask_t cpuset_attach_nodemask_to;
1406
1407/* Set-up work for before attaching each task. */
1408static void cpuset_pre_attach(struct cgroup *cont)
1409{
1410 struct cpuset *cs = cgroup_cs(cont);
1411
1412 if (cs == &top_cpuset)
1413 cpumask_copy(cpus_attach, cpu_possible_mask);
1414 else
1415 guarantee_online_cpus(cs, cpus_attach);
1416
1417 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1418}
1419
1420/* Per-thread attachment work. */
1421static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
1415{ 1422{
1416 int err; 1423 int err;
1424 struct cpuset *cs = cgroup_cs(cont);
1425
1417 /* 1426 /*
1418 * can_attach beforehand should guarantee that this doesn't fail. 1427 * can_attach beforehand should guarantee that this doesn't fail.
1419 * TODO: have a better way to handle failure here 1428 * TODO: have a better way to handle failure here
@@ -1421,45 +1430,29 @@ static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
1421 err = set_cpus_allowed_ptr(tsk, cpus_attach); 1430 err = set_cpus_allowed_ptr(tsk, cpus_attach);
1422 WARN_ON_ONCE(err); 1431 WARN_ON_ONCE(err);
1423 1432
1424 cpuset_change_task_nodemask(tsk, to); 1433 cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
1425 cpuset_update_task_spread_flag(cs, tsk); 1434 cpuset_update_task_spread_flag(cs, tsk);
1426
1427} 1435}
1428 1436
1429static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont, 1437static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1430 struct cgroup *oldcont, struct task_struct *tsk, 1438 struct cgroup *oldcont, struct task_struct *tsk)
1431 bool threadgroup)
1432{ 1439{
1433 struct mm_struct *mm; 1440 struct mm_struct *mm;
1434 struct cpuset *cs = cgroup_cs(cont); 1441 struct cpuset *cs = cgroup_cs(cont);
1435 struct cpuset *oldcs = cgroup_cs(oldcont); 1442 struct cpuset *oldcs = cgroup_cs(oldcont);
1436 static nodemask_t to; /* protected by cgroup_mutex */
1437 1443
1438 if (cs == &top_cpuset) { 1444 /*
1439 cpumask_copy(cpus_attach, cpu_possible_mask); 1445 * Change mm, possibly for multiple threads in a threadgroup. This is
1440 } else { 1446 * expensive and may sleep.
1441 guarantee_online_cpus(cs, cpus_attach); 1447 */
1442 } 1448 cpuset_attach_nodemask_from = oldcs->mems_allowed;
1443 guarantee_online_mems(cs, &to); 1449 cpuset_attach_nodemask_to = cs->mems_allowed;
1444
1445 /* do per-task migration stuff possibly for each in the threadgroup */
1446 cpuset_attach_task(tsk, &to, cs);
1447 if (threadgroup) {
1448 struct task_struct *c;
1449 rcu_read_lock();
1450 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
1451 cpuset_attach_task(c, &to, cs);
1452 }
1453 rcu_read_unlock();
1454 }
1455
1456 /* change mm; only needs to be done once even if threadgroup */
1457 to = cs->mems_allowed;
1458 mm = get_task_mm(tsk); 1450 mm = get_task_mm(tsk);
1459 if (mm) { 1451 if (mm) {
1460 mpol_rebind_mm(mm, &to); 1452 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1461 if (is_memory_migrate(cs)) 1453 if (is_memory_migrate(cs))
1462 cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to); 1454 cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
1455 &cpuset_attach_nodemask_to);
1463 mmput(mm); 1456 mmput(mm);
1464 } 1457 }
1465} 1458}
@@ -1809,10 +1802,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
1809} 1802}
1810 1803
1811/* 1804/*
1812 * post_clone() is called at the end of cgroup_clone(). 1805 * post_clone() is called during cgroup_create() when the
1813 * 'cgroup' was just created automatically as a result of 1806 * clone_children mount argument was specified. The cgroup
1814 * a cgroup_clone(), and the current task is about to 1807 * can not yet have any tasks.
1815 * be moved into 'cgroup'.
1816 * 1808 *
1817 * Currently we refuse to set up the cgroup - thereby 1809 * Currently we refuse to set up the cgroup - thereby
1818 * refusing the task to be entered, and as a result refusing 1810 * refusing the task to be entered, and as a result refusing
@@ -1911,6 +1903,9 @@ struct cgroup_subsys cpuset_subsys = {
1911 .create = cpuset_create, 1903 .create = cpuset_create,
1912 .destroy = cpuset_destroy, 1904 .destroy = cpuset_destroy,
1913 .can_attach = cpuset_can_attach, 1905 .can_attach = cpuset_can_attach,
1906 .can_attach_task = cpuset_can_attach_task,
1907 .pre_attach = cpuset_pre_attach,
1908 .attach_task = cpuset_attach_task,
1914 .attach = cpuset_attach, 1909 .attach = cpuset_attach,
1915 .populate = cpuset_populate, 1910 .populate = cpuset_populate,
1916 .post_clone = cpuset_post_clone, 1911 .post_clone = cpuset_post_clone,
diff --git a/kernel/fork.c b/kernel/fork.c
index 2b44d82b8237..ca406d916713 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -59,7 +59,6 @@
59#include <linux/taskstats_kern.h> 59#include <linux/taskstats_kern.h>
60#include <linux/random.h> 60#include <linux/random.h>
61#include <linux/tty.h> 61#include <linux/tty.h>
62#include <linux/proc_fs.h>
63#include <linux/blkdev.h> 62#include <linux/blkdev.h>
64#include <linux/fs_struct.h> 63#include <linux/fs_struct.h>
65#include <linux/magic.h> 64#include <linux/magic.h>
@@ -383,15 +382,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
383 get_file(file); 382 get_file(file);
384 if (tmp->vm_flags & VM_DENYWRITE) 383 if (tmp->vm_flags & VM_DENYWRITE)
385 atomic_dec(&inode->i_writecount); 384 atomic_dec(&inode->i_writecount);
386 spin_lock(&mapping->i_mmap_lock); 385 mutex_lock(&mapping->i_mmap_mutex);
387 if (tmp->vm_flags & VM_SHARED) 386 if (tmp->vm_flags & VM_SHARED)
388 mapping->i_mmap_writable++; 387 mapping->i_mmap_writable++;
389 tmp->vm_truncate_count = mpnt->vm_truncate_count;
390 flush_dcache_mmap_lock(mapping); 388 flush_dcache_mmap_lock(mapping);
391 /* insert tmp into the share list, just after mpnt */ 389 /* insert tmp into the share list, just after mpnt */
392 vma_prio_tree_add(tmp, mpnt); 390 vma_prio_tree_add(tmp, mpnt);
393 flush_dcache_mmap_unlock(mapping); 391 flush_dcache_mmap_unlock(mapping);
394 spin_unlock(&mapping->i_mmap_lock); 392 mutex_unlock(&mapping->i_mmap_mutex);
395 } 393 }
396 394
397 /* 395 /*
@@ -486,6 +484,20 @@ static void mm_init_aio(struct mm_struct *mm)
486#endif 484#endif
487} 485}
488 486
487int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
488{
489#ifdef CONFIG_CPUMASK_OFFSTACK
490 if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
491 return -ENOMEM;
492
493 if (oldmm)
494 cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
495 else
496 memset(mm_cpumask(mm), 0, cpumask_size());
497#endif
498 return 0;
499}
500
489static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) 501static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
490{ 502{
491 atomic_set(&mm->mm_users, 1); 503 atomic_set(&mm->mm_users, 1);
@@ -522,10 +534,20 @@ struct mm_struct * mm_alloc(void)
522 struct mm_struct * mm; 534 struct mm_struct * mm;
523 535
524 mm = allocate_mm(); 536 mm = allocate_mm();
525 if (mm) { 537 if (!mm)
526 memset(mm, 0, sizeof(*mm)); 538 return NULL;
527 mm = mm_init(mm, current); 539
540 memset(mm, 0, sizeof(*mm));
541 mm = mm_init(mm, current);
542 if (!mm)
543 return NULL;
544
545 if (mm_init_cpumask(mm, NULL)) {
546 mm_free_pgd(mm);
547 free_mm(mm);
548 return NULL;
528 } 549 }
550
529 return mm; 551 return mm;
530} 552}
531 553
@@ -537,6 +559,7 @@ struct mm_struct * mm_alloc(void)
537void __mmdrop(struct mm_struct *mm) 559void __mmdrop(struct mm_struct *mm)
538{ 560{
539 BUG_ON(mm == &init_mm); 561 BUG_ON(mm == &init_mm);
562 free_cpumask_var(mm->cpu_vm_mask_var);
540 mm_free_pgd(mm); 563 mm_free_pgd(mm);
541 destroy_context(mm); 564 destroy_context(mm);
542 mmu_notifier_mm_destroy(mm); 565 mmu_notifier_mm_destroy(mm);
@@ -573,6 +596,57 @@ void mmput(struct mm_struct *mm)
573} 596}
574EXPORT_SYMBOL_GPL(mmput); 597EXPORT_SYMBOL_GPL(mmput);
575 598
599/*
600 * We added or removed a vma mapping the executable. The vmas are only mapped
601 * during exec and are not mapped with the mmap system call.
602 * Callers must hold down_write() on the mm's mmap_sem for these
603 */
604void added_exe_file_vma(struct mm_struct *mm)
605{
606 mm->num_exe_file_vmas++;
607}
608
609void removed_exe_file_vma(struct mm_struct *mm)
610{
611 mm->num_exe_file_vmas--;
612 if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
613 fput(mm->exe_file);
614 mm->exe_file = NULL;
615 }
616
617}
618
619void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
620{
621 if (new_exe_file)
622 get_file(new_exe_file);
623 if (mm->exe_file)
624 fput(mm->exe_file);
625 mm->exe_file = new_exe_file;
626 mm->num_exe_file_vmas = 0;
627}
628
629struct file *get_mm_exe_file(struct mm_struct *mm)
630{
631 struct file *exe_file;
632
633 /* We need mmap_sem to protect against races with removal of
634 * VM_EXECUTABLE vmas */
635 down_read(&mm->mmap_sem);
636 exe_file = mm->exe_file;
637 if (exe_file)
638 get_file(exe_file);
639 up_read(&mm->mmap_sem);
640 return exe_file;
641}
642
643static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
644{
645 /* It's safe to write the exe_file pointer without exe_file_lock because
646 * this is called during fork when the task is not yet in /proc */
647 newmm->exe_file = get_mm_exe_file(oldmm);
648}
649
576/** 650/**
577 * get_task_mm - acquire a reference to the task's mm 651 * get_task_mm - acquire a reference to the task's mm
578 * 652 *
@@ -691,6 +765,9 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
691 if (!mm_init(mm, tsk)) 765 if (!mm_init(mm, tsk))
692 goto fail_nomem; 766 goto fail_nomem;
693 767
768 if (mm_init_cpumask(mm, oldmm))
769 goto fail_nocpumask;
770
694 if (init_new_context(tsk, mm)) 771 if (init_new_context(tsk, mm))
695 goto fail_nocontext; 772 goto fail_nocontext;
696 773
@@ -717,6 +794,9 @@ fail_nomem:
717 return NULL; 794 return NULL;
718 795
719fail_nocontext: 796fail_nocontext:
797 free_cpumask_var(mm->cpu_vm_mask_var);
798
799fail_nocpumask:
720 /* 800 /*
721 * If init_new_context() failed, we cannot use mmput() to free the mm 801 * If init_new_context() failed, we cannot use mmput() to free the mm
722 * because it calls destroy_context() 802 * because it calls destroy_context()
@@ -927,6 +1007,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
927 tty_audit_fork(sig); 1007 tty_audit_fork(sig);
928 sched_autogroup_fork(sig); 1008 sched_autogroup_fork(sig);
929 1009
1010#ifdef CONFIG_CGROUPS
1011 init_rwsem(&sig->threadgroup_fork_lock);
1012#endif
1013
930 sig->oom_adj = current->signal->oom_adj; 1014 sig->oom_adj = current->signal->oom_adj;
931 sig->oom_score_adj = current->signal->oom_score_adj; 1015 sig->oom_score_adj = current->signal->oom_score_adj;
932 sig->oom_score_adj_min = current->signal->oom_score_adj_min; 1016 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
@@ -1108,6 +1192,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1108 monotonic_to_bootbased(&p->real_start_time); 1192 monotonic_to_bootbased(&p->real_start_time);
1109 p->io_context = NULL; 1193 p->io_context = NULL;
1110 p->audit_context = NULL; 1194 p->audit_context = NULL;
1195 if (clone_flags & CLONE_THREAD)
1196 threadgroup_fork_read_lock(current);
1111 cgroup_fork(p); 1197 cgroup_fork(p);
1112#ifdef CONFIG_NUMA 1198#ifdef CONFIG_NUMA
1113 p->mempolicy = mpol_dup(p->mempolicy); 1199 p->mempolicy = mpol_dup(p->mempolicy);
@@ -1193,12 +1279,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1193 if (clone_flags & CLONE_THREAD) 1279 if (clone_flags & CLONE_THREAD)
1194 p->tgid = current->tgid; 1280 p->tgid = current->tgid;
1195 1281
1196 if (current->nsproxy != p->nsproxy) {
1197 retval = ns_cgroup_clone(p, pid);
1198 if (retval)
1199 goto bad_fork_free_pid;
1200 }
1201
1202 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 1282 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1203 /* 1283 /*
1204 * Clear TID on mm_release()? 1284 * Clear TID on mm_release()?
@@ -1312,6 +1392,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1312 write_unlock_irq(&tasklist_lock); 1392 write_unlock_irq(&tasklist_lock);
1313 proc_fork_connector(p); 1393 proc_fork_connector(p);
1314 cgroup_post_fork(p); 1394 cgroup_post_fork(p);
1395 if (clone_flags & CLONE_THREAD)
1396 threadgroup_fork_read_unlock(current);
1315 perf_event_fork(p); 1397 perf_event_fork(p);
1316 return p; 1398 return p;
1317 1399
@@ -1350,6 +1432,8 @@ bad_fork_cleanup_policy:
1350 mpol_put(p->mempolicy); 1432 mpol_put(p->mempolicy);
1351bad_fork_cleanup_cgroup: 1433bad_fork_cleanup_cgroup:
1352#endif 1434#endif
1435 if (clone_flags & CLONE_THREAD)
1436 threadgroup_fork_read_unlock(current);
1353 cgroup_exit(p, cgroup_callbacks_done); 1437 cgroup_exit(p, cgroup_callbacks_done);
1354 delayacct_tsk_free(p); 1438 delayacct_tsk_free(p);
1355 module_put(task_thread_info(p)->exec_domain->module); 1439 module_put(task_thread_info(p)->exec_domain->module);
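For illustration, a minimal sketch (not part of this patch) of how a caller elsewhere in the kernel might consume the exe_file helpers added above: get_mm_exe_file() hands back its own file reference, which the caller drops with fput(). The wrapper function, buffer handling, and exact include list are assumptions.

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/dcache.h>
#include <linux/err.h>

/* Hedged sketch: resolve the path of the executable backing @mm.
 * get_mm_exe_file() takes mmap_sem internally and returns a referenced
 * struct file (or NULL); sketch_exe_path() itself is illustrative.
 */
static char *sketch_exe_path(struct mm_struct *mm, char *buf, int buflen)
{
	struct file *exe_file;
	char *path = ERR_PTR(-ENOENT);

	exe_file = get_mm_exe_file(mm);
	if (exe_file) {
		path = d_path(&exe_file->f_path, buf, buflen);
		fput(exe_file);		/* drop the reference we were given */
	}
	return path;
}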
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c541ee527ecb..a9205e32a059 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -748,7 +748,7 @@ static inline void retrigger_next_event(void *arg) { }
748 */ 748 */
749void clock_was_set(void) 749void clock_was_set(void)
750{ 750{
751#ifdef CONFIG_HIGHRES_TIMERS 751#ifdef CONFIG_HIGH_RES_TIMERS
752 /* Retrigger the CPU local events everywhere */ 752 /* Retrigger the CPU local events everywhere */
753 on_each_cpu(retrigger_next_event, NULL, 1); 753 on_each_cpu(retrigger_next_event, NULL, 1);
754#endif 754#endif
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 834899f2500f..4bd4faa6323a 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,7 +19,7 @@ static struct proc_dir_entry *root_irq_dir;
19 19
20#ifdef CONFIG_SMP 20#ifdef CONFIG_SMP
21 21
22static int irq_affinity_proc_show(struct seq_file *m, void *v) 22static int show_irq_affinity(int type, struct seq_file *m, void *v)
23{ 23{
24 struct irq_desc *desc = irq_to_desc((long)m->private); 24 struct irq_desc *desc = irq_to_desc((long)m->private);
25 const struct cpumask *mask = desc->irq_data.affinity; 25 const struct cpumask *mask = desc->irq_data.affinity;
@@ -28,7 +28,10 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
28 if (irqd_is_setaffinity_pending(&desc->irq_data)) 28 if (irqd_is_setaffinity_pending(&desc->irq_data))
29 mask = desc->pending_mask; 29 mask = desc->pending_mask;
30#endif 30#endif
31 seq_cpumask(m, mask); 31 if (type)
32 seq_cpumask_list(m, mask);
33 else
34 seq_cpumask(m, mask);
32 seq_putc(m, '\n'); 35 seq_putc(m, '\n');
33 return 0; 36 return 0;
34} 37}
@@ -59,7 +62,18 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
59#endif 62#endif
60 63
61int no_irq_affinity; 64int no_irq_affinity;
62static ssize_t irq_affinity_proc_write(struct file *file, 65static int irq_affinity_proc_show(struct seq_file *m, void *v)
66{
67 return show_irq_affinity(0, m, v);
68}
69
70static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
71{
72 return show_irq_affinity(1, m, v);
73}
74
75
76static ssize_t write_irq_affinity(int type, struct file *file,
63 const char __user *buffer, size_t count, loff_t *pos) 77 const char __user *buffer, size_t count, loff_t *pos)
64{ 78{
65 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; 79 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
@@ -72,7 +86,10 @@ static ssize_t irq_affinity_proc_write(struct file *file,
72 if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 86 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
73 return -ENOMEM; 87 return -ENOMEM;
74 88
75 err = cpumask_parse_user(buffer, count, new_value); 89 if (type)
90 err = cpumask_parselist_user(buffer, count, new_value);
91 else
92 err = cpumask_parse_user(buffer, count, new_value);
76 if (err) 93 if (err)
77 goto free_cpumask; 94 goto free_cpumask;
78 95
@@ -100,11 +117,28 @@ free_cpumask:
100 return err; 117 return err;
101} 118}
102 119
120static ssize_t irq_affinity_proc_write(struct file *file,
121 const char __user *buffer, size_t count, loff_t *pos)
122{
123 return write_irq_affinity(0, file, buffer, count, pos);
124}
125
126static ssize_t irq_affinity_list_proc_write(struct file *file,
127 const char __user *buffer, size_t count, loff_t *pos)
128{
129 return write_irq_affinity(1, file, buffer, count, pos);
130}
131
103static int irq_affinity_proc_open(struct inode *inode, struct file *file) 132static int irq_affinity_proc_open(struct inode *inode, struct file *file)
104{ 133{
105 return single_open(file, irq_affinity_proc_show, PDE(inode)->data); 134 return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
106} 135}
107 136
137static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
138{
139 return single_open(file, irq_affinity_list_proc_show, PDE(inode)->data);
140}
141
108static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file) 142static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
109{ 143{
110 return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data); 144 return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
@@ -125,6 +159,14 @@ static const struct file_operations irq_affinity_hint_proc_fops = {
125 .release = single_release, 159 .release = single_release,
126}; 160};
127 161
162static const struct file_operations irq_affinity_list_proc_fops = {
163 .open = irq_affinity_list_proc_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
167 .write = irq_affinity_list_proc_write,
168};
169
128static int default_affinity_show(struct seq_file *m, void *v) 170static int default_affinity_show(struct seq_file *m, void *v)
129{ 171{
130 seq_cpumask(m, irq_default_affinity); 172 seq_cpumask(m, irq_default_affinity);
@@ -289,6 +331,10 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
289 proc_create_data("affinity_hint", 0400, desc->dir, 331 proc_create_data("affinity_hint", 0400, desc->dir,
290 &irq_affinity_hint_proc_fops, (void *)(long)irq); 332 &irq_affinity_hint_proc_fops, (void *)(long)irq);
291 333
334 /* create /proc/irq/<irq>/smp_affinity_list */
335 proc_create_data("smp_affinity_list", 0600, desc->dir,
336 &irq_affinity_list_proc_fops, (void *)(long)irq);
337
292 proc_create_data("node", 0444, desc->dir, 338 proc_create_data("node", 0444, desc->dir,
293 &irq_node_proc_fops, (void *)(long)irq); 339 &irq_node_proc_fops, (void *)(long)irq);
294#endif 340#endif
@@ -306,6 +352,7 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
306#ifdef CONFIG_SMP 352#ifdef CONFIG_SMP
307 remove_proc_entry("smp_affinity", desc->dir); 353 remove_proc_entry("smp_affinity", desc->dir);
308 remove_proc_entry("affinity_hint", desc->dir); 354 remove_proc_entry("affinity_hint", desc->dir);
355 remove_proc_entry("smp_affinity_list", desc->dir);
309 remove_proc_entry("node", desc->dir); 356 remove_proc_entry("node", desc->dir);
310#endif 357#endif
311 remove_proc_entry("spurious", desc->dir); 358 remove_proc_entry("spurious", desc->dir);
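As a hedged usage sketch (not part of the patch), the new /proc/irq/<irq>/smp_affinity_list file accepts the list format parsed by cpumask_parselist_user(), e.g. "0-3,8", instead of a hex mask. The IRQ number below is hypothetical.

/* Hedged userspace sketch: pin a hypothetical IRQ 42 to CPUs 0-3 and 8
 * through the new list-format interface.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *list = "0-3,8\n";
	int fd = open("/proc/irq/42/smp_affinity_list", O_WRONLY);

	if (fd < 0 || write(fd, list, strlen(list)) < 0) {
		perror("smp_affinity_list");
		return 1;
	}
	close(fd);
	return 0;
}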
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 2c938e2337cd..d607ed5dd441 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,14 +131,14 @@ EXPORT_SYMBOL(mutex_unlock);
131 */ 131 */
132static inline int __sched 132static inline int __sched
133__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 133__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
134 unsigned long ip) 134 struct lockdep_map *nest_lock, unsigned long ip)
135{ 135{
136 struct task_struct *task = current; 136 struct task_struct *task = current;
137 struct mutex_waiter waiter; 137 struct mutex_waiter waiter;
138 unsigned long flags; 138 unsigned long flags;
139 139
140 preempt_disable(); 140 preempt_disable();
141 mutex_acquire(&lock->dep_map, subclass, 0, ip); 141 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
142 142
143#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 143#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
144 /* 144 /*
@@ -269,16 +269,25 @@ void __sched
269mutex_lock_nested(struct mutex *lock, unsigned int subclass) 269mutex_lock_nested(struct mutex *lock, unsigned int subclass)
270{ 270{
271 might_sleep(); 271 might_sleep();
272 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_); 272 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
273} 273}
274 274
275EXPORT_SYMBOL_GPL(mutex_lock_nested); 275EXPORT_SYMBOL_GPL(mutex_lock_nested);
276 276
277void __sched
278_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
279{
280 might_sleep();
281 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
282}
283
284EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
285
277int __sched 286int __sched
278mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) 287mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
279{ 288{
280 might_sleep(); 289 might_sleep();
281 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_); 290 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
282} 291}
283EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 292EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
284 293
@@ -287,7 +296,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
287{ 296{
288 might_sleep(); 297 might_sleep();
289 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 298 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
290 subclass, _RET_IP_); 299 subclass, NULL, _RET_IP_);
291} 300}
292 301
293EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 302EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -393,7 +402,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
393{ 402{
394 struct mutex *lock = container_of(lock_count, struct mutex, count); 403 struct mutex *lock = container_of(lock_count, struct mutex, count);
395 404
396 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_); 405 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
397} 406}
398 407
399static noinline int __sched 408static noinline int __sched
@@ -401,7 +410,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
401{ 410{
402 struct mutex *lock = container_of(lock_count, struct mutex, count); 411 struct mutex *lock = container_of(lock_count, struct mutex, count);
403 412
404 return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_); 413 return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
405} 414}
406 415
407static noinline int __sched 416static noinline int __sched
@@ -409,7 +418,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
409{ 418{
410 struct mutex *lock = container_of(lock_count, struct mutex, count); 419 struct mutex *lock = container_of(lock_count, struct mutex, count);
411 420
412 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_); 421 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
413} 422}
414#endif 423#endif
415 424
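A hedged sketch of the annotation this enables: when one outer lock serializes all takers, many mutexes of the same class may be held at once, and lockdep is told that the outer lock is what makes this safe rather than using per-instance subclasses. The mutex_lock_nest_lock() wrapper macro is assumed from the matching header change; the bucket structure is illustrative.

#include <linux/mutex.h>

/* Hedged sketch: take every per-bucket mutex while holding @outer,
 * nesting them under @outer for lockdep's benefit.
 */
struct sketch_bucket {
	struct mutex lock;
};

static void sketch_lock_all(struct sketch_bucket *b, int n, struct mutex *outer)
{
	int i;

	mutex_lock(outer);
	for (i = 0; i < n; i++)
		mutex_lock_nest_lock(&b[i].lock, outer);
	/* ... operate on all buckets ... */
	for (i = n - 1; i >= 0; i--)
		mutex_unlock(&b[i].lock);
	mutex_unlock(outer);
}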
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
deleted file mode 100644
index 2c98ad94ba0e..000000000000
--- a/kernel/ns_cgroup.c
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * ns_cgroup.c - namespace cgroup subsystem
3 *
4 * Copyright 2006, 2007 IBM Corp
5 */
6
7#include <linux/module.h>
8#include <linux/cgroup.h>
9#include <linux/fs.h>
10#include <linux/proc_fs.h>
11#include <linux/slab.h>
12#include <linux/nsproxy.h>
13
14struct ns_cgroup {
15 struct cgroup_subsys_state css;
16};
17
18struct cgroup_subsys ns_subsys;
19
20static inline struct ns_cgroup *cgroup_to_ns(
21 struct cgroup *cgroup)
22{
23 return container_of(cgroup_subsys_state(cgroup, ns_subsys_id),
24 struct ns_cgroup, css);
25}
26
27int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
28{
29 char name[PROC_NUMBUF];
30
31 snprintf(name, PROC_NUMBUF, "%d", pid_vnr(pid));
32 return cgroup_clone(task, &ns_subsys, name);
33}
34
35/*
36 * Rules:
37 * 1. you can only enter a cgroup which is a descendant of your current
38 * cgroup
39 * 2. you can only place another process into a cgroup if
40 * a. you have CAP_SYS_ADMIN
41 * b. your cgroup is an ancestor of task's destination cgroup
42 * (hence either you are in the same cgroup as task, or in an
43 * ancestor cgroup thereof)
44 */
45static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
46 struct task_struct *task, bool threadgroup)
47{
48 if (current != task) {
49 if (!capable(CAP_SYS_ADMIN))
50 return -EPERM;
51
52 if (!cgroup_is_descendant(new_cgroup, current))
53 return -EPERM;
54 }
55
56 if (!cgroup_is_descendant(new_cgroup, task))
57 return -EPERM;
58
59 if (threadgroup) {
60 struct task_struct *c;
61 rcu_read_lock();
62 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
63 if (!cgroup_is_descendant(new_cgroup, c)) {
64 rcu_read_unlock();
65 return -EPERM;
66 }
67 }
68 rcu_read_unlock();
69 }
70
71 return 0;
72}
73
74/*
75 * Rules: you can only create a cgroup if
76 * 1. you are capable(CAP_SYS_ADMIN)
77 * 2. the target cgroup is a descendant of your own cgroup
78 */
79static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
80 struct cgroup *cgroup)
81{
82 struct ns_cgroup *ns_cgroup;
83
84 if (!capable(CAP_SYS_ADMIN))
85 return ERR_PTR(-EPERM);
86 if (!cgroup_is_descendant(cgroup, current))
87 return ERR_PTR(-EPERM);
88 if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) {
89 printk("ns_cgroup can't be created with parent "
90 "'clone_children' set.\n");
91 return ERR_PTR(-EINVAL);
92 }
93
94 printk_once("ns_cgroup deprecated: consider using the "
95 "'clone_children' flag without the ns_cgroup.\n");
96
97 ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
98 if (!ns_cgroup)
99 return ERR_PTR(-ENOMEM);
100 return &ns_cgroup->css;
101}
102
103static void ns_destroy(struct cgroup_subsys *ss,
104 struct cgroup *cgroup)
105{
106 struct ns_cgroup *ns_cgroup;
107
108 ns_cgroup = cgroup_to_ns(cgroup);
109 kfree(ns_cgroup);
110}
111
112struct cgroup_subsys ns_subsys = {
113 .name = "ns",
114 .can_attach = ns_can_attach,
115 .create = ns_create,
116 .destroy = ns_destroy,
117 .subsys_id = ns_subsys_id,
118};
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index a05d191ffdd9..d6a00f3de15d 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -22,6 +22,9 @@
22#include <linux/pid_namespace.h> 22#include <linux/pid_namespace.h>
23#include <net/net_namespace.h> 23#include <net/net_namespace.h>
24#include <linux/ipc_namespace.h> 24#include <linux/ipc_namespace.h>
25#include <linux/proc_fs.h>
26#include <linux/file.h>
27#include <linux/syscalls.h>
25 28
26static struct kmem_cache *nsproxy_cachep; 29static struct kmem_cache *nsproxy_cachep;
27 30
@@ -198,10 +201,6 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
198 goto out; 201 goto out;
199 } 202 }
200 203
201 err = ns_cgroup_clone(current, task_pid(current));
202 if (err)
203 put_nsproxy(*new_nsp);
204
205out: 204out:
206 return err; 205 return err;
207} 206}
@@ -233,6 +232,45 @@ void exit_task_namespaces(struct task_struct *p)
233 switch_task_namespaces(p, NULL); 232 switch_task_namespaces(p, NULL);
234} 233}
235 234
235SYSCALL_DEFINE2(setns, int, fd, int, nstype)
236{
237 const struct proc_ns_operations *ops;
238 struct task_struct *tsk = current;
239 struct nsproxy *new_nsproxy;
240 struct proc_inode *ei;
241 struct file *file;
242 int err;
243
244 if (!capable(CAP_SYS_ADMIN))
245 return -EPERM;
246
247 file = proc_ns_fget(fd);
248 if (IS_ERR(file))
249 return PTR_ERR(file);
250
251 err = -EINVAL;
252 ei = PROC_I(file->f_dentry->d_inode);
253 ops = ei->ns_ops;
254 if (nstype && (ops->type != nstype))
255 goto out;
256
257 new_nsproxy = create_new_namespaces(0, tsk, tsk->fs);
258 if (IS_ERR(new_nsproxy)) {
259 err = PTR_ERR(new_nsproxy);
260 goto out;
261 }
262
263 err = ops->install(new_nsproxy, ei->ns);
264 if (err) {
265 free_nsproxy(new_nsproxy);
266 goto out;
267 }
268 switch_task_namespaces(tsk, new_nsproxy);
269out:
270 fput(file);
271 return err;
272}
273
236static int __init nsproxy_cache_init(void) 274static int __init nsproxy_cache_init(void)
237{ 275{
238 nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC); 276 nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC);
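A hedged userspace sketch of the new setns() system call added above: it joins the namespace referred to by a file descriptor opened from another task's /proc/<pid>/ns/ directory. The __NR_setns number and the "uts" entry name are assumptions from the rest of this series; error handling is minimal.

/* Hedged sketch: join the UTS namespace behind /proc/<pid>/ns/uts and run
 * a command inside it (requires CAP_SYS_ADMIN).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 3) {
		fprintf(stderr, "usage: %s /proc/PID/ns/uts cmd [args]\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || syscall(__NR_setns, fd, 0) < 0) {
		perror("setns");
		return 1;
	}
	execvp(argv[2], &argv[2]);
	perror("execvp");
	return 1;
}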
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index a1b5edf1bf92..4556182527f3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -491,6 +491,13 @@ static struct k_itimer * alloc_posix_timer(void)
491 return tmr; 491 return tmr;
492} 492}
493 493
494static void k_itimer_rcu_free(struct rcu_head *head)
495{
496 struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
497
498 kmem_cache_free(posix_timers_cache, tmr);
499}
500
494#define IT_ID_SET 1 501#define IT_ID_SET 1
495#define IT_ID_NOT_SET 0 502#define IT_ID_NOT_SET 0
496static void release_posix_timer(struct k_itimer *tmr, int it_id_set) 503static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
@@ -503,7 +510,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
503 } 510 }
504 put_pid(tmr->it_pid); 511 put_pid(tmr->it_pid);
505 sigqueue_free(tmr->sigq); 512 sigqueue_free(tmr->sigq);
506 kmem_cache_free(posix_timers_cache, tmr); 513 call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
507} 514}
508 515
509static struct k_clock *clockid_to_kclock(const clockid_t id) 516static struct k_clock *clockid_to_kclock(const clockid_t id)
@@ -631,22 +638,18 @@ out:
631static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags) 638static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
632{ 639{
633 struct k_itimer *timr; 640 struct k_itimer *timr;
634 /* 641
635 * Watch out here. We do a irqsave on the idr_lock and pass the 642 rcu_read_lock();
636 * flags part over to the timer lock. Must not let interrupts in
637 * while we are moving the lock.
638 */
639 spin_lock_irqsave(&idr_lock, *flags);
640 timr = idr_find(&posix_timers_id, (int)timer_id); 643 timr = idr_find(&posix_timers_id, (int)timer_id);
641 if (timr) { 644 if (timr) {
642 spin_lock(&timr->it_lock); 645 spin_lock_irqsave(&timr->it_lock, *flags);
643 if (timr->it_signal == current->signal) { 646 if (timr->it_signal == current->signal) {
644 spin_unlock(&idr_lock); 647 rcu_read_unlock();
645 return timr; 648 return timr;
646 } 649 }
647 spin_unlock(&timr->it_lock); 650 spin_unlock_irqrestore(&timr->it_lock, *flags);
648 } 651 }
649 spin_unlock_irqrestore(&idr_lock, *flags); 652 rcu_read_unlock();
650 653
651 return NULL; 654 return NULL;
652} 655}
diff --git a/kernel/printk.c b/kernel/printk.c
index da8ca817eae3..35185392173f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -31,6 +31,7 @@
31#include <linux/smp.h> 31#include <linux/smp.h>
32#include <linux/security.h> 32#include <linux/security.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/memblock.h>
34#include <linux/syscalls.h> 35#include <linux/syscalls.h>
35#include <linux/kexec.h> 36#include <linux/kexec.h>
36#include <linux/kdb.h> 37#include <linux/kdb.h>
@@ -167,46 +168,74 @@ void log_buf_kexec_setup(void)
167} 168}
168#endif 169#endif
169 170
171/* requested log_buf_len from kernel cmdline */
172static unsigned long __initdata new_log_buf_len;
173
174/* save requested log_buf_len since it's too early to process it */
170static int __init log_buf_len_setup(char *str) 175static int __init log_buf_len_setup(char *str)
171{ 176{
172 unsigned size = memparse(str, &str); 177 unsigned size = memparse(str, &str);
173 unsigned long flags;
174 178
175 if (size) 179 if (size)
176 size = roundup_pow_of_two(size); 180 size = roundup_pow_of_two(size);
177 if (size > log_buf_len) { 181 if (size > log_buf_len)
178 unsigned start, dest_idx, offset; 182 new_log_buf_len = size;
179 char *new_log_buf;
180 183
181 new_log_buf = alloc_bootmem(size); 184 return 0;
182 if (!new_log_buf) { 185}
183 printk(KERN_WARNING "log_buf_len: allocation failed\n"); 186early_param("log_buf_len", log_buf_len_setup);
184 goto out;
185 }
186 187
187 spin_lock_irqsave(&logbuf_lock, flags); 188void __init setup_log_buf(int early)
188 log_buf_len = size; 189{
189 log_buf = new_log_buf; 190 unsigned long flags;
190 191 unsigned start, dest_idx, offset;
191 offset = start = min(con_start, log_start); 192 char *new_log_buf;
192 dest_idx = 0; 193 int free;
193 while (start != log_end) { 194
194 log_buf[dest_idx] = __log_buf[start & (__LOG_BUF_LEN - 1)]; 195 if (!new_log_buf_len)
195 start++; 196 return;
196 dest_idx++; 197
197 } 198 if (early) {
198 log_start -= offset; 199 unsigned long mem;
199 con_start -= offset;
200 log_end -= offset;
201 spin_unlock_irqrestore(&logbuf_lock, flags);
202 200
203 printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len); 201 mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
202 if (mem == MEMBLOCK_ERROR)
203 return;
204 new_log_buf = __va(mem);
205 } else {
206 new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
204 } 207 }
205out:
206 return 1;
207}
208 208
209__setup("log_buf_len=", log_buf_len_setup); 209 if (unlikely(!new_log_buf)) {
210 pr_err("log_buf_len: %ld bytes not available\n",
211 new_log_buf_len);
212 return;
213 }
214
215 spin_lock_irqsave(&logbuf_lock, flags);
216 log_buf_len = new_log_buf_len;
217 log_buf = new_log_buf;
218 new_log_buf_len = 0;
219 free = __LOG_BUF_LEN - log_end;
220
221 offset = start = min(con_start, log_start);
222 dest_idx = 0;
223 while (start != log_end) {
224 unsigned log_idx_mask = start & (__LOG_BUF_LEN - 1);
225
226 log_buf[dest_idx] = __log_buf[log_idx_mask];
227 start++;
228 dest_idx++;
229 }
230 log_start -= offset;
231 con_start -= offset;
232 log_end -= offset;
233 spin_unlock_irqrestore(&logbuf_lock, flags);
234
235 pr_info("log_buf_len: %d\n", log_buf_len);
236 pr_info("early log buf free: %d(%d%%)\n",
237 free, (free * 100) / __LOG_BUF_LEN);
238}
210 239
211#ifdef CONFIG_BOOT_PRINTK_DELAY 240#ifdef CONFIG_BOOT_PRINTK_DELAY
212 241
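With the rework above, log_buf_len= is only recorded by the early_param handler; the switch to the bigger buffer happens later in setup_log_buf(), from memblock when called early or from bootmem otherwise. Requesting a larger buffer still looks the same on the kernel command line; the size below is only an example:

	log_buf_len=4M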
diff --git a/kernel/profile.c b/kernel/profile.c
index 14c9f87b9fc9..961b389fe52f 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -303,14 +303,12 @@ static void profile_discard_flip_buffers(void)
303 mutex_unlock(&profile_flip_mutex); 303 mutex_unlock(&profile_flip_mutex);
304} 304}
305 305
306void profile_hits(int type, void *__pc, unsigned int nr_hits) 306static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
307{ 307{
308 unsigned long primary, secondary, flags, pc = (unsigned long)__pc; 308 unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
309 int i, j, cpu; 309 int i, j, cpu;
310 struct profile_hit *hits; 310 struct profile_hit *hits;
311 311
312 if (prof_on != type || !prof_buffer)
313 return;
314 pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1); 312 pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
315 i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; 313 i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
316 secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; 314 secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
@@ -417,16 +415,20 @@ out_free:
417#define profile_discard_flip_buffers() do { } while (0) 415#define profile_discard_flip_buffers() do { } while (0)
418#define profile_cpu_callback NULL 416#define profile_cpu_callback NULL
419 417
420void profile_hits(int type, void *__pc, unsigned int nr_hits) 418static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
421{ 419{
422 unsigned long pc; 420 unsigned long pc;
423
424 if (prof_on != type || !prof_buffer)
425 return;
426 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; 421 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
427 atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); 422 atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
428} 423}
429#endif /* !CONFIG_SMP */ 424#endif /* !CONFIG_SMP */
425
426void profile_hits(int type, void *__pc, unsigned int nr_hits)
427{
428 if (prof_on != type || !prof_buffer)
429 return;
430 do_profile_hits(type, __pc, nr_hits);
431}
430EXPORT_SYMBOL_GPL(profile_hits); 432EXPORT_SYMBOL_GPL(profile_hits);
431 433
432void profile_tick(int type) 434void profile_tick(int type)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 7a81fc071344..2df115790cd9 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -562,7 +562,7 @@ static int ptrace_resume(struct task_struct *child, long request,
562 } 562 }
563 563
564 child->exit_code = data; 564 child->exit_code = data;
565 wake_up_process(child); 565 wake_up_state(child, __TASK_TRACED);
566 566
567 return 0; 567 return 0;
568} 568}
diff --git a/kernel/sched.c b/kernel/sched.c
index 2d12893b8b0f..5e43e9dc65d1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8764,42 +8764,10 @@ cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
8764 return 0; 8764 return 0;
8765} 8765}
8766 8766
8767static int
8768cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
8769 struct task_struct *tsk, bool threadgroup)
8770{
8771 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
8772 if (retval)
8773 return retval;
8774 if (threadgroup) {
8775 struct task_struct *c;
8776 rcu_read_lock();
8777 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8778 retval = cpu_cgroup_can_attach_task(cgrp, c);
8779 if (retval) {
8780 rcu_read_unlock();
8781 return retval;
8782 }
8783 }
8784 rcu_read_unlock();
8785 }
8786 return 0;
8787}
8788
8789static void 8767static void
8790cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, 8768cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
8791 struct cgroup *old_cont, struct task_struct *tsk,
8792 bool threadgroup)
8793{ 8769{
8794 sched_move_task(tsk); 8770 sched_move_task(tsk);
8795 if (threadgroup) {
8796 struct task_struct *c;
8797 rcu_read_lock();
8798 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8799 sched_move_task(c);
8800 }
8801 rcu_read_unlock();
8802 }
8803} 8771}
8804 8772
8805static void 8773static void
@@ -8887,8 +8855,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
8887 .name = "cpu", 8855 .name = "cpu",
8888 .create = cpu_cgroup_create, 8856 .create = cpu_cgroup_create,
8889 .destroy = cpu_cgroup_destroy, 8857 .destroy = cpu_cgroup_destroy,
8890 .can_attach = cpu_cgroup_can_attach, 8858 .can_attach_task = cpu_cgroup_can_attach_task,
8891 .attach = cpu_cgroup_attach, 8859 .attach_task = cpu_cgroup_attach_task,
8892 .exit = cpu_cgroup_exit, 8860 .exit = cpu_cgroup_exit,
8893 .populate = cpu_cgroup_populate, 8861 .populate = cpu_cgroup_populate,
8894 .subsys_id = cpu_cgroup_subsys_id, 8862 .subsys_id = cpu_cgroup_subsys_id,

diff --git a/kernel/signal.c b/kernel/signal.c
index ad5e818baacc..86c32b884f8e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3023,8 +3023,10 @@ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3023 3023
3024SYSCALL_DEFINE0(pause) 3024SYSCALL_DEFINE0(pause)
3025{ 3025{
3026 current->state = TASK_INTERRUPTIBLE; 3026 while (!signal_pending(current)) {
3027 schedule(); 3027 current->state = TASK_INTERRUPTIBLE;
3028 schedule();
3029 }
3028 return -ERESTARTNOHAND; 3030 return -ERESTARTNOHAND;
3029} 3031}
3030 3032
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4bffd62c2f13..4fc92445a29c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1506,7 +1506,7 @@ static struct ctl_table fs_table[] = {
1506 1506
1507static struct ctl_table debug_table[] = { 1507static struct ctl_table debug_table[] = {
1508#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \ 1508#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \
1509 defined(CONFIG_S390) 1509 defined(CONFIG_S390) || defined(CONFIG_TILE)
1510 { 1510 {
1511 .procname = "exception-trace", 1511 .procname = "exception-trace",
1512 .data = &show_unhandled_signals, 1512 .data = &show_unhandled_signals,
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 44646179eaba..bff131b9510a 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -15,6 +15,7 @@
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/user_namespace.h> 17#include <linux/user_namespace.h>
18#include <linux/proc_fs.h>
18 19
19static struct uts_namespace *create_uts_ns(void) 20static struct uts_namespace *create_uts_ns(void)
20{ 21{
@@ -79,3 +80,41 @@ void free_uts_ns(struct kref *kref)
79 put_user_ns(ns->user_ns); 80 put_user_ns(ns->user_ns);
80 kfree(ns); 81 kfree(ns);
81} 82}
83
84static void *utsns_get(struct task_struct *task)
85{
86 struct uts_namespace *ns = NULL;
87 struct nsproxy *nsproxy;
88
89 rcu_read_lock();
90 nsproxy = task_nsproxy(task);
91 if (nsproxy) {
92 ns = nsproxy->uts_ns;
93 get_uts_ns(ns);
94 }
95 rcu_read_unlock();
96
97 return ns;
98}
99
100static void utsns_put(void *ns)
101{
102 put_uts_ns(ns);
103}
104
105static int utsns_install(struct nsproxy *nsproxy, void *ns)
106{
107 get_uts_ns(ns);
108 put_uts_ns(nsproxy->uts_ns);
109 nsproxy->uts_ns = ns;
110 return 0;
111}
112
113const struct proc_ns_operations utsns_operations = {
114 .name = "uts",
115 .type = CLONE_NEWUTS,
116 .get = utsns_get,
117 .put = utsns_put,
118 .install = utsns_install,
119};
120
diff --git a/lib/Kconfig b/lib/Kconfig
index 9c10e38fc609..830181cc7a83 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,16 +19,6 @@ config RATIONAL
19config GENERIC_FIND_FIRST_BIT 19config GENERIC_FIND_FIRST_BIT
20 bool 20 bool
21 21
22config GENERIC_FIND_NEXT_BIT
23 bool
24
25config GENERIC_FIND_BIT_LE
26 bool
27
28config GENERIC_FIND_LAST_BIT
29 bool
30 default y
31
32config CRC_CCITT 22config CRC_CCITT
33 tristate "CRC-CCITT functions" 23 tristate "CRC-CCITT functions"
34 help 24 help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0efcdca9751a..28afa4c5333c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -670,6 +670,15 @@ config STACKTRACE
670 bool 670 bool
671 depends on STACKTRACE_SUPPORT 671 depends on STACKTRACE_SUPPORT
672 672
673config DEBUG_STACK_USAGE
674 bool "Stack utilization instrumentation"
675 depends on DEBUG_KERNEL
676 help
677 Enables the display of the minimum amount of free stack which each
678 task has ever had available in the sysrq-T and sysrq-P debug output.
679
680 This option will slow down process creation somewhat.
681
673config DEBUG_KOBJECT 682config DEBUG_KOBJECT
674 bool "kobject debugging" 683 bool "kobject debugging"
675 depends on DEBUG_KERNEL 684 depends on DEBUG_KERNEL
@@ -983,6 +992,17 @@ config DEBUG_FORCE_WEAK_PER_CPU
983 To ensure that generic code follows the above rules, this 992 To ensure that generic code follows the above rules, this
984 option forces all percpu variables to be defined as weak. 993 option forces all percpu variables to be defined as weak.
985 994
995config DEBUG_PER_CPU_MAPS
996 bool "Debug access to per_cpu maps"
997 depends on DEBUG_KERNEL
998 depends on SMP
999 help
1000 Say Y to verify that the per_cpu map being accessed has
1001 been set up. This adds a fair amount of code to kernel memory
1002 and decreases performance.
1003
1004 Say N if unsure.
1005
986config LKDTM 1006config LKDTM
987 tristate "Linux Kernel Dump Test Tool Module" 1007 tristate "Linux Kernel Dump Test Tool Module"
988 depends on DEBUG_FS 1008 depends on DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index 4b49a249064b..6b597fdb1898 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o 15 is_single_threaded.o plist.o decompress.o find_next_bit.o
16 16
17lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
18lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ 24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
25 bsearch.o 25 bsearch.o find_last_bit.o
26obj-y += kstrtox.o 26obj-y += kstrtox.o
27obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 27obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
28 28
@@ -39,10 +39,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
39obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o 39obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
40lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o 40lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
41lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o 41lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
42lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
43lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
44lib-$(CONFIG_GENERIC_FIND_BIT_LE) += find_next_bit.o
45obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
46 42
47CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) 43CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
48obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 44obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
diff --git a/lib/audit.c b/lib/audit.c
index 8e7dc1c63aa9..76bbed4a20e5 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -36,8 +36,10 @@ int audit_classify_arch(int arch)
36int audit_classify_syscall(int abi, unsigned syscall) 36int audit_classify_syscall(int abi, unsigned syscall)
37{ 37{
38 switch(syscall) { 38 switch(syscall) {
39#ifdef __NR_open
39 case __NR_open: 40 case __NR_open:
40 return 2; 41 return 2;
42#endif
41#ifdef __NR_openat 43#ifdef __NR_openat
42 case __NR_openat: 44 case __NR_openat:
43 return 3; 45 return 3;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 91e0ccfdb424..41baf02924e6 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -571,8 +571,11 @@ int bitmap_scnlistprintf(char *buf, unsigned int buflen,
571EXPORT_SYMBOL(bitmap_scnlistprintf); 571EXPORT_SYMBOL(bitmap_scnlistprintf);
572 572
573/** 573/**
574 * bitmap_parselist - convert list format ASCII string to bitmap 574 * __bitmap_parselist - convert list format ASCII string to bitmap
575 * @bp: read nul-terminated user string from this buffer 575 * @bp: read nul-terminated user string from this buffer
576 * @buflen: buffer size in bytes. If string is smaller than this
577 * then it must be terminated with a \0.
578 * @is_user: location of buffer, 0 indicates kernel space
576 * @maskp: write resulting mask here 579 * @maskp: write resulting mask here
577 * @nmaskbits: number of bits in mask to be written 580 * @nmaskbits: number of bits in mask to be written
578 * 581 *
@@ -587,20 +590,63 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
587 * %-EINVAL: invalid character in string 590 * %-EINVAL: invalid character in string
588 * %-ERANGE: bit number specified too large for mask 591 * %-ERANGE: bit number specified too large for mask
589 */ 592 */
590int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) 593static int __bitmap_parselist(const char *buf, unsigned int buflen,
594 int is_user, unsigned long *maskp,
595 int nmaskbits)
591{ 596{
592 unsigned a, b; 597 unsigned a, b;
598 int c, old_c, totaldigits;
599 const char __user *ubuf = buf;
600 int exp_digit, in_range;
593 601
602 totaldigits = c = 0;
594 bitmap_zero(maskp, nmaskbits); 603 bitmap_zero(maskp, nmaskbits);
595 do { 604 do {
596 if (!isdigit(*bp)) 605 exp_digit = 1;
597 return -EINVAL; 606 in_range = 0;
598 b = a = simple_strtoul(bp, (char **)&bp, BASEDEC); 607 a = b = 0;
599 if (*bp == '-') { 608
600 bp++; 609 /* Get the next cpu# or a range of cpu#'s */
601 if (!isdigit(*bp)) 610 while (buflen) {
611 old_c = c;
612 if (is_user) {
613 if (__get_user(c, ubuf++))
614 return -EFAULT;
615 } else
616 c = *buf++;
617 buflen--;
618 if (isspace(c))
619 continue;
620
621 /*
622 * If the last character was a space and the current
623 * character isn't '\0', we've got embedded whitespace.
624 * This is a no-no, so throw an error.
625 */
626 if (totaldigits && c && isspace(old_c))
627 return -EINVAL;
628
629 /* A '\0' or a ',' signal the end of a cpu# or range */
630 if (c == '\0' || c == ',')
631 break;
632
633 if (c == '-') {
634 if (exp_digit || in_range)
635 return -EINVAL;
636 b = 0;
637 in_range = 1;
638 exp_digit = 1;
639 continue;
640 }
641
642 if (!isdigit(c))
602 return -EINVAL; 643 return -EINVAL;
603 b = simple_strtoul(bp, (char **)&bp, BASEDEC); 644
645 b = b * 10 + (c - '0');
646 if (!in_range)
647 a = b;
648 exp_digit = 0;
649 totaldigits++;
604 } 650 }
605 if (!(a <= b)) 651 if (!(a <= b))
606 return -EINVAL; 652 return -EINVAL;
@@ -610,13 +656,52 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
610 set_bit(a, maskp); 656 set_bit(a, maskp);
611 a++; 657 a++;
612 } 658 }
613 if (*bp == ',') 659 } while (buflen && c == ',');
614 bp++;
615 } while (*bp != '\0' && *bp != '\n');
616 return 0; 660 return 0;
617} 661}
662
663int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
664{
665 char *nl = strchr(bp, '\n');
666 int len;
667
668 if (nl)
669 len = nl - bp;
670 else
671 len = strlen(bp);
672
673 return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
674}
618EXPORT_SYMBOL(bitmap_parselist); 675EXPORT_SYMBOL(bitmap_parselist);
619 676
677
678/**
679 * bitmap_parselist_user() - convert user buffer's list format ASCII string to bitmap
680 *
681 * @ubuf: pointer to user buffer containing string.
682 * @ulen: buffer size in bytes. If string is smaller than this
683 * then it must be terminated with a \0.
684 * @maskp: pointer to bitmap array that will contain result.
685 * @nmaskbits: size of bitmap, in bits.
686 *
687 * Wrapper for bitmap_parselist(), providing it with user buffer.
688 *
689 * We cannot have this as an inline function in bitmap.h because it needs
690 * linux/uaccess.h to get the access_ok() declaration and this causes
691 * cyclic dependencies.
692 */
693int bitmap_parselist_user(const char __user *ubuf,
694 unsigned int ulen, unsigned long *maskp,
695 int nmaskbits)
696{
697 if (!access_ok(VERIFY_READ, ubuf, ulen))
698 return -EFAULT;
699 return __bitmap_parselist((const char *)ubuf,
700 ulen, 1, maskp, nmaskbits);
701}
702EXPORT_SYMBOL(bitmap_parselist_user);
703
704
620/** 705/**
621 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap 706 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
622 * @buf: pointer to a bitmap 707 * @buf: pointer to a bitmap
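As a hedged in-kernel sketch (not part of the patch), the reworked parser still accepts the same list syntax; the 16-bit mask width and the wrapper function below are illustrative.

#include <linux/bitmap.h>

/* Hedged sketch: parse a CPU-list style string such as "0-3,8" into a
 * small bitmap and report how many bits it requested.
 */
static int sketch_parse_list(const char *str)
{
	DECLARE_BITMAP(mask, 16);
	int err;

	err = bitmap_parselist(str, mask, 16);
	if (err)
		return err;	/* -EINVAL on bad syntax, -ERANGE on bit > 15 */
	return bitmap_weight(mask, 16);
}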
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
index 5d202e36bdd8..d903959ad695 100644
--- a/lib/find_last_bit.c
+++ b/lib/find_last_bit.c
@@ -15,6 +15,8 @@
15#include <asm/types.h> 15#include <asm/types.h>
16#include <asm/byteorder.h> 16#include <asm/byteorder.h>
17 17
18#ifndef find_last_bit
19
18unsigned long find_last_bit(const unsigned long *addr, unsigned long size) 20unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
19{ 21{
20 unsigned long words; 22 unsigned long words;
@@ -43,3 +45,5 @@ found:
43 return size; 45 return size;
44} 46}
45EXPORT_SYMBOL(find_last_bit); 47EXPORT_SYMBOL(find_last_bit);
48
49#endif
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index b0a8767282bf..4bd75a73ba00 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -16,7 +16,7 @@
16 16
17#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) 17#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
18 18
19#ifdef CONFIG_GENERIC_FIND_NEXT_BIT 19#ifndef find_next_bit
20/* 20/*
21 * Find the next set bit in a memory region. 21 * Find the next set bit in a memory region.
22 */ 22 */
@@ -59,7 +59,9 @@ found_middle:
59 return result + __ffs(tmp); 59 return result + __ffs(tmp);
60} 60}
61EXPORT_SYMBOL(find_next_bit); 61EXPORT_SYMBOL(find_next_bit);
62#endif
62 63
64#ifndef find_next_zero_bit
63/* 65/*
64 * This implementation of find_{first,next}_zero_bit was stolen from 66 * This implementation of find_{first,next}_zero_bit was stolen from
65 * Linus' asm-alpha/bitops.h. 67 * Linus' asm-alpha/bitops.h.
@@ -103,9 +105,9 @@ found_middle:
103 return result + ffz(tmp); 105 return result + ffz(tmp);
104} 106}
105EXPORT_SYMBOL(find_next_zero_bit); 107EXPORT_SYMBOL(find_next_zero_bit);
106#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ 108#endif
107 109
108#ifdef CONFIG_GENERIC_FIND_FIRST_BIT 110#ifndef find_first_bit
109/* 111/*
110 * Find the first set bit in a memory region. 112 * Find the first set bit in a memory region.
111 */ 113 */
@@ -131,7 +133,9 @@ found:
131 return result + __ffs(tmp); 133 return result + __ffs(tmp);
132} 134}
133EXPORT_SYMBOL(find_first_bit); 135EXPORT_SYMBOL(find_first_bit);
136#endif
134 137
138#ifndef find_first_zero_bit
135/* 139/*
136 * Find the first cleared bit in a memory region. 140 * Find the first cleared bit in a memory region.
137 */ 141 */
@@ -157,10 +161,9 @@ found:
157 return result + ffz(tmp); 161 return result + ffz(tmp);
158} 162}
159EXPORT_SYMBOL(find_first_zero_bit); 163EXPORT_SYMBOL(find_first_zero_bit);
160#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ 164#endif
161 165
162#ifdef __BIG_ENDIAN 166#ifdef __BIG_ENDIAN
163#ifdef CONFIG_GENERIC_FIND_BIT_LE
164 167
165/* include/linux/byteorder does not support "unsigned long" type */ 168/* include/linux/byteorder does not support "unsigned long" type */
166static inline unsigned long ext2_swabp(const unsigned long * x) 169static inline unsigned long ext2_swabp(const unsigned long * x)
@@ -186,6 +189,7 @@ static inline unsigned long ext2_swab(const unsigned long y)
186#endif 189#endif
187} 190}
188 191
192#ifndef find_next_zero_bit_le
189unsigned long find_next_zero_bit_le(const void *addr, unsigned 193unsigned long find_next_zero_bit_le(const void *addr, unsigned
190 long size, unsigned long offset) 194 long size, unsigned long offset)
191{ 195{
@@ -229,7 +233,9 @@ found_middle_swap:
229 return result + ffz(ext2_swab(tmp)); 233 return result + ffz(ext2_swab(tmp));
230} 234}
231EXPORT_SYMBOL(find_next_zero_bit_le); 235EXPORT_SYMBOL(find_next_zero_bit_le);
236#endif
232 237
238#ifndef find_next_bit_le
233unsigned long find_next_bit_le(const void *addr, unsigned 239unsigned long find_next_bit_le(const void *addr, unsigned
234 long size, unsigned long offset) 240 long size, unsigned long offset)
235{ 241{
@@ -274,6 +280,6 @@ found_middle_swap:
274 return result + __ffs(ext2_swab(tmp)); 280 return result + __ffs(ext2_swab(tmp));
275} 281}
276EXPORT_SYMBOL(find_next_bit_le); 282EXPORT_SYMBOL(find_next_bit_le);
283#endif
277 284
278#endif /* CONFIG_GENERIC_FIND_BIT_LE */
279#endif /* __BIG_ENDIAN */ 285#endif /* __BIG_ENDIAN */
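The #ifdef-to-#ifndef swap above replaces the Kconfig knobs removed from lib/Kconfig: the generic bit-search functions are now always built, and an architecture hides any of them by defining the corresponding name in its own headers. A hedged sketch of what such an override might look like; the define-to-itself idiom and the extern prototype are assumptions, not taken from this patch.

/* Hedged sketch: an architecture's <asm/bitops.h> supplies its own
 * find_next_bit() and suppresses the generic copy by defining the name.
 */
#define find_next_bit find_next_bit
extern unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset);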
diff --git a/lib/flex_array.c b/lib/flex_array.c
index cab7621f98aa..9b8b89458c4c 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -24,6 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/stddef.h> 25#include <linux/stddef.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/reciprocal_div.h>
27 28
28struct flex_array_part { 29struct flex_array_part {
29 char elements[FLEX_ARRAY_PART_SIZE]; 30 char elements[FLEX_ARRAY_PART_SIZE];
@@ -70,15 +71,15 @@ static inline int elements_fit_in_base(struct flex_array *fa)
70 * Element size | Objects | Objects | 71 * Element size | Objects | Objects |
71 * PAGE_SIZE=4k | 32-bit | 64-bit | 72 * PAGE_SIZE=4k | 32-bit | 64-bit |
72 * ---------------------------------| 73 * ---------------------------------|
73 * 1 bytes | 4186112 | 2093056 | 74 * 1 bytes | 4177920 | 2088960 |
74 * 2 bytes | 2093056 | 1046528 | 75 * 2 bytes | 2088960 | 1044480 |
75 * 3 bytes | 1395030 | 697515 | 76 * 3 bytes | 1392300 | 696150 |
76 * 4 bytes | 1046528 | 523264 | 77 * 4 bytes | 1044480 | 522240 |
77 * 32 bytes | 130816 | 65408 | 78 * 32 bytes | 130560 | 65408 |
78 * 33 bytes | 126728 | 63364 | 79 * 33 bytes | 126480 | 63240 |
79 * 2048 bytes | 2044 | 1022 | 80 * 2048 bytes | 2040 | 1020 |
80 * 2049 bytes | 1022 | 511 | 81 * 2049 bytes | 1020 | 510 |
81 * void * | 1046528 | 261632 | 82 * void * | 1044480 | 261120 |
82 * 83 *
83 * Since 64-bit pointers are twice the size, we lose half the 84 * Since 64-bit pointers are twice the size, we lose half the
84 * capacity in the base structure. Also note that no effort is made 85 * capacity in the base structure. Also note that no effort is made
@@ -88,11 +89,15 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
88 gfp_t flags) 89 gfp_t flags)
89{ 90{
90 struct flex_array *ret; 91 struct flex_array *ret;
92 int elems_per_part = 0;
93 int reciprocal_elems = 0;
91 int max_size = 0; 94 int max_size = 0;
92 95
93 if (element_size) 96 if (element_size) {
94 max_size = FLEX_ARRAY_NR_BASE_PTRS * 97 elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
95 FLEX_ARRAY_ELEMENTS_PER_PART(element_size); 98 reciprocal_elems = reciprocal_value(elems_per_part);
99 max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
100 }
96 101
97 /* max_size will end up 0 if element_size > PAGE_SIZE */ 102 /* max_size will end up 0 if element_size > PAGE_SIZE */
98 if (total > max_size) 103 if (total > max_size)
@@ -102,6 +107,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
102 return NULL; 107 return NULL;
103 ret->element_size = element_size; 108 ret->element_size = element_size;
104 ret->total_nr_elements = total; 109 ret->total_nr_elements = total;
110 ret->elems_per_part = elems_per_part;
111 ret->reciprocal_elems = reciprocal_elems;
105 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) 112 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
106 memset(&ret->parts[0], FLEX_ARRAY_FREE, 113 memset(&ret->parts[0], FLEX_ARRAY_FREE,
107 FLEX_ARRAY_BASE_BYTES_LEFT); 114 FLEX_ARRAY_BASE_BYTES_LEFT);
@@ -112,7 +119,7 @@ EXPORT_SYMBOL(flex_array_alloc);
112static int fa_element_to_part_nr(struct flex_array *fa, 119static int fa_element_to_part_nr(struct flex_array *fa,
113 unsigned int element_nr) 120 unsigned int element_nr)
114{ 121{
115 return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size); 122 return reciprocal_divide(element_nr, fa->reciprocal_elems);
116} 123}
117 124
118/** 125/**
@@ -141,12 +148,12 @@ void flex_array_free(struct flex_array *fa)
141EXPORT_SYMBOL(flex_array_free); 148EXPORT_SYMBOL(flex_array_free);
142 149
143static unsigned int index_inside_part(struct flex_array *fa, 150static unsigned int index_inside_part(struct flex_array *fa,
144 unsigned int element_nr) 151 unsigned int element_nr,
152 unsigned int part_nr)
145{ 153{
146 unsigned int part_offset; 154 unsigned int part_offset;
147 155
148 part_offset = element_nr % 156 part_offset = element_nr - part_nr * fa->elems_per_part;
149 FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
150 return part_offset * fa->element_size; 157 return part_offset * fa->element_size;
151} 158}
152 159
@@ -186,7 +193,7 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
186int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, 193int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
187 gfp_t flags) 194 gfp_t flags)
188{ 195{
189 int part_nr; 196 int part_nr = 0;
190 struct flex_array_part *part; 197 struct flex_array_part *part;
191 void *dst; 198 void *dst;
192 199
@@ -202,7 +209,7 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
202 if (!part) 209 if (!part)
203 return -ENOMEM; 210 return -ENOMEM;
204 } 211 }
205 dst = &part->elements[index_inside_part(fa, element_nr)]; 212 dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
206 memcpy(dst, src, fa->element_size); 213 memcpy(dst, src, fa->element_size);
207 return 0; 214 return 0;
208} 215}
@@ -217,7 +224,7 @@ EXPORT_SYMBOL(flex_array_put);
217 */ 224 */
218int flex_array_clear(struct flex_array *fa, unsigned int element_nr) 225int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
219{ 226{
220 int part_nr; 227 int part_nr = 0;
221 struct flex_array_part *part; 228 struct flex_array_part *part;
222 void *dst; 229 void *dst;
223 230
@@ -233,7 +240,7 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
233 if (!part) 240 if (!part)
234 return -EINVAL; 241 return -EINVAL;
235 } 242 }
236 dst = &part->elements[index_inside_part(fa, element_nr)]; 243 dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
237 memset(dst, FLEX_ARRAY_FREE, fa->element_size); 244 memset(dst, FLEX_ARRAY_FREE, fa->element_size);
238 return 0; 245 return 0;
239} 246}
@@ -302,7 +309,7 @@ EXPORT_SYMBOL(flex_array_prealloc);
302 */ 309 */
303void *flex_array_get(struct flex_array *fa, unsigned int element_nr) 310void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
304{ 311{
305 int part_nr; 312 int part_nr = 0;
306 struct flex_array_part *part; 313 struct flex_array_part *part;
307 314
308 if (!fa->element_size) 315 if (!fa->element_size)
@@ -317,7 +324,7 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
317 if (!part) 324 if (!part)
318 return NULL; 325 return NULL;
319 } 326 }
320 return &part->elements[index_inside_part(fa, element_nr)]; 327 return &part->elements[index_inside_part(fa, element_nr, part_nr)];
321} 328}
322EXPORT_SYMBOL(flex_array_get); 329EXPORT_SYMBOL(flex_array_get);
323 330
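The part-number lookup above no longer does an integer division on every access: reciprocal_value() is computed once in flex_array_alloc() and each fa_element_to_part_nr() becomes a multiply-and-shift. A hedged sketch of the equivalence; the wrapper function is illustrative.

#include <linux/reciprocal_div.h>

/* Hedged sketch: for the element ranges flex_array allows, this returns the
 * same value as element_nr / elems_per_part, where reciprocal_elems was
 * precomputed as reciprocal_value(elems_per_part).
 */
static unsigned int sketch_part_nr(unsigned int element_nr,
				   u32 reciprocal_elems)
{
	return reciprocal_divide(element_nr, reciprocal_elems);
}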
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 1923f1490e72..577ddf805975 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -39,17 +39,20 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
39EXPORT_SYMBOL(gen_pool_create); 39EXPORT_SYMBOL(gen_pool_create);
40 40
41/** 41/**
42 * gen_pool_add - add a new chunk of special memory to the pool 42 * gen_pool_add_virt - add a new chunk of special memory to the pool
43 * @pool: pool to add new memory chunk to 43 * @pool: pool to add new memory chunk to
44 * @addr: starting address of memory chunk to add to pool 44 * @virt: virtual starting address of memory chunk to add to pool
45 * @phys: physical starting address of memory chunk to add to pool
45 * @size: size in bytes of the memory chunk to add to pool 46 * @size: size in bytes of the memory chunk to add to pool
46 * @nid: node id of the node the chunk structure and bitmap should be 47 * @nid: node id of the node the chunk structure and bitmap should be
47 * allocated on, or -1 48 * allocated on, or -1
48 * 49 *
49 * Add a new chunk of special memory to the specified pool. 50 * Add a new chunk of special memory to the specified pool.
51 *
52 * Returns 0 on success or a -ve errno on failure.
50 */ 53 */
51int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, 54int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
52 int nid) 55 size_t size, int nid)
53{ 56{
54 struct gen_pool_chunk *chunk; 57 struct gen_pool_chunk *chunk;
55 int nbits = size >> pool->min_alloc_order; 58 int nbits = size >> pool->min_alloc_order;
@@ -58,11 +61,12 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
58 61
59 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); 62 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
60 if (unlikely(chunk == NULL)) 63 if (unlikely(chunk == NULL))
61 return -1; 64 return -ENOMEM;
62 65
63 spin_lock_init(&chunk->lock); 66 spin_lock_init(&chunk->lock);
64 chunk->start_addr = addr; 67 chunk->phys_addr = phys;
65 chunk->end_addr = addr + size; 68 chunk->start_addr = virt;
69 chunk->end_addr = virt + size;
66 70
67 write_lock(&pool->lock); 71 write_lock(&pool->lock);
68 list_add(&chunk->next_chunk, &pool->chunks); 72 list_add(&chunk->next_chunk, &pool->chunks);
@@ -70,7 +74,32 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
70 74
71 return 0; 75 return 0;
72} 76}
73EXPORT_SYMBOL(gen_pool_add); 77EXPORT_SYMBOL(gen_pool_add_virt);
78
79/**
80 * gen_pool_virt_to_phys - return the physical address of memory
81 * @pool: pool to allocate from
82 * @addr: starting address of memory
83 *
84 * Returns the physical address on success, or -1 on error.
85 */
86phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
87{
88 struct list_head *_chunk;
89 struct gen_pool_chunk *chunk;
90
91 read_lock(&pool->lock);
92 list_for_each(_chunk, &pool->chunks) {
93 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
94
95 if (addr >= chunk->start_addr && addr < chunk->end_addr)
96 return chunk->phys_addr + addr - chunk->start_addr;
97 }
98 read_unlock(&pool->lock);
99
100 return -1;
101}
102EXPORT_SYMBOL(gen_pool_virt_to_phys);
74 103
75/** 104/**
76 * gen_pool_destroy - destroy a special memory pool 105 * gen_pool_destroy - destroy a special memory pool
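A hedged sketch (not part of the patch) of the extended genalloc API: a chunk is now registered with both its virtual and physical base via gen_pool_add_virt(), and gen_pool_virt_to_phys() translates an allocation back for device-visible use. The chunk size, allocation length, and NUMA node below are illustrative.

#include <linux/genalloc.h>

/* Hedged sketch: back a pool with one chunk and hand out a physically
 * addressable block from it.
 */
static phys_addr_t sketch_alloc_phys(struct gen_pool *pool,
				     void *virt, phys_addr_t phys,
				     size_t chunk_size, size_t len)
{
	unsigned long va;

	if (gen_pool_add_virt(pool, (unsigned long)virt, phys,
			      chunk_size, -1))
		return -1;		/* add now returns a real -ve errno */

	va = gen_pool_alloc(pool, len);
	if (!va)
		return -1;
	return gen_pool_virt_to_phys(pool, va);
}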
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index a235f3cc471c..2dbae88090ac 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -17,6 +17,7 @@
17#include <linux/math64.h> 17#include <linux/math64.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <asm/uaccess.h>
20 21
21static inline char _tolower(const char c) 22static inline char _tolower(const char c)
22{ 23{
@@ -222,3 +223,28 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
222 return 0; 223 return 0;
223} 224}
224EXPORT_SYMBOL(kstrtos8); 225EXPORT_SYMBOL(kstrtos8);
226
227#define kstrto_from_user(f, g, type) \
228int f(const char __user *s, size_t count, unsigned int base, type *res) \
229{ \
230 /* sign, base 2 representation, newline, terminator */ \
231 char buf[1 + sizeof(type) * 8 + 1 + 1]; \
232 \
233 count = min(count, sizeof(buf) - 1); \
234 if (copy_from_user(buf, s, count)) \
235 return -EFAULT; \
236 buf[count] = '\0'; \
237 return g(buf, base, res); \
238} \
239EXPORT_SYMBOL(f)
240
241kstrto_from_user(kstrtoull_from_user, kstrtoull, unsigned long long);
242kstrto_from_user(kstrtoll_from_user, kstrtoll, long long);
243kstrto_from_user(kstrtoul_from_user, kstrtoul, unsigned long);
244kstrto_from_user(kstrtol_from_user, kstrtol, long);
245kstrto_from_user(kstrtouint_from_user, kstrtouint, unsigned int);
246kstrto_from_user(kstrtoint_from_user, kstrtoint, int);
247kstrto_from_user(kstrtou16_from_user, kstrtou16, u16);
248kstrto_from_user(kstrtos16_from_user, kstrtos16, s16);
249kstrto_from_user(kstrtou8_from_user, kstrtou8, u8);
250kstrto_from_user(kstrtos8_from_user, kstrtos8, s8);
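A hedged sketch of the intended consumer: a proc/sysfs-style write handler that previously had to copy and NUL-terminate the user buffer by hand can now call a _from_user variant directly. The handler and the variable it updates are illustrative; the declarations are assumed to come from the matching header change.

#include <linux/kernel.h>
#include <linux/fs.h>

/* Hedged sketch: accept a decimal value written by userspace. */
static unsigned long sketch_threshold;

static ssize_t sketch_threshold_write(struct file *file,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	unsigned long val;
	int err;

	err = kstrtoul_from_user(buf, count, 10, &val);
	if (err)
		return err;

	sketch_threshold = val;
	return count;
}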
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 270de9d31b8c..a07e7268d7ed 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -84,7 +84,7 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
84 if (e_count > LC_MAX_ACTIVE) 84 if (e_count > LC_MAX_ACTIVE)
85 return NULL; 85 return NULL;
86 86
87 slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL); 87 slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
88 if (!slot) 88 if (!slot)
89 goto out_fail; 89 goto out_fail;
90 element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL); 90 element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 90cbe4bb5960..4407f8c9b1f7 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -16,7 +16,7 @@ void show_mem(unsigned int filter)
16 nonshared = 0, highmem = 0; 16 nonshared = 0, highmem = 0;
17 17
18 printk("Mem-Info:\n"); 18 printk("Mem-Info:\n");
19 __show_free_areas(filter); 19 show_free_areas(filter);
20 20
21 for_each_online_pgdat(pgdat) { 21 for_each_online_pgdat(pgdat) {
22 unsigned long i, flags; 22 unsigned long i, flags;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 1d659d7bb0f8..c11205688fb4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -898,7 +898,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
898 case 'U': 898 case 'U':
899 return uuid_string(buf, end, ptr, spec, fmt); 899 return uuid_string(buf, end, ptr, spec, fmt);
900 case 'V': 900 case 'V':
901 return buf + vsnprintf(buf, end - buf, 901 return buf + vsnprintf(buf, end > buf ? end - buf : 0,
902 ((struct va_format *)ptr)->fmt, 902 ((struct va_format *)ptr)->fmt,
903 *(((struct va_format *)ptr)->va)); 903 *(((struct va_format *)ptr)->va));
904 case 'K': 904 case 'K':
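The %pV fix above guards the recursive vsnprintf() call against a `buf' that has already advanced past `end'. A sketch of the same clamp in isolation (the helper is hypothetical; struct va_format is the in-kernel type behind %pV):

#include <linux/kernel.h>

/* Hypothetical helper: append a va_format to [buf, end).  Once buf >= end,
 * a zero size is passed so vsnprintf() only reports the would-be length
 * instead of writing through a bogus, possibly huge, size_t. */
static char *append_vaf(char *buf, char *end, const struct va_format *vaf)
{
	return buf + vsnprintf(buf, end > buf ? end - buf : 0,
			       vaf->fmt, *vaf->va);
}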
diff --git a/mm/Kconfig b/mm/Kconfig
index e9c0c61f2ddd..8ca47a5ee9c8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -347,3 +347,26 @@ config NEED_PER_CPU_KM
347 depends on !SMP 347 depends on !SMP
348 bool 348 bool
349 default y 349 default y
350
351config CLEANCACHE
352 bool "Enable cleancache driver to cache clean pages if tmem is present"
353 default n
354 help
355 Cleancache can be thought of as a page-granularity victim cache
356 for clean pages that the kernel's pageframe replacement algorithm
357 (PFRA) would like to keep around, but can't since there isn't enough
358 memory. So when the PFRA "evicts" a page, it first attempts to use
359 cleancache code to put the data contained in that page into
360 "transcendent memory", memory that is not directly accessible or
361 addressable by the kernel and is of unknown and possibly
362 time-varying size. And when a cleancache-enabled
363 filesystem wishes to access a page in a file on disk, it first
364 checks cleancache to see if it already contains it; if it does,
365 the page is copied into the kernel and a disk access is avoided.
366 When a transcendent memory driver is available (such as zcache or
367 Xen transcendent memory), a significant I/O reduction
368 may be achieved. When none is available, all cleancache calls
369 are reduced to a single pointer-compare-against-NULL resulting
370 in a negligible performance hit.
371
372 If unsure, say Y to enable cleancache
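In code terms, the help text above boils down to two hooks: on eviction a clean page is offered to the backend, and on a read miss the backend is asked before the disk. A conceptual sketch, where the hypothetical do_disk_read() stands in for the normal block-layer path:

#include <linux/cleancache.h>
#include <linux/mm.h>

int do_disk_read(struct page *page);	/* hypothetical fallback path */

/* Conceptual only: how a read path consults cleancache before doing I/O. */
static int read_page_sketch(struct page *page)
{
	if (cleancache_get_page(page) == 0)
		return 0;		/* filled from transcendent memory */
	return do_disk_read(page);	/* miss: fall back to the disk */
}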
diff --git a/mm/Makefile b/mm/Makefile
index 42a8326c3e3d..836e4163c1bf 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -49,3 +49,4 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
49obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o 49obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
50obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o 50obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
51obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o 51obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
52obj-$(CONFIG_CLEANCACHE) += cleancache.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index befc87531e4f..f032e6e1e09a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -63,10 +63,10 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
63 unsigned long background_thresh; 63 unsigned long background_thresh;
64 unsigned long dirty_thresh; 64 unsigned long dirty_thresh;
65 unsigned long bdi_thresh; 65 unsigned long bdi_thresh;
66 unsigned long nr_dirty, nr_io, nr_more_io, nr_wb; 66 unsigned long nr_dirty, nr_io, nr_more_io;
67 struct inode *inode; 67 struct inode *inode;
68 68
69 nr_wb = nr_dirty = nr_io = nr_more_io = 0; 69 nr_dirty = nr_io = nr_more_io = 0;
70 spin_lock(&inode_wb_list_lock); 70 spin_lock(&inode_wb_list_lock);
71 list_for_each_entry(inode, &wb->b_dirty, i_wb_list) 71 list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
72 nr_dirty++; 72 nr_dirty++;
diff --git a/mm/cleancache.c b/mm/cleancache.c
new file mode 100644
index 000000000000..bcaae4c2a770
--- /dev/null
+++ b/mm/cleancache.c
@@ -0,0 +1,244 @@
1/*
2 * Cleancache frontend
3 *
4 * This code provides the generic "frontend" layer to call a matching
5 * "backend" driver implementation of cleancache. See
6 * Documentation/vm/cleancache.txt for more information.
7 *
8 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
9 * Author: Dan Magenheimer
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2.
12 */
13
14#include <linux/module.h>
15#include <linux/fs.h>
16#include <linux/exportfs.h>
17#include <linux/mm.h>
18#include <linux/cleancache.h>
19
20/*
21 * This global enablement flag may be read thousands of times per second
22 * by cleancache_get/put/flush even on systems where cleancache_ops
23 * is not claimed (e.g. cleancache is config'ed on but remains
24 * disabled), so is preferred to the slower alternative: a function
25 * call that checks a non-global.
26 */
27int cleancache_enabled;
28EXPORT_SYMBOL(cleancache_enabled);
29
30/*
31 * cleancache_ops is set by cleancache_ops_register to contain the pointers
32 * to the cleancache "backend" implementation functions.
33 */
34static struct cleancache_ops cleancache_ops;
35
36/* useful stats available in /sys/kernel/mm/cleancache */
37static unsigned long cleancache_succ_gets;
38static unsigned long cleancache_failed_gets;
39static unsigned long cleancache_puts;
40static unsigned long cleancache_flushes;
41
42/*
43 * register operations for cleancache, returning previous thus allowing
44 * detection of multiple backends and possible nesting
45 */
46struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
47{
48 struct cleancache_ops old = cleancache_ops;
49
50 cleancache_ops = *ops;
51 cleancache_enabled = 1;
52 return old;
53}
54EXPORT_SYMBOL(cleancache_register_ops);
55
56/* Called by a cleancache-enabled filesystem at time of mount */
57void __cleancache_init_fs(struct super_block *sb)
58{
59 sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
60}
61EXPORT_SYMBOL(__cleancache_init_fs);
62
63/* Called by a cleancache-enabled clustered filesystem at time of mount */
64void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
65{
66 sb->cleancache_poolid =
67 (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
68}
69EXPORT_SYMBOL(__cleancache_init_shared_fs);
70
71/*
72 * If the filesystem uses exportable filehandles, use the filehandle as
73 * the key, else use the inode number.
74 */
75static int cleancache_get_key(struct inode *inode,
76 struct cleancache_filekey *key)
77{
78 int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
79 int len = 0, maxlen = CLEANCACHE_KEY_MAX;
80 struct super_block *sb = inode->i_sb;
81
82 key->u.ino = inode->i_ino;
83 if (sb->s_export_op != NULL) {
84 fhfn = sb->s_export_op->encode_fh;
85 if (fhfn) {
86 struct dentry d;
87 d.d_inode = inode;
88 len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
89 if (len <= 0 || len == 255)
90 return -1;
91 if (maxlen > CLEANCACHE_KEY_MAX)
92 return -1;
93 }
94 }
95 return 0;
96}
97
98/*
99 * "Get" data from cleancache associated with the poolid/inode/index
100 * that were specified when the data was put to cleancache and, if
101 * successful, use it to fill the specified page with data and return 0.
102 * If the get fails, the pageframe is left unchanged and -1 is returned.
103 * Page must be locked by caller.
104 */
105int __cleancache_get_page(struct page *page)
106{
107 int ret = -1;
108 int pool_id;
109 struct cleancache_filekey key = { .u.key = { 0 } };
110
111 VM_BUG_ON(!PageLocked(page));
112 pool_id = page->mapping->host->i_sb->cleancache_poolid;
113 if (pool_id < 0)
114 goto out;
115
116 if (cleancache_get_key(page->mapping->host, &key) < 0)
117 goto out;
118
119 ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
120 if (ret == 0)
121 cleancache_succ_gets++;
122 else
123 cleancache_failed_gets++;
124out:
125 return ret;
126}
127EXPORT_SYMBOL(__cleancache_get_page);
128
129/*
130 * "Put" data from a page to cleancache and associate it with the
131 * (previously-obtained per-filesystem) poolid and the page's
132 * inode and page index. Page must be locked. Note that a put_page
133 * always "succeeds", though a subsequent get_page may succeed or fail.
134 */
135void __cleancache_put_page(struct page *page)
136{
137 int pool_id;
138 struct cleancache_filekey key = { .u.key = { 0 } };
139
140 VM_BUG_ON(!PageLocked(page));
141 pool_id = page->mapping->host->i_sb->cleancache_poolid;
142 if (pool_id >= 0 &&
143 cleancache_get_key(page->mapping->host, &key) >= 0) {
144 (*cleancache_ops.put_page)(pool_id, key, page->index, page);
145 cleancache_puts++;
146 }
147}
148EXPORT_SYMBOL(__cleancache_put_page);
149
150/*
151 * Flush any data from cleancache associated with the poolid and the
152 * page's inode and page index so that a subsequent "get" will fail.
153 */
154void __cleancache_flush_page(struct address_space *mapping, struct page *page)
155{
156 /* careful... page->mapping is NULL sometimes when this is called */
157 int pool_id = mapping->host->i_sb->cleancache_poolid;
158 struct cleancache_filekey key = { .u.key = { 0 } };
159
160 if (pool_id >= 0) {
161 VM_BUG_ON(!PageLocked(page));
162 if (cleancache_get_key(mapping->host, &key) >= 0) {
163 (*cleancache_ops.flush_page)(pool_id, key, page->index);
164 cleancache_flushes++;
165 }
166 }
167}
168EXPORT_SYMBOL(__cleancache_flush_page);
169
170/*
171 * Flush all data from cleancache associated with the poolid and the
172 * mapping's inode so that all subsequent gets to this poolid/inode
173 * will fail.
174 */
175void __cleancache_flush_inode(struct address_space *mapping)
176{
177 int pool_id = mapping->host->i_sb->cleancache_poolid;
178 struct cleancache_filekey key = { .u.key = { 0 } };
179
180 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
181 (*cleancache_ops.flush_inode)(pool_id, key);
182}
183EXPORT_SYMBOL(__cleancache_flush_inode);
184
185/*
186 * Called by any cleancache-enabled filesystem at time of unmount;
187 * note that pool_id is surrendered and may be returned by a subsequent
188 * cleancache_init_fs or cleancache_init_shared_fs
189 */
190void __cleancache_flush_fs(struct super_block *sb)
191{
192 if (sb->cleancache_poolid >= 0) {
193 int old_poolid = sb->cleancache_poolid;
194 sb->cleancache_poolid = -1;
195 (*cleancache_ops.flush_fs)(old_poolid);
196 }
197}
198EXPORT_SYMBOL(__cleancache_flush_fs);
199
200#ifdef CONFIG_SYSFS
201
202/* see Documentation/ABI/xxx/sysfs-kernel-mm-cleancache */
203
204#define CLEANCACHE_SYSFS_RO(_name) \
205 static ssize_t cleancache_##_name##_show(struct kobject *kobj, \
206 struct kobj_attribute *attr, char *buf) \
207 { \
208 return sprintf(buf, "%lu\n", cleancache_##_name); \
209 } \
210 static struct kobj_attribute cleancache_##_name##_attr = { \
211 .attr = { .name = __stringify(_name), .mode = 0444 }, \
212 .show = cleancache_##_name##_show, \
213 }
214
215CLEANCACHE_SYSFS_RO(succ_gets);
216CLEANCACHE_SYSFS_RO(failed_gets);
217CLEANCACHE_SYSFS_RO(puts);
218CLEANCACHE_SYSFS_RO(flushes);
219
220static struct attribute *cleancache_attrs[] = {
221 &cleancache_succ_gets_attr.attr,
222 &cleancache_failed_gets_attr.attr,
223 &cleancache_puts_attr.attr,
224 &cleancache_flushes_attr.attr,
225 NULL,
226};
227
228static struct attribute_group cleancache_attr_group = {
229 .attrs = cleancache_attrs,
230 .name = "cleancache",
231};
232
233#endif /* CONFIG_SYSFS */
234
235static int __init init_cleancache(void)
236{
237#ifdef CONFIG_SYSFS
238 int err;
239
240 err = sysfs_create_group(mm_kobj, &cleancache_attr_group);
241#endif /* CONFIG_SYSFS */
242 return 0;
243}
244module_init(init_cleancache)
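For context, a backend claims the hooks above by filling in a struct cleancache_ops and calling cleancache_register_ops(). The following is a hedged sketch of the smallest possible, do-nothing backend; every my_* function is hypothetical, the prototypes simply mirror the calls made by the frontend above, and real backends such as zcache or the Xen tmem driver do the actual page storage.

#include <linux/module.h>
#include <linux/cleancache.h>

/* Hypothetical stubs: they accept the frontend's calls but cache nothing. */
static int my_init_fs(size_t pagesize) { return 0; /* pool id */ }
static int my_init_shared_fs(char *uuid, size_t pagesize) { return 0; }
static int my_get_page(int pool, struct cleancache_filekey key,
		       pgoff_t index, struct page *page) { return -1; }
static void my_put_page(int pool, struct cleancache_filekey key,
			pgoff_t index, struct page *page) { }
static void my_flush_page(int pool, struct cleancache_filekey key,
			  pgoff_t index) { }
static void my_flush_inode(int pool, struct cleancache_filekey key) { }
static void my_flush_fs(int pool) { }

static struct cleancache_ops my_ops = {
	.init_fs	= my_init_fs,
	.init_shared_fs	= my_init_shared_fs,
	.get_page	= my_get_page,
	.put_page	= my_put_page,
	.flush_page	= my_flush_page,
	.flush_inode	= my_flush_inode,
	.flush_fs	= my_flush_fs,
};

static struct cleancache_ops old_ops;	/* previous ops, returned by value */

static int __init my_backend_init(void)
{
	old_ops = cleancache_register_ops(&my_ops);
	return 0;
}
module_init(my_backend_init);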
diff --git a/mm/filemap.c b/mm/filemap.c
index c641edf553a9..bcdc393b6580 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -34,6 +34,7 @@
34#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ 34#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
35#include <linux/memcontrol.h> 35#include <linux/memcontrol.h>
36#include <linux/mm_inline.h> /* for page_is_file_cache() */ 36#include <linux/mm_inline.h> /* for page_is_file_cache() */
37#include <linux/cleancache.h>
37#include "internal.h" 38#include "internal.h"
38 39
39/* 40/*
@@ -58,16 +59,16 @@
58/* 59/*
59 * Lock ordering: 60 * Lock ordering:
60 * 61 *
61 * ->i_mmap_lock (truncate_pagecache) 62 * ->i_mmap_mutex (truncate_pagecache)
62 * ->private_lock (__free_pte->__set_page_dirty_buffers) 63 * ->private_lock (__free_pte->__set_page_dirty_buffers)
63 * ->swap_lock (exclusive_swap_page, others) 64 * ->swap_lock (exclusive_swap_page, others)
64 * ->mapping->tree_lock 65 * ->mapping->tree_lock
65 * 66 *
66 * ->i_mutex 67 * ->i_mutex
67 * ->i_mmap_lock (truncate->unmap_mapping_range) 68 * ->i_mmap_mutex (truncate->unmap_mapping_range)
68 * 69 *
69 * ->mmap_sem 70 * ->mmap_sem
70 * ->i_mmap_lock 71 * ->i_mmap_mutex
71 * ->page_table_lock or pte_lock (various, mainly in memory.c) 72 * ->page_table_lock or pte_lock (various, mainly in memory.c)
72 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) 73 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
73 * 74 *
@@ -84,7 +85,7 @@
84 * sb_lock (fs/fs-writeback.c) 85 * sb_lock (fs/fs-writeback.c)
85 * ->mapping->tree_lock (__sync_single_inode) 86 * ->mapping->tree_lock (__sync_single_inode)
86 * 87 *
87 * ->i_mmap_lock 88 * ->i_mmap_mutex
88 * ->anon_vma.lock (vma_adjust) 89 * ->anon_vma.lock (vma_adjust)
89 * 90 *
90 * ->anon_vma.lock 91 * ->anon_vma.lock
@@ -106,7 +107,7 @@
106 * 107 *
107 * (code doesn't rely on that order, so you could switch it around) 108 * (code doesn't rely on that order, so you could switch it around)
108 * ->tasklist_lock (memory_failure, collect_procs_ao) 109 * ->tasklist_lock (memory_failure, collect_procs_ao)
109 * ->i_mmap_lock 110 * ->i_mmap_mutex
110 */ 111 */
111 112
112/* 113/*
@@ -118,6 +119,16 @@ void __delete_from_page_cache(struct page *page)
118{ 119{
119 struct address_space *mapping = page->mapping; 120 struct address_space *mapping = page->mapping;
120 121
122 /*
123 * if we're uptodate, put the page into cleancache; otherwise
124 * invalidate any existing cleancache entries. We can't leave
125 * stale data around in the cleancache once our page is gone
126 */
127 if (PageUptodate(page) && PageMappedToDisk(page))
128 cleancache_put_page(page);
129 else
130 cleancache_flush_page(mapping, page);
131
121 radix_tree_delete(&mapping->page_tree, page->index); 132 radix_tree_delete(&mapping->page_tree, page->index);
122 page->mapping = NULL; 133 page->mapping = NULL;
123 mapping->nrpages--; 134 mapping->nrpages--;
@@ -562,6 +573,17 @@ void wait_on_page_bit(struct page *page, int bit_nr)
562} 573}
563EXPORT_SYMBOL(wait_on_page_bit); 574EXPORT_SYMBOL(wait_on_page_bit);
564 575
576int wait_on_page_bit_killable(struct page *page, int bit_nr)
577{
578 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
579
580 if (!test_bit(bit_nr, &page->flags))
581 return 0;
582
583 return __wait_on_bit(page_waitqueue(page), &wait,
584 sleep_on_page_killable, TASK_KILLABLE);
585}
586
565/** 587/**
566 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue 588 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
567 * @page: Page defining the wait queue of interest 589 * @page: Page defining the wait queue of interest
@@ -643,15 +665,32 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
643int __lock_page_or_retry(struct page *page, struct mm_struct *mm, 665int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
644 unsigned int flags) 666 unsigned int flags)
645{ 667{
646 if (!(flags & FAULT_FLAG_ALLOW_RETRY)) { 668 if (flags & FAULT_FLAG_ALLOW_RETRY) {
647 __lock_page(page); 669 /*
648 return 1; 670 * CAUTION! In this case, mmap_sem is not released
649 } else { 671 * even though we return 0.
650 if (!(flags & FAULT_FLAG_RETRY_NOWAIT)) { 672 */
651 up_read(&mm->mmap_sem); 673 if (flags & FAULT_FLAG_RETRY_NOWAIT)
674 return 0;
675
676 up_read(&mm->mmap_sem);
677 if (flags & FAULT_FLAG_KILLABLE)
678 wait_on_page_locked_killable(page);
679 else
652 wait_on_page_locked(page); 680 wait_on_page_locked(page);
653 }
654 return 0; 681 return 0;
682 } else {
683 if (flags & FAULT_FLAG_KILLABLE) {
684 int ret;
685
686 ret = __lock_page_killable(page);
687 if (ret) {
688 up_read(&mm->mmap_sem);
689 return 0;
690 }
691 } else
692 __lock_page(page);
693 return 1;
655 } 694 }
656} 695}
657 696
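To restate the contract the rewritten __lock_page_or_retry() implements: a non-zero return means the page is now locked with mmap_sem still held; a zero return means the page was not locked and mmap_sem has been dropped, except when FAULT_FLAG_RETRY_NOWAIT was passed. A hedged caller-side sketch, not an actual call site; handle_locked_page() is hypothetical:

#include <linux/mm.h>
#include <linux/pagemap.h>

int handle_locked_page(struct page *page);	/* hypothetical */

/* Conceptual fault-path fragment. */
static int fault_sketch(struct page *page, struct mm_struct *mm,
			unsigned int flags)
{
	if (!__lock_page_or_retry(page, mm, flags)) {
		/* Page not locked.  Unless FAULT_FLAG_RETRY_NOWAIT was set,
		 * mmap_sem has already been released for us. */
		return VM_FAULT_RETRY;
	}
	return handle_locked_page(page);	/* locked, mmap_sem still held */
}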
@@ -1528,15 +1567,17 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1528 /* If we don't want any read-ahead, don't bother */ 1567 /* If we don't want any read-ahead, don't bother */
1529 if (VM_RandomReadHint(vma)) 1568 if (VM_RandomReadHint(vma))
1530 return; 1569 return;
1570 if (!ra->ra_pages)
1571 return;
1531 1572
1532 if (VM_SequentialReadHint(vma) || 1573 if (VM_SequentialReadHint(vma)) {
1533 offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
1534 page_cache_sync_readahead(mapping, ra, file, offset, 1574 page_cache_sync_readahead(mapping, ra, file, offset,
1535 ra->ra_pages); 1575 ra->ra_pages);
1536 return; 1576 return;
1537 } 1577 }
1538 1578
1539 if (ra->mmap_miss < INT_MAX) 1579 /* Avoid banging the cache line if not needed */
1580 if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
1540 ra->mmap_miss++; 1581 ra->mmap_miss++;
1541 1582
1542 /* 1583 /*
@@ -1550,12 +1591,10 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1550 * mmap read-around 1591 * mmap read-around
1551 */ 1592 */
1552 ra_pages = max_sane_readahead(ra->ra_pages); 1593 ra_pages = max_sane_readahead(ra->ra_pages);
1553 if (ra_pages) { 1594 ra->start = max_t(long, 0, offset - ra_pages / 2);
1554 ra->start = max_t(long, 0, offset - ra_pages/2); 1595 ra->size = ra_pages;
1555 ra->size = ra_pages; 1596 ra->async_size = ra_pages / 4;
1556 ra->async_size = 0; 1597 ra_submit(ra, mapping, file);
1557 ra_submit(ra, mapping, file);
1558 }
1559} 1598}
1560 1599
1561/* 1600/*
@@ -1622,6 +1661,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1622 /* No page in the page cache at all */ 1661 /* No page in the page cache at all */
1623 do_sync_mmap_readahead(vma, ra, file, offset); 1662 do_sync_mmap_readahead(vma, ra, file, offset);
1624 count_vm_event(PGMAJFAULT); 1663 count_vm_event(PGMAJFAULT);
1664 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1625 ret = VM_FAULT_MAJOR; 1665 ret = VM_FAULT_MAJOR;
1626retry_find: 1666retry_find:
1627 page = find_get_page(mapping, offset); 1667 page = find_get_page(mapping, offset);
@@ -1660,7 +1700,6 @@ retry_find:
1660 return VM_FAULT_SIGBUS; 1700 return VM_FAULT_SIGBUS;
1661 } 1701 }
1662 1702
1663 ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
1664 vmf->page = page; 1703 vmf->page = page;
1665 return ret | VM_FAULT_LOCKED; 1704 return ret | VM_FAULT_LOCKED;
1666 1705
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 83364df74a33..93356cd12828 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -183,7 +183,7 @@ __xip_unmap (struct address_space * mapping,
183 return; 183 return;
184 184
185retry: 185retry:
186 spin_lock(&mapping->i_mmap_lock); 186 mutex_lock(&mapping->i_mmap_mutex);
187 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 187 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
188 mm = vma->vm_mm; 188 mm = vma->vm_mm;
189 address = vma->vm_start + 189 address = vma->vm_start +
@@ -201,7 +201,7 @@ retry:
201 page_cache_release(page); 201 page_cache_release(page);
202 } 202 }
203 } 203 }
204 spin_unlock(&mapping->i_mmap_lock); 204 mutex_unlock(&mapping->i_mmap_mutex);
205 205
206 if (locked) { 206 if (locked) {
207 mutex_unlock(&xip_sparse_mutex); 207 mutex_unlock(&xip_sparse_mutex);
diff --git a/mm/fremap.c b/mm/fremap.c
index ec520c7b28df..b8e0e2d468af 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -211,20 +211,20 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
211 } 211 }
212 goto out; 212 goto out;
213 } 213 }
214 spin_lock(&mapping->i_mmap_lock); 214 mutex_lock(&mapping->i_mmap_mutex);
215 flush_dcache_mmap_lock(mapping); 215 flush_dcache_mmap_lock(mapping);
216 vma->vm_flags |= VM_NONLINEAR; 216 vma->vm_flags |= VM_NONLINEAR;
217 vma_prio_tree_remove(vma, &mapping->i_mmap); 217 vma_prio_tree_remove(vma, &mapping->i_mmap);
218 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear); 218 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
219 flush_dcache_mmap_unlock(mapping); 219 flush_dcache_mmap_unlock(mapping);
220 spin_unlock(&mapping->i_mmap_lock); 220 mutex_unlock(&mapping->i_mmap_mutex);
221 } 221 }
222 222
223 if (vma->vm_flags & VM_LOCKED) { 223 if (vma->vm_flags & VM_LOCKED) {
224 /* 224 /*
225 * drop PG_Mlocked flag for over-mapped range 225 * drop PG_Mlocked flag for over-mapped range
226 */ 226 */
227 unsigned int saved_flags = vma->vm_flags; 227 vm_flags_t saved_flags = vma->vm_flags;
228 munlock_vma_pages_range(vma, start, start + size); 228 munlock_vma_pages_range(vma, start, start + size);
229 vma->vm_flags = saved_flags; 229 vma->vm_flags = saved_flags;
230 } 230 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 83326ad66d9b..615d9743a3cb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1139,7 +1139,7 @@ static int __split_huge_page_splitting(struct page *page,
1139 * We can't temporarily set the pmd to null in order 1139 * We can't temporarily set the pmd to null in order
1140 * to split it, the pmd must remain marked huge at all 1140 * to split it, the pmd must remain marked huge at all
1141 * times or the VM won't take the pmd_trans_huge paths 1141 * times or the VM won't take the pmd_trans_huge paths
1142 * and it won't wait on the anon_vma->root->lock to 1142 * and it won't wait on the anon_vma->root->mutex to
1143 * serialize against split_huge_page*. 1143 * serialize against split_huge_page*.
1144 */ 1144 */
1145 pmdp_splitting_flush_notify(vma, address, pmd); 1145 pmdp_splitting_flush_notify(vma, address, pmd);
@@ -1333,7 +1333,7 @@ static int __split_huge_page_map(struct page *page,
1333 return ret; 1333 return ret;
1334} 1334}
1335 1335
1336/* must be called with anon_vma->root->lock hold */ 1336/* must be called with anon_vma->root->mutex held */
1337static void __split_huge_page(struct page *page, 1337static void __split_huge_page(struct page *page,
1338 struct anon_vma *anon_vma) 1338 struct anon_vma *anon_vma)
1339{ 1339{
@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
1771 1771
1772 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1772 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1773#ifndef CONFIG_NUMA 1773#ifndef CONFIG_NUMA
1774 up_read(&mm->mmap_sem);
1774 VM_BUG_ON(!*hpage); 1775 VM_BUG_ON(!*hpage);
1775 new_page = *hpage; 1776 new_page = *hpage;
1776 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1777 up_read(&mm->mmap_sem);
1778 return;
1779 }
1780#else 1777#else
1781 VM_BUG_ON(*hpage); 1778 VM_BUG_ON(*hpage);
1782 /* 1779 /*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
1791 */ 1788 */
1792 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, 1789 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1793 node, __GFP_OTHER_NODE); 1790 node, __GFP_OTHER_NODE);
1791
1792 /*
1793 * After allocating the hugepage, release the mmap_sem read lock in
1794 * preparation for taking it in write mode.
1795 */
1796 up_read(&mm->mmap_sem);
1794 if (unlikely(!new_page)) { 1797 if (unlikely(!new_page)) {
1795 up_read(&mm->mmap_sem);
1796 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 1798 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1797 *hpage = ERR_PTR(-ENOMEM); 1799 *hpage = ERR_PTR(-ENOMEM);
1798 return; 1800 return;
1799 } 1801 }
1802#endif
1803
1800 count_vm_event(THP_COLLAPSE_ALLOC); 1804 count_vm_event(THP_COLLAPSE_ALLOC);
1801 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1805 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1802 up_read(&mm->mmap_sem); 1806#ifdef CONFIG_NUMA
1803 put_page(new_page); 1807 put_page(new_page);
1808#endif
1804 return; 1809 return;
1805 } 1810 }
1806#endif
1807
1808 /* after allocating the hugepage upgrade to mmap_sem write mode */
1809 up_read(&mm->mmap_sem);
1810 1811
1811 /* 1812 /*
1812 * Prevent all access to pagetables with the exception of 1813 * Prevent all access to pagetables with the exception of
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbb4a5bbb958..f33bb319b73f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2205,7 +2205,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2205 unsigned long sz = huge_page_size(h); 2205 unsigned long sz = huge_page_size(h);
2206 2206
2207 /* 2207 /*
2208 * A page gathering list, protected by per file i_mmap_lock. The 2208 * A page gathering list, protected by per file i_mmap_mutex. The
2209 * lock is used to avoid list corruption from multiple unmapping 2209 * lock is used to avoid list corruption from multiple unmapping
2210 * of the same page since we are using page->lru. 2210 * of the same page since we are using page->lru.
2211 */ 2211 */
@@ -2274,9 +2274,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2274void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 2274void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2275 unsigned long end, struct page *ref_page) 2275 unsigned long end, struct page *ref_page)
2276{ 2276{
2277 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 2277 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2278 __unmap_hugepage_range(vma, start, end, ref_page); 2278 __unmap_hugepage_range(vma, start, end, ref_page);
2279 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 2279 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2280} 2280}
2281 2281
2282/* 2282/*
@@ -2308,7 +2308,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2308 * this mapping should be shared between all the VMAs, 2308 * this mapping should be shared between all the VMAs,
2309 * __unmap_hugepage_range() is called as the lock is already held 2309 * __unmap_hugepage_range() is called as the lock is already held
2310 */ 2310 */
2311 spin_lock(&mapping->i_mmap_lock); 2311 mutex_lock(&mapping->i_mmap_mutex);
2312 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 2312 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2313 /* Do not unmap the current VMA */ 2313 /* Do not unmap the current VMA */
2314 if (iter_vma == vma) 2314 if (iter_vma == vma)
@@ -2326,7 +2326,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2326 address, address + huge_page_size(h), 2326 address, address + huge_page_size(h),
2327 page); 2327 page);
2328 } 2328 }
2329 spin_unlock(&mapping->i_mmap_lock); 2329 mutex_unlock(&mapping->i_mmap_mutex);
2330 2330
2331 return 1; 2331 return 1;
2332} 2332}
@@ -2810,7 +2810,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2810 BUG_ON(address >= end); 2810 BUG_ON(address >= end);
2811 flush_cache_range(vma, address, end); 2811 flush_cache_range(vma, address, end);
2812 2812
2813 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 2813 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2814 spin_lock(&mm->page_table_lock); 2814 spin_lock(&mm->page_table_lock);
2815 for (; address < end; address += huge_page_size(h)) { 2815 for (; address < end; address += huge_page_size(h)) {
2816 ptep = huge_pte_offset(mm, address); 2816 ptep = huge_pte_offset(mm, address);
@@ -2825,7 +2825,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2825 } 2825 }
2826 } 2826 }
2827 spin_unlock(&mm->page_table_lock); 2827 spin_unlock(&mm->page_table_lock);
2828 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 2828 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2829 2829
2830 flush_tlb_range(vma, start, end); 2830 flush_tlb_range(vma, start, end);
2831} 2831}
@@ -2833,7 +2833,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2833int hugetlb_reserve_pages(struct inode *inode, 2833int hugetlb_reserve_pages(struct inode *inode,
2834 long from, long to, 2834 long from, long to,
2835 struct vm_area_struct *vma, 2835 struct vm_area_struct *vma,
2836 int acctflag) 2836 vm_flags_t vm_flags)
2837{ 2837{
2838 long ret, chg; 2838 long ret, chg;
2839 struct hstate *h = hstate_inode(inode); 2839 struct hstate *h = hstate_inode(inode);
@@ -2843,7 +2843,7 @@ int hugetlb_reserve_pages(struct inode *inode,
2843 * attempt will be made for VM_NORESERVE to allocate a page 2843 * attempt will be made for VM_NORESERVE to allocate a page
2844 * and filesystem quota without using reserves 2844 * and filesystem quota without using reserves
2845 */ 2845 */
2846 if (acctflag & VM_NORESERVE) 2846 if (vm_flags & VM_NORESERVE)
2847 return 0; 2847 return 0;
2848 2848
2849 /* 2849 /*
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 1d29cdfe8ebb..4019979b2637 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -21,6 +21,5 @@ struct mm_struct init_mm = {
21 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), 21 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
22 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), 22 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
23 .mmlist = LIST_HEAD_INIT(init_mm.mmlist), 23 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
24 .cpu_vm_mask = CPU_MASK_ALL,
25 INIT_MM_CONTEXT(init_mm) 24 INIT_MM_CONTEXT(init_mm)
26}; 25};
diff --git a/mm/internal.h b/mm/internal.h
index 9d0ced8e505e..d071d380fb49 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -66,6 +66,10 @@ static inline unsigned long page_order(struct page *page)
66 return page_private(page); 66 return page_private(page);
67} 67}
68 68
69/* mm/util.c */
70void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71 struct vm_area_struct *prev, struct rb_node *rb_parent);
72
69#ifdef CONFIG_MMU 73#ifdef CONFIG_MMU
70extern long mlock_vma_pages_range(struct vm_area_struct *vma, 74extern long mlock_vma_pages_range(struct vm_area_struct *vma,
71 unsigned long start, unsigned long end); 75 unsigned long start, unsigned long end);
diff --git a/mm/ksm.c b/mm/ksm.c
index 942dfc73a2ff..d708b3ef2260 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -35,6 +35,7 @@
35#include <linux/ksm.h> 35#include <linux/ksm.h>
36#include <linux/hash.h> 36#include <linux/hash.h>
37#include <linux/freezer.h> 37#include <linux/freezer.h>
38#include <linux/oom.h>
38 39
39#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
40#include "internal.h" 41#include "internal.h"
@@ -1894,9 +1895,11 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
1894 if (ksm_run != flags) { 1895 if (ksm_run != flags) {
1895 ksm_run = flags; 1896 ksm_run = flags;
1896 if (flags & KSM_RUN_UNMERGE) { 1897 if (flags & KSM_RUN_UNMERGE) {
1897 current->flags |= PF_OOM_ORIGIN; 1898 int oom_score_adj;
1899
1900 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1898 err = unmerge_and_remove_all_rmap_items(); 1901 err = unmerge_and_remove_all_rmap_items();
1899 current->flags &= ~PF_OOM_ORIGIN; 1902 test_set_oom_score_adj(oom_score_adj);
1900 if (err) { 1903 if (err) {
1901 ksm_run = KSM_RUN_STOP; 1904 ksm_run = KSM_RUN_STOP;
1902 count = err; 1905 count = err;
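The ksm hunk replaces the old PF_OOM_ORIGIN task flag with a save/raise/restore of oom_score_adj around the unmerge. Condensed, the pattern looks like this; it is a sketch of the hunk above, not additional code:

#include <linux/oom.h>

/* Mark the current task as the preferred OOM victim for the duration of a
 * potentially memory-hungry operation, then restore the previous value. */
static int unmerge_all_sketch(void)
{
	int old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
	int err = unmerge_and_remove_all_rmap_items();

	test_set_oom_score_adj(old_adj);
	return err;
}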
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 010f9166fa6e..bd9052a5d3ad 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -94,6 +94,8 @@ enum mem_cgroup_events_index {
94 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ 94 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
95 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ 95 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
96 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */ 96 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
97 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
98 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
97 MEM_CGROUP_EVENTS_NSTATS, 99 MEM_CGROUP_EVENTS_NSTATS,
98}; 100};
99/* 101/*
@@ -231,6 +233,11 @@ struct mem_cgroup {
231 * reclaimed from. 233 * reclaimed from.
232 */ 234 */
233 int last_scanned_child; 235 int last_scanned_child;
236 int last_scanned_node;
237#if MAX_NUMNODES > 1
238 nodemask_t scan_nodes;
239 unsigned long next_scan_node_update;
240#endif
234 /* 241 /*
235 * Should the accounting and control be hierarchical, per subtree? 242 * Should the accounting and control be hierarchical, per subtree?
236 */ 243 */
@@ -585,6 +592,16 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
585 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); 592 this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
586} 593}
587 594
595void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
596{
597 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
598}
599
600void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
601{
602 this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
603}
604
588static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem, 605static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
589 enum mem_cgroup_events_index idx) 606 enum mem_cgroup_events_index idx)
590{ 607{
@@ -624,18 +641,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
624 preempt_enable(); 641 preempt_enable();
625} 642}
626 643
644static unsigned long
645mem_cgroup_get_zonestat_node(struct mem_cgroup *mem, int nid, enum lru_list idx)
646{
647 struct mem_cgroup_per_zone *mz;
648 u64 total = 0;
649 int zid;
650
651 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
652 mz = mem_cgroup_zoneinfo(mem, nid, zid);
653 total += MEM_CGROUP_ZSTAT(mz, idx);
654 }
655 return total;
656}
627static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, 657static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
628 enum lru_list idx) 658 enum lru_list idx)
629{ 659{
630 int nid, zid; 660 int nid;
631 struct mem_cgroup_per_zone *mz;
632 u64 total = 0; 661 u64 total = 0;
633 662
634 for_each_online_node(nid) 663 for_each_online_node(nid)
635 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 664 total += mem_cgroup_get_zonestat_node(mem, nid, idx);
636 mz = mem_cgroup_zoneinfo(mem, nid, zid);
637 total += MEM_CGROUP_ZSTAT(mz, idx);
638 }
639 return total; 665 return total;
640} 666}
641 667
@@ -813,6 +839,33 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
813 return (mem == root_mem_cgroup); 839 return (mem == root_mem_cgroup);
814} 840}
815 841
842void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
843{
844 struct mem_cgroup *mem;
845
846 if (!mm)
847 return;
848
849 rcu_read_lock();
850 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
851 if (unlikely(!mem))
852 goto out;
853
854 switch (idx) {
855 case PGMAJFAULT:
856 mem_cgroup_pgmajfault(mem, 1);
857 break;
858 case PGFAULT:
859 mem_cgroup_pgfault(mem, 1);
860 break;
861 default:
862 BUG();
863 }
864out:
865 rcu_read_unlock();
866}
867EXPORT_SYMBOL(mem_cgroup_count_vm_event);
868
816/* 869/*
817 * Following LRU functions are allowed to be used without PCG_LOCK. 870 * Following LRU functions are allowed to be used without PCG_LOCK.
818 * Operations are called by routine of global LRU independently from memcg. 871 * Operations are called by routine of global LRU independently from memcg.
@@ -1064,9 +1117,9 @@ int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
1064 return (active > inactive); 1117 return (active > inactive);
1065} 1118}
1066 1119
1067unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, 1120unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
1068 struct zone *zone, 1121 struct zone *zone,
1069 enum lru_list lru) 1122 enum lru_list lru)
1070{ 1123{
1071 int nid = zone_to_nid(zone); 1124 int nid = zone_to_nid(zone);
1072 int zid = zone_idx(zone); 1125 int zid = zone_idx(zone);
@@ -1075,6 +1128,93 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
1075 return MEM_CGROUP_ZSTAT(mz, lru); 1128 return MEM_CGROUP_ZSTAT(mz, lru);
1076} 1129}
1077 1130
1131#ifdef CONFIG_NUMA
1132static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
1133 int nid)
1134{
1135 unsigned long ret;
1136
1137 ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_FILE) +
1138 mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_FILE);
1139
1140 return ret;
1141}
1142
1143static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
1144{
1145 u64 total = 0;
1146 int nid;
1147
1148 for_each_node_state(nid, N_HIGH_MEMORY)
1149 total += mem_cgroup_node_nr_file_lru_pages(memcg, nid);
1150
1151 return total;
1152}
1153
1154static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
1155 int nid)
1156{
1157 unsigned long ret;
1158
1159 ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
1160 mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
1161
1162 return ret;
1163}
1164
1165static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
1166{
1167 u64 total = 0;
1168 int nid;
1169
1170 for_each_node_state(nid, N_HIGH_MEMORY)
1171 total += mem_cgroup_node_nr_anon_lru_pages(memcg, nid);
1172
1173 return total;
1174}
1175
1176static unsigned long
1177mem_cgroup_node_nr_unevictable_lru_pages(struct mem_cgroup *memcg, int nid)
1178{
1179 return mem_cgroup_get_zonestat_node(memcg, nid, LRU_UNEVICTABLE);
1180}
1181
1182static unsigned long
1183mem_cgroup_nr_unevictable_lru_pages(struct mem_cgroup *memcg)
1184{
1185 u64 total = 0;
1186 int nid;
1187
1188 for_each_node_state(nid, N_HIGH_MEMORY)
1189 total += mem_cgroup_node_nr_unevictable_lru_pages(memcg, nid);
1190
1191 return total;
1192}
1193
1194static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
1195 int nid)
1196{
1197 enum lru_list l;
1198 u64 total = 0;
1199
1200 for_each_lru(l)
1201 total += mem_cgroup_get_zonestat_node(memcg, nid, l);
1202
1203 return total;
1204}
1205
1206static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg)
1207{
1208 u64 total = 0;
1209 int nid;
1210
1211 for_each_node_state(nid, N_HIGH_MEMORY)
1212 total += mem_cgroup_node_nr_lru_pages(memcg, nid);
1213
1214 return total;
1215}
1216#endif /* CONFIG_NUMA */
1217
1078struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, 1218struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1079 struct zone *zone) 1219 struct zone *zone)
1080{ 1220{
@@ -1418,6 +1558,81 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1418 return ret; 1558 return ret;
1419} 1559}
1420 1560
1561#if MAX_NUMNODES > 1
1562
1563/*
1564 * Always updating the nodemask is not very good - even if we have an empty
1565 * list or the wrong list here, we can start from some node and traverse all
1566 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1567 *
1568 */
1569static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
1570{
1571 int nid;
1572
1573 if (time_after(mem->next_scan_node_update, jiffies))
1574 return;
1575
1576 mem->next_scan_node_update = jiffies + 10*HZ;
1577 /* make a nodemask where this memcg uses memory from */
1578 mem->scan_nodes = node_states[N_HIGH_MEMORY];
1579
1580 for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1581
1582 if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
1583 mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
1584 continue;
1585
1586 if (total_swap_pages &&
1587 (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
1588 mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
1589 continue;
1590 node_clear(nid, mem->scan_nodes);
1591 }
1592}
1593
1594/*
1595 * Selecting a node where we start reclaim from. Because what we need is just
1596 * reducing the usage counter, starting from anywhere is OK. Considering
1597 * memory reclaim from the current node, there are pros and cons.
1598 *
1599 * Freeing memory from current node means freeing memory from a node which
1600 * we'll use or we've used. So, it may make LRU bad. And if several threads
1601 * hit limits, they will contend on the same node. But freeing from a remote
1602 * node means more costs for memory reclaim because of memory latency.
1603 *
1604 * Now, we use round-robin. Better algorithm is welcomed.
1605 */
1606int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1607{
1608 int node;
1609
1610 mem_cgroup_may_update_nodemask(mem);
1611 node = mem->last_scanned_node;
1612
1613 node = next_node(node, mem->scan_nodes);
1614 if (node == MAX_NUMNODES)
1615 node = first_node(mem->scan_nodes);
1616 /*
1617 * We call this when we hit limit, not when pages are added to LRU.
1618 * No LRU may hold pages because all pages are UNEVICTABLE or
1619 * memcg is too small and all pages are not on LRU. In that case,
1620 * we use curret node.
1621 */
1622 if (unlikely(node == MAX_NUMNODES))
1623 node = numa_node_id();
1624
1625 mem->last_scanned_node = node;
1626 return node;
1627}
1628
1629#else
1630int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
1631{
1632 return 0;
1633}
1634#endif
1635
1421/* 1636/*
1422 * Scan the hierarchy if needed to reclaim memory. We remember the last child 1637 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1423 * we reclaimed from, so that we don't end up penalizing one child extensively 1638 * we reclaimed from, so that we don't end up penalizing one child extensively
@@ -1433,7 +1648,8 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1433static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, 1648static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1434 struct zone *zone, 1649 struct zone *zone,
1435 gfp_t gfp_mask, 1650 gfp_t gfp_mask,
1436 unsigned long reclaim_options) 1651 unsigned long reclaim_options,
1652 unsigned long *total_scanned)
1437{ 1653{
1438 struct mem_cgroup *victim; 1654 struct mem_cgroup *victim;
1439 int ret, total = 0; 1655 int ret, total = 0;
@@ -1442,6 +1658,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1442 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1658 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1443 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1659 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1444 unsigned long excess; 1660 unsigned long excess;
1661 unsigned long nr_scanned;
1445 1662
1446 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1447 1664
@@ -1484,10 +1701,12 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1484 continue; 1701 continue;
1485 } 1702 }
1486 /* we use swappiness of local cgroup */ 1703 /* we use swappiness of local cgroup */
1487 if (check_soft) 1704 if (check_soft) {
1488 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, 1705 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1489 noswap, get_swappiness(victim), zone); 1706 noswap, get_swappiness(victim), zone,
1490 else 1707 &nr_scanned);
1708 *total_scanned += nr_scanned;
1709 } else
1491 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, 1710 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1492 noswap, get_swappiness(victim)); 1711 noswap, get_swappiness(victim));
1493 css_put(&victim->css); 1712 css_put(&victim->css);
@@ -1503,7 +1722,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1503 if (!res_counter_soft_limit_excess(&root_mem->res)) 1722 if (!res_counter_soft_limit_excess(&root_mem->res))
1504 return total; 1723 return total;
1505 } else if (mem_cgroup_margin(root_mem)) 1724 } else if (mem_cgroup_margin(root_mem))
1506 return 1 + total; 1725 return total;
1507 } 1726 }
1508 return total; 1727 return total;
1509} 1728}
@@ -1928,7 +2147,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1928 return CHARGE_WOULDBLOCK; 2147 return CHARGE_WOULDBLOCK;
1929 2148
1930 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, 2149 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1931 gfp_mask, flags); 2150 gfp_mask, flags, NULL);
1932 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) 2151 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1933 return CHARGE_RETRY; 2152 return CHARGE_RETRY;
1934 /* 2153 /*
@@ -3211,7 +3430,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
3211 break; 3430 break;
3212 3431
3213 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3432 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3214 MEM_CGROUP_RECLAIM_SHRINK); 3433 MEM_CGROUP_RECLAIM_SHRINK,
3434 NULL);
3215 curusage = res_counter_read_u64(&memcg->res, RES_USAGE); 3435 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3216 /* Usage is reduced ? */ 3436 /* Usage is reduced ? */
3217 if (curusage >= oldusage) 3437 if (curusage >= oldusage)
@@ -3271,7 +3491,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3271 3491
3272 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, 3492 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
3273 MEM_CGROUP_RECLAIM_NOSWAP | 3493 MEM_CGROUP_RECLAIM_NOSWAP |
3274 MEM_CGROUP_RECLAIM_SHRINK); 3494 MEM_CGROUP_RECLAIM_SHRINK,
3495 NULL);
3275 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); 3496 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
3276 /* Usage is reduced ? */ 3497 /* Usage is reduced ? */
3277 if (curusage >= oldusage) 3498 if (curusage >= oldusage)
@@ -3285,7 +3506,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3285} 3506}
3286 3507
3287unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 3508unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3288 gfp_t gfp_mask) 3509 gfp_t gfp_mask,
3510 unsigned long *total_scanned)
3289{ 3511{
3290 unsigned long nr_reclaimed = 0; 3512 unsigned long nr_reclaimed = 0;
3291 struct mem_cgroup_per_zone *mz, *next_mz = NULL; 3513 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
@@ -3293,6 +3515,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3293 int loop = 0; 3515 int loop = 0;
3294 struct mem_cgroup_tree_per_zone *mctz; 3516 struct mem_cgroup_tree_per_zone *mctz;
3295 unsigned long long excess; 3517 unsigned long long excess;
3518 unsigned long nr_scanned;
3296 3519
3297 if (order > 0) 3520 if (order > 0)
3298 return 0; 3521 return 0;
@@ -3311,10 +3534,13 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3311 if (!mz) 3534 if (!mz)
3312 break; 3535 break;
3313 3536
3537 nr_scanned = 0;
3314 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, 3538 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3315 gfp_mask, 3539 gfp_mask,
3316 MEM_CGROUP_RECLAIM_SOFT); 3540 MEM_CGROUP_RECLAIM_SOFT,
3541 &nr_scanned);
3317 nr_reclaimed += reclaimed; 3542 nr_reclaimed += reclaimed;
3543 *total_scanned += nr_scanned;
3318 spin_lock(&mctz->lock); 3544 spin_lock(&mctz->lock);
3319 3545
3320 /* 3546 /*
@@ -3337,10 +3563,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
3337 */ 3563 */
3338 next_mz = 3564 next_mz =
3339 __mem_cgroup_largest_soft_limit_node(mctz); 3565 __mem_cgroup_largest_soft_limit_node(mctz);
3340 if (next_mz == mz) { 3566 if (next_mz == mz)
3341 css_put(&next_mz->mem->css); 3567 css_put(&next_mz->mem->css);
3342 next_mz = NULL; 3568 else /* next_mz == NULL or other memcg */
3343 } else /* next_mz == NULL or other memcg */
3344 break; 3569 break;
3345 } while (1); 3570 } while (1);
3346 } 3571 }
@@ -3772,6 +3997,8 @@ enum {
3772 MCS_PGPGIN, 3997 MCS_PGPGIN,
3773 MCS_PGPGOUT, 3998 MCS_PGPGOUT,
3774 MCS_SWAP, 3999 MCS_SWAP,
4000 MCS_PGFAULT,
4001 MCS_PGMAJFAULT,
3775 MCS_INACTIVE_ANON, 4002 MCS_INACTIVE_ANON,
3776 MCS_ACTIVE_ANON, 4003 MCS_ACTIVE_ANON,
3777 MCS_INACTIVE_FILE, 4004 MCS_INACTIVE_FILE,
@@ -3794,6 +4021,8 @@ struct {
3794 {"pgpgin", "total_pgpgin"}, 4021 {"pgpgin", "total_pgpgin"},
3795 {"pgpgout", "total_pgpgout"}, 4022 {"pgpgout", "total_pgpgout"},
3796 {"swap", "total_swap"}, 4023 {"swap", "total_swap"},
4024 {"pgfault", "total_pgfault"},
4025 {"pgmajfault", "total_pgmajfault"},
3797 {"inactive_anon", "total_inactive_anon"}, 4026 {"inactive_anon", "total_inactive_anon"},
3798 {"active_anon", "total_active_anon"}, 4027 {"active_anon", "total_active_anon"},
3799 {"inactive_file", "total_inactive_file"}, 4028 {"inactive_file", "total_inactive_file"},
@@ -3822,6 +4051,10 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3822 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); 4051 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3823 s->stat[MCS_SWAP] += val * PAGE_SIZE; 4052 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3824 } 4053 }
4054 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
4055 s->stat[MCS_PGFAULT] += val;
4056 val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
4057 s->stat[MCS_PGMAJFAULT] += val;
3825 4058
3826 /* per zone stat */ 4059 /* per zone stat */
3827 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); 4060 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -3845,6 +4078,51 @@ mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3845 mem_cgroup_get_local_stat(iter, s); 4078 mem_cgroup_get_local_stat(iter, s);
3846} 4079}
3847 4080
4081#ifdef CONFIG_NUMA
4082static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4083{
4084 int nid;
4085 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4086 unsigned long node_nr;
4087 struct cgroup *cont = m->private;
4088 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4089
4090 total_nr = mem_cgroup_nr_lru_pages(mem_cont);
4091 seq_printf(m, "total=%lu", total_nr);
4092 for_each_node_state(nid, N_HIGH_MEMORY) {
4093 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid);
4094 seq_printf(m, " N%d=%lu", nid, node_nr);
4095 }
4096 seq_putc(m, '\n');
4097
4098 file_nr = mem_cgroup_nr_file_lru_pages(mem_cont);
4099 seq_printf(m, "file=%lu", file_nr);
4100 for_each_node_state(nid, N_HIGH_MEMORY) {
4101 node_nr = mem_cgroup_node_nr_file_lru_pages(mem_cont, nid);
4102 seq_printf(m, " N%d=%lu", nid, node_nr);
4103 }
4104 seq_putc(m, '\n');
4105
4106 anon_nr = mem_cgroup_nr_anon_lru_pages(mem_cont);
4107 seq_printf(m, "anon=%lu", anon_nr);
4108 for_each_node_state(nid, N_HIGH_MEMORY) {
4109 node_nr = mem_cgroup_node_nr_anon_lru_pages(mem_cont, nid);
4110 seq_printf(m, " N%d=%lu", nid, node_nr);
4111 }
4112 seq_putc(m, '\n');
4113
4114 unevictable_nr = mem_cgroup_nr_unevictable_lru_pages(mem_cont);
4115 seq_printf(m, "unevictable=%lu", unevictable_nr);
4116 for_each_node_state(nid, N_HIGH_MEMORY) {
4117 node_nr = mem_cgroup_node_nr_unevictable_lru_pages(mem_cont,
4118 nid);
4119 seq_printf(m, " N%d=%lu", nid, node_nr);
4120 }
4121 seq_putc(m, '\n');
4122 return 0;
4123}
4124#endif /* CONFIG_NUMA */
4125
3848static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4126static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3849 struct cgroup_map_cb *cb) 4127 struct cgroup_map_cb *cb)
3850{ 4128{
@@ -3855,6 +4133,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3855 memset(&mystat, 0, sizeof(mystat)); 4133 memset(&mystat, 0, sizeof(mystat));
3856 mem_cgroup_get_local_stat(mem_cont, &mystat); 4134 mem_cgroup_get_local_stat(mem_cont, &mystat);
3857 4135
4136
3858 for (i = 0; i < NR_MCS_STAT; i++) { 4137 for (i = 0; i < NR_MCS_STAT; i++) {
3859 if (i == MCS_SWAP && !do_swap_account) 4138 if (i == MCS_SWAP && !do_swap_account)
3860 continue; 4139 continue;
@@ -4278,6 +4557,22 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4278 return 0; 4557 return 0;
4279} 4558}
4280 4559
4560#ifdef CONFIG_NUMA
4561static const struct file_operations mem_control_numa_stat_file_operations = {
4562 .read = seq_read,
4563 .llseek = seq_lseek,
4564 .release = single_release,
4565};
4566
4567static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4568{
4569 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4570
4571 file->f_op = &mem_control_numa_stat_file_operations;
4572 return single_open(file, mem_control_numa_stat_show, cont);
4573}
4574#endif /* CONFIG_NUMA */
4575
4281static struct cftype mem_cgroup_files[] = { 4576static struct cftype mem_cgroup_files[] = {
4282 { 4577 {
4283 .name = "usage_in_bytes", 4578 .name = "usage_in_bytes",
@@ -4341,6 +4636,12 @@ static struct cftype mem_cgroup_files[] = {
4341 .unregister_event = mem_cgroup_oom_unregister_event, 4636 .unregister_event = mem_cgroup_oom_unregister_event,
4342 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), 4637 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4343 }, 4638 },
4639#ifdef CONFIG_NUMA
4640 {
4641 .name = "numa_stat",
4642 .open = mem_control_numa_stat_open,
4643 },
4644#endif
4344}; 4645};
4345 4646
4346#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4647#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4596,6 +4897,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4596 res_counter_init(&mem->memsw, NULL); 4897 res_counter_init(&mem->memsw, NULL);
4597 } 4898 }
4598 mem->last_scanned_child = 0; 4899 mem->last_scanned_child = 0;
4900 mem->last_scanned_node = MAX_NUMNODES;
4599 INIT_LIST_HEAD(&mem->oom_notify); 4901 INIT_LIST_HEAD(&mem->oom_notify);
4600 4902
4601 if (parent) 4903 if (parent)
@@ -4953,8 +5255,7 @@ static void mem_cgroup_clear_mc(void)
4953 5255
4954static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5256static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4955 struct cgroup *cgroup, 5257 struct cgroup *cgroup,
4956 struct task_struct *p, 5258 struct task_struct *p)
4957 bool threadgroup)
4958{ 5259{
4959 int ret = 0; 5260 int ret = 0;
4960 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); 5261 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
@@ -4993,8 +5294,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4993 5294
4994static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5295static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4995 struct cgroup *cgroup, 5296 struct cgroup *cgroup,
4996 struct task_struct *p, 5297 struct task_struct *p)
4997 bool threadgroup)
4998{ 5298{
4999 mem_cgroup_clear_mc(); 5299 mem_cgroup_clear_mc();
5000} 5300}
@@ -5112,8 +5412,7 @@ retry:
5112static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5412static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5113 struct cgroup *cont, 5413 struct cgroup *cont,
5114 struct cgroup *old_cont, 5414 struct cgroup *old_cont,
5115 struct task_struct *p, 5415 struct task_struct *p)
5116 bool threadgroup)
5117{ 5416{
5118 struct mm_struct *mm; 5417 struct mm_struct *mm;
5119 5418
@@ -5131,22 +5430,19 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5131#else /* !CONFIG_MMU */ 5430#else /* !CONFIG_MMU */
5132static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5431static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5133 struct cgroup *cgroup, 5432 struct cgroup *cgroup,
5134 struct task_struct *p, 5433 struct task_struct *p)
5135 bool threadgroup)
5136{ 5434{
5137 return 0; 5435 return 0;
5138} 5436}
5139static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, 5437static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5140 struct cgroup *cgroup, 5438 struct cgroup *cgroup,
5141 struct task_struct *p, 5439 struct task_struct *p)
5142 bool threadgroup)
5143{ 5440{
5144} 5441}
5145static void mem_cgroup_move_task(struct cgroup_subsys *ss, 5442static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5146 struct cgroup *cont, 5443 struct cgroup *cont,
5147 struct cgroup *old_cont, 5444 struct cgroup *old_cont,
5148 struct task_struct *p, 5445 struct task_struct *p)
5149 bool threadgroup)
5150{ 5446{
5151} 5447}
5152#endif 5448#endif
@@ -5169,19 +5465,12 @@ struct cgroup_subsys mem_cgroup_subsys = {
5169static int __init enable_swap_account(char *s) 5465static int __init enable_swap_account(char *s)
5170{ 5466{
5171 /* consider enabled if no parameter or 1 is given */ 5467 /* consider enabled if no parameter or 1 is given */
5172 if (!(*s) || !strcmp(s, "=1")) 5468 if (!strcmp(s, "1"))
5173 really_do_swap_account = 1; 5469 really_do_swap_account = 1;
5174 else if (!strcmp(s, "=0")) 5470 else if (!strcmp(s, "0"))
5175 really_do_swap_account = 0; 5471 really_do_swap_account = 0;
5176 return 1; 5472 return 1;
5177} 5473}
5178__setup("swapaccount", enable_swap_account); 5474__setup("swapaccount=", enable_swap_account);
5179 5475
5180static int __init disable_swap_account(char *s)
5181{
5182 printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
5183 enable_swap_account("=0");
5184 return 1;
5185}
5186__setup("noswapaccount", disable_swap_account);
5187#endif 5476#endif
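For reference, mem_control_numa_stat_show() above emits one line per LRU class followed by per-node counts. The values below are made up; only the layout is taken from the seq_printf() calls:

total=8192 N0=6144 N1=2048
file=5120 N0=4096 N1=1024
anon=3072 N0=2048 N1=1024
unevictable=0 N0=0 N1=0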
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2b9a5eef39e0..5c8f7e08928d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -239,7 +239,11 @@ void shake_page(struct page *p, int access)
239 if (access) { 239 if (access) {
240 int nr; 240 int nr;
241 do { 241 do {
242 nr = shrink_slab(1000, GFP_KERNEL, 1000); 242 struct shrink_control shrink = {
243 .gfp_mask = GFP_KERNEL,
244 };
245
246 nr = shrink_slab(&shrink, 1000, 1000);
243 if (page_count(p) == 1) 247 if (page_count(p) == 1)
244 break; 248 break;
245 } while (nr > 10); 249 } while (nr > 10);
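The shake_page() hunk bundles the reclaim parameters into a shrink_control structure instead of passing gfp_mask positionally. A hedged userspace sketch of that calling convention; the field set, return value, and reclaim math below are placeholders, not the kernel's shrink_slab().

#include <stdio.h>

struct shrink_control {
	unsigned int gfp_mask;
	unsigned long nr_to_scan;
};

static unsigned long shrink_slab(struct shrink_control *sc,
				 unsigned long nr_pages_scanned,
				 unsigned long lru_pages)
{
	(void)sc->gfp_mask;	/* a real shrinker would honour this */
	if (!lru_pages)
		return 0;
	/* placeholder pressure calculation, not the kernel's */
	return nr_pages_scanned * 128 / lru_pages;
}

int main(void)
{
	struct shrink_control shrink = {
		.gfp_mask = 0,	/* GFP_KERNEL stand-in */
	};
	unsigned long nr = shrink_slab(&shrink, 1000, 1000);

	printf("reclaimed %lu objects\n", nr);
	return 0;
}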
@@ -429,7 +433,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
429 */ 433 */
430 434
431 read_lock(&tasklist_lock); 435 read_lock(&tasklist_lock);
432 spin_lock(&mapping->i_mmap_lock); 436 mutex_lock(&mapping->i_mmap_mutex);
433 for_each_process(tsk) { 437 for_each_process(tsk) {
434 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 438 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
435 439
@@ -449,7 +453,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
449 add_to_kill(tsk, page, vma, to_kill, tkc); 453 add_to_kill(tsk, page, vma, to_kill, tkc);
450 } 454 }
451 } 455 }
452 spin_unlock(&mapping->i_mmap_lock); 456 mutex_unlock(&mapping->i_mmap_mutex);
453 read_unlock(&tasklist_lock); 457 read_unlock(&tasklist_lock);
454} 458}
455 459
@@ -1440,16 +1444,12 @@ int soft_offline_page(struct page *page, int flags)
1440 */ 1444 */
1441 ret = invalidate_inode_page(page); 1445 ret = invalidate_inode_page(page);
1442 unlock_page(page); 1446 unlock_page(page);
1443
1444 /* 1447 /*
1445 * Drop count because page migration doesn't like raised
1446 * counts. The page could get re-allocated, but if it becomes
1447 * LRU the isolation will just fail.
1448 * RED-PEN would be better to keep it isolated here, but we 1448 * RED-PEN would be better to keep it isolated here, but we
1449 * would need to fix isolation locking first. 1449 * would need to fix isolation locking first.
1450 */ 1450 */
1451 put_page(page);
1452 if (ret == 1) { 1451 if (ret == 1) {
1452 put_page(page);
1453 ret = 0; 1453 ret = 0;
1454 pr_info("soft_offline: %#lx: invalidated\n", pfn); 1454 pr_info("soft_offline: %#lx: invalidated\n", pfn);
1455 goto done; 1455 goto done;
@@ -1461,6 +1461,11 @@ int soft_offline_page(struct page *page, int flags)
1461 * handles a large number of cases for us. 1461 * handles a large number of cases for us.
1462 */ 1462 */
1463 ret = isolate_lru_page(page); 1463 ret = isolate_lru_page(page);
1464 /*
1465 * Drop the page reference which came from get_any_page();
1466 * a successful isolate_lru_page() already took another one.
1467 */
1468 put_page(page);
1464 if (!ret) { 1469 if (!ret) {
1465 LIST_HEAD(pagelist); 1470 LIST_HEAD(pagelist);
1466 1471
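The soft_offline_page() hunk moves put_page() to after isolate_lru_page(), since a successful isolation takes its own reference. A toy refcount sketch of that hand-off, assuming a plain non-atomic counter; only the ordering is the point.

#include <assert.h>
#include <stdio.h>

/* Toy page with a plain reference count; the kernel uses atomics and
 * far more state, this only illustrates the hand-off ordering. */
struct page { int count; };

static void get_page(struct page *p) { p->count++; }
static void put_page(struct page *p) { assert(p->count > 0); p->count--; }

/* A successful isolation takes its own reference, like isolate_lru_page(). */
static int isolate_lru_page(struct page *p)
{
	get_page(p);
	return 0;	/* 0 == success, mirroring the kernel convention */
}

int main(void)
{
	struct page page = { .count = 1 };	/* ref from get_any_page() */
	int ret = isolate_lru_page(&page);

	/* Drop the get_any_page() reference only after isolation, so the
	 * page never reaches a zero count while we still use it. */
	put_page(&page);
	printf("ret=%d count=%d\n", ret, page.count);
	return 0;
}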
diff --git a/mm/memory.c b/mm/memory.c
index 61e66f026563..6953d3926e01 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -182,7 +182,7 @@ void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
182{ 182{
183 __sync_task_rss_stat(task, mm); 183 __sync_task_rss_stat(task, mm);
184} 184}
185#else 185#else /* SPLIT_RSS_COUNTING */
186 186
187#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) 187#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
188#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) 188#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
@@ -191,8 +191,205 @@ static void check_sync_rss_stat(struct task_struct *task)
191{ 191{
192} 192}
193 193
194#endif /* SPLIT_RSS_COUNTING */
195
196#ifdef HAVE_GENERIC_MMU_GATHER
197
198static int tlb_next_batch(struct mmu_gather *tlb)
199{
200 struct mmu_gather_batch *batch;
201
202 batch = tlb->active;
203 if (batch->next) {
204 tlb->active = batch->next;
205 return 1;
206 }
207
208 batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
209 if (!batch)
210 return 0;
211
212 batch->next = NULL;
213 batch->nr = 0;
214 batch->max = MAX_GATHER_BATCH;
215
216 tlb->active->next = batch;
217 tlb->active = batch;
218
219 return 1;
220}
221
222/* tlb_gather_mmu
223 * Called to initialize an (on-stack) mmu_gather structure for page-table
224 * tear-down from @mm. The @fullmm argument is used when @mm is without
225 * users and we're going to destroy the full address space (exit/execve).
226 */
227void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
228{
229 tlb->mm = mm;
230
231 tlb->fullmm = fullmm;
232 tlb->need_flush = 0;
233 tlb->fast_mode = (num_possible_cpus() == 1);
234 tlb->local.next = NULL;
235 tlb->local.nr = 0;
236 tlb->local.max = ARRAY_SIZE(tlb->__pages);
237 tlb->active = &tlb->local;
238
239#ifdef CONFIG_HAVE_RCU_TABLE_FREE
240 tlb->batch = NULL;
241#endif
242}
243
244void tlb_flush_mmu(struct mmu_gather *tlb)
245{
246 struct mmu_gather_batch *batch;
247
248 if (!tlb->need_flush)
249 return;
250 tlb->need_flush = 0;
251 tlb_flush(tlb);
252#ifdef CONFIG_HAVE_RCU_TABLE_FREE
253 tlb_table_flush(tlb);
194#endif 254#endif
195 255
256 if (tlb_fast_mode(tlb))
257 return;
258
259 for (batch = &tlb->local; batch; batch = batch->next) {
260 free_pages_and_swap_cache(batch->pages, batch->nr);
261 batch->nr = 0;
262 }
263 tlb->active = &tlb->local;
264}
265
266/* tlb_finish_mmu
267 * Called at the end of the shootdown operation to free up any resources
268 * that were required.
269 */
270void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
271{
272 struct mmu_gather_batch *batch, *next;
273
274 tlb_flush_mmu(tlb);
275
276 /* keep the page table cache within bounds */
277 check_pgt_cache();
278
279 for (batch = tlb->local.next; batch; batch = next) {
280 next = batch->next;
281 free_pages((unsigned long)batch, 0);
282 }
283 tlb->local.next = NULL;
284}
285
286/* __tlb_remove_page
287 * Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
288 * handling the additional races in SMP caused by other CPUs caching valid
289 * mappings in their TLBs. Returns the number of free page slots left.
290 * When out of page slots we must call tlb_flush_mmu().
291 */
292int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
293{
294 struct mmu_gather_batch *batch;
295
296 tlb->need_flush = 1;
297
298 if (tlb_fast_mode(tlb)) {
299 free_page_and_swap_cache(page);
300 return 1; /* avoid calling tlb_flush_mmu() */
301 }
302
303 batch = tlb->active;
304 batch->pages[batch->nr++] = page;
305 if (batch->nr == batch->max) {
306 if (!tlb_next_batch(tlb))
307 return 0;
308 }
309 VM_BUG_ON(batch->nr > batch->max);
310
311 return batch->max - batch->nr;
312}
313
314#endif /* HAVE_GENERIC_MMU_GATHER */
315
316#ifdef CONFIG_HAVE_RCU_TABLE_FREE
317
318/*
319 * See the comment near struct mmu_table_batch.
320 */
321
322static void tlb_remove_table_smp_sync(void *arg)
323{
324 /* Simply deliver the interrupt */
325}
326
327static void tlb_remove_table_one(void *table)
328{
329 /*
330 * This isn't an RCU grace period and hence the page-tables cannot be
331 * assumed to be actually RCU-freed.
332 *
333 * It is however sufficient for software page-table walkers that rely on
334 * IRQ disabling. See the comment near struct mmu_table_batch.
335 */
336 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
337 __tlb_remove_table(table);
338}
339
340static void tlb_remove_table_rcu(struct rcu_head *head)
341{
342 struct mmu_table_batch *batch;
343 int i;
344
345 batch = container_of(head, struct mmu_table_batch, rcu);
346
347 for (i = 0; i < batch->nr; i++)
348 __tlb_remove_table(batch->tables[i]);
349
350 free_page((unsigned long)batch);
351}
352
353void tlb_table_flush(struct mmu_gather *tlb)
354{
355 struct mmu_table_batch **batch = &tlb->batch;
356
357 if (*batch) {
358 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
359 *batch = NULL;
360 }
361}
362
363void tlb_remove_table(struct mmu_gather *tlb, void *table)
364{
365 struct mmu_table_batch **batch = &tlb->batch;
366
367 tlb->need_flush = 1;
368
369 /*
370 * When there are fewer than two users of this mm there cannot be a
371 * concurrent page-table walk.
372 */
373 if (atomic_read(&tlb->mm->mm_users) < 2) {
374 __tlb_remove_table(table);
375 return;
376 }
377
378 if (*batch == NULL) {
379 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
380 if (*batch == NULL) {
381 tlb_remove_table_one(table);
382 return;
383 }
384 (*batch)->nr = 0;
385 }
386 (*batch)->tables[(*batch)->nr++] = table;
387 if ((*batch)->nr == MAX_TABLE_BATCH)
388 tlb_table_flush(tlb);
389}
390
391#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
392
196/* 393/*
197 * If a p?d_bad entry is found while walking page tables, report 394 * If a p?d_bad entry is found while walking page tables, report
198 * the error, before resetting entry to p?d_none. Usually (but 395 * the error, before resetting entry to p?d_none. Usually (but
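The large hunk above adds the generic mmu_gather: pages are collected into fixed-size batches chained off an on-stack structure and freed in one flush. A userspace sketch of just the batching part (the RCU table-free half is omitted); there is no TLB here, the batch size and out-of-memory contract are simplified, and the names only loosely mirror the kernel's.

#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 8

struct gather_batch {
	struct gather_batch *next;
	unsigned int nr;
	void *items[BATCH_MAX];
};

struct gather {
	struct gather_batch local;	/* embedded, like tlb->local */
	struct gather_batch *active;
	int need_flush;
};

static void gather_init(struct gather *g)
{
	g->local.next = NULL;
	g->local.nr = 0;
	g->active = &g->local;
	g->need_flush = 0;
}

/* Returns remaining slots; 0 means the caller must flush before adding more,
 * mirroring the __tlb_remove_page() contract. */
static unsigned int gather_add(struct gather *g, void *item)
{
	struct gather_batch *batch = g->active;

	g->need_flush = 1;
	batch->items[batch->nr++] = item;
	if (batch->nr == BATCH_MAX) {
		struct gather_batch *next = calloc(1, sizeof(*next));
		if (!next)
			return 0;	/* no room for another batch */
		batch->next = next;
		g->active = next;
	}
	return BATCH_MAX - g->active->nr;
}

static void gather_flush(struct gather *g)
{
	struct gather_batch *batch, *next;

	if (!g->need_flush)
		return;
	g->need_flush = 0;
	for (batch = &g->local; batch; batch = batch->next)
		for (unsigned int i = 0; i < batch->nr; i++)
			free(batch->items[i]);	/* stand-in for freeing pages */
	for (batch = g->local.next; batch; batch = next) {
		next = batch->next;
		free(batch);
	}
	g->local.nr = 0;
	g->local.next = NULL;
	g->active = &g->local;
}

int main(void)
{
	struct gather g;

	gather_init(&g);
	for (int i = 0; i < 20; i++)
		if (gather_add(&g, malloc(16)) == 0)
			gather_flush(&g);
	gather_flush(&g);
	puts("done");
	return 0;
}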
@@ -533,7 +730,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
533 add_taint(TAINT_BAD_PAGE); 730 add_taint(TAINT_BAD_PAGE);
534} 731}
535 732
536static inline int is_cow_mapping(unsigned int flags) 733static inline int is_cow_mapping(vm_flags_t flags)
537{ 734{
538 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 735 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
539} 736}
@@ -909,26 +1106,24 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
909static unsigned long zap_pte_range(struct mmu_gather *tlb, 1106static unsigned long zap_pte_range(struct mmu_gather *tlb,
910 struct vm_area_struct *vma, pmd_t *pmd, 1107 struct vm_area_struct *vma, pmd_t *pmd,
911 unsigned long addr, unsigned long end, 1108 unsigned long addr, unsigned long end,
912 long *zap_work, struct zap_details *details) 1109 struct zap_details *details)
913{ 1110{
914 struct mm_struct *mm = tlb->mm; 1111 struct mm_struct *mm = tlb->mm;
915 pte_t *pte; 1112 int force_flush = 0;
916 spinlock_t *ptl;
917 int rss[NR_MM_COUNTERS]; 1113 int rss[NR_MM_COUNTERS];
1114 spinlock_t *ptl;
1115 pte_t *pte;
918 1116
1117again:
919 init_rss_vec(rss); 1118 init_rss_vec(rss);
920
921 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1119 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
922 arch_enter_lazy_mmu_mode(); 1120 arch_enter_lazy_mmu_mode();
923 do { 1121 do {
924 pte_t ptent = *pte; 1122 pte_t ptent = *pte;
925 if (pte_none(ptent)) { 1123 if (pte_none(ptent)) {
926 (*zap_work)--;
927 continue; 1124 continue;
928 } 1125 }
929 1126
930 (*zap_work) -= PAGE_SIZE;
931
932 if (pte_present(ptent)) { 1127 if (pte_present(ptent)) {
933 struct page *page; 1128 struct page *page;
934 1129
@@ -974,7 +1169,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
974 page_remove_rmap(page); 1169 page_remove_rmap(page);
975 if (unlikely(page_mapcount(page) < 0)) 1170 if (unlikely(page_mapcount(page) < 0))
976 print_bad_pte(vma, addr, ptent, page); 1171 print_bad_pte(vma, addr, ptent, page);
977 tlb_remove_page(tlb, page); 1172 force_flush = !__tlb_remove_page(tlb, page);
1173 if (force_flush)
1174 break;
978 continue; 1175 continue;
979 } 1176 }
980 /* 1177 /*
@@ -995,19 +1192,31 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
995 print_bad_pte(vma, addr, ptent, NULL); 1192 print_bad_pte(vma, addr, ptent, NULL);
996 } 1193 }
997 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 1194 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
998 } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0)); 1195 } while (pte++, addr += PAGE_SIZE, addr != end);
999 1196
1000 add_mm_rss_vec(mm, rss); 1197 add_mm_rss_vec(mm, rss);
1001 arch_leave_lazy_mmu_mode(); 1198 arch_leave_lazy_mmu_mode();
1002 pte_unmap_unlock(pte - 1, ptl); 1199 pte_unmap_unlock(pte - 1, ptl);
1003 1200
1201 /*
1202 * mmu_gather ran out of room to batch pages, so we break out of
1203 * the PTE lock to avoid doing the potentially expensive TLB invalidate
1204 * and page-free while holding it.
1205 */
1206 if (force_flush) {
1207 force_flush = 0;
1208 tlb_flush_mmu(tlb);
1209 if (addr != end)
1210 goto again;
1211 }
1212
1004 return addr; 1213 return addr;
1005} 1214}
1006 1215
1007static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, 1216static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1008 struct vm_area_struct *vma, pud_t *pud, 1217 struct vm_area_struct *vma, pud_t *pud,
1009 unsigned long addr, unsigned long end, 1218 unsigned long addr, unsigned long end,
1010 long *zap_work, struct zap_details *details) 1219 struct zap_details *details)
1011{ 1220{
1012 pmd_t *pmd; 1221 pmd_t *pmd;
1013 unsigned long next; 1222 unsigned long next;
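The force_flush path above breaks out of the PTE lock when the gather is full, flushes, and retries from the address where it stopped. A small sketch of that break-out-and-retry shape, with an invented batch size and a pthread mutex standing in for the page-table lock.

#include <pthread.h>
#include <stdio.h>

#define BATCH 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void expensive_flush(int *batched)
{
	printf("flushing %d entries\n", *batched);
	*batched = 0;
}

static void zap_range(int start, int end)
{
	int batched = 0;
	int addr = start;

again:
	pthread_mutex_lock(&lock);
	for (; addr < end; addr++) {
		batched++;
		if (batched == BATCH) {
			addr++;		/* this entry is done */
			break;		/* force_flush: leave the lock */
		}
	}
	pthread_mutex_unlock(&lock);

	if (batched == BATCH) {
		expensive_flush(&batched);	/* done outside the lock */
		if (addr < end)
			goto again;
	}
	if (batched)
		expensive_flush(&batched);
}

int main(void)
{
	zap_range(0, 10);
	return 0;
}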
@@ -1019,19 +1228,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1019 if (next-addr != HPAGE_PMD_SIZE) { 1228 if (next-addr != HPAGE_PMD_SIZE) {
1020 VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); 1229 VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
1021 split_huge_page_pmd(vma->vm_mm, pmd); 1230 split_huge_page_pmd(vma->vm_mm, pmd);
1022 } else if (zap_huge_pmd(tlb, vma, pmd)) { 1231 } else if (zap_huge_pmd(tlb, vma, pmd))
1023 (*zap_work)--;
1024 continue; 1232 continue;
1025 }
1026 /* fall through */ 1233 /* fall through */
1027 } 1234 }
1028 if (pmd_none_or_clear_bad(pmd)) { 1235 if (pmd_none_or_clear_bad(pmd))
1029 (*zap_work)--;
1030 continue; 1236 continue;
1031 } 1237 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1032 next = zap_pte_range(tlb, vma, pmd, addr, next, 1238 cond_resched();
1033 zap_work, details); 1239 } while (pmd++, addr = next, addr != end);
1034 } while (pmd++, addr = next, (addr != end && *zap_work > 0));
1035 1240
1036 return addr; 1241 return addr;
1037} 1242}
@@ -1039,7 +1244,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1039static inline unsigned long zap_pud_range(struct mmu_gather *tlb, 1244static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1040 struct vm_area_struct *vma, pgd_t *pgd, 1245 struct vm_area_struct *vma, pgd_t *pgd,
1041 unsigned long addr, unsigned long end, 1246 unsigned long addr, unsigned long end,
1042 long *zap_work, struct zap_details *details) 1247 struct zap_details *details)
1043{ 1248{
1044 pud_t *pud; 1249 pud_t *pud;
1045 unsigned long next; 1250 unsigned long next;
@@ -1047,13 +1252,10 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1047 pud = pud_offset(pgd, addr); 1252 pud = pud_offset(pgd, addr);
1048 do { 1253 do {
1049 next = pud_addr_end(addr, end); 1254 next = pud_addr_end(addr, end);
1050 if (pud_none_or_clear_bad(pud)) { 1255 if (pud_none_or_clear_bad(pud))
1051 (*zap_work)--;
1052 continue; 1256 continue;
1053 } 1257 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1054 next = zap_pmd_range(tlb, vma, pud, addr, next, 1258 } while (pud++, addr = next, addr != end);
1055 zap_work, details);
1056 } while (pud++, addr = next, (addr != end && *zap_work > 0));
1057 1259
1058 return addr; 1260 return addr;
1059} 1261}
@@ -1061,7 +1263,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1061static unsigned long unmap_page_range(struct mmu_gather *tlb, 1263static unsigned long unmap_page_range(struct mmu_gather *tlb,
1062 struct vm_area_struct *vma, 1264 struct vm_area_struct *vma,
1063 unsigned long addr, unsigned long end, 1265 unsigned long addr, unsigned long end,
1064 long *zap_work, struct zap_details *details) 1266 struct zap_details *details)
1065{ 1267{
1066 pgd_t *pgd; 1268 pgd_t *pgd;
1067 unsigned long next; 1269 unsigned long next;
@@ -1075,13 +1277,10 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
1075 pgd = pgd_offset(vma->vm_mm, addr); 1277 pgd = pgd_offset(vma->vm_mm, addr);
1076 do { 1278 do {
1077 next = pgd_addr_end(addr, end); 1279 next = pgd_addr_end(addr, end);
1078 if (pgd_none_or_clear_bad(pgd)) { 1280 if (pgd_none_or_clear_bad(pgd))
1079 (*zap_work)--;
1080 continue; 1281 continue;
1081 } 1282 next = zap_pud_range(tlb, vma, pgd, addr, next, details);
1082 next = zap_pud_range(tlb, vma, pgd, addr, next, 1283 } while (pgd++, addr = next, addr != end);
1083 zap_work, details);
1084 } while (pgd++, addr = next, (addr != end && *zap_work > 0));
1085 tlb_end_vma(tlb, vma); 1284 tlb_end_vma(tlb, vma);
1086 mem_cgroup_uncharge_end(); 1285 mem_cgroup_uncharge_end();
1087 1286
@@ -1121,17 +1320,12 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
1121 * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 1320 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1122 * drops the lock and schedules. 1321 * drops the lock and schedules.
1123 */ 1322 */
1124unsigned long unmap_vmas(struct mmu_gather **tlbp, 1323unsigned long unmap_vmas(struct mmu_gather *tlb,
1125 struct vm_area_struct *vma, unsigned long start_addr, 1324 struct vm_area_struct *vma, unsigned long start_addr,
1126 unsigned long end_addr, unsigned long *nr_accounted, 1325 unsigned long end_addr, unsigned long *nr_accounted,
1127 struct zap_details *details) 1326 struct zap_details *details)
1128{ 1327{
1129 long zap_work = ZAP_BLOCK_SIZE;
1130 unsigned long tlb_start = 0; /* For tlb_finish_mmu */
1131 int tlb_start_valid = 0;
1132 unsigned long start = start_addr; 1328 unsigned long start = start_addr;
1133 spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
1134 int fullmm = (*tlbp)->fullmm;
1135 struct mm_struct *mm = vma->vm_mm; 1329 struct mm_struct *mm = vma->vm_mm;
1136 1330
1137 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); 1331 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -1152,11 +1346,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
1152 untrack_pfn_vma(vma, 0, 0); 1346 untrack_pfn_vma(vma, 0, 0);
1153 1347
1154 while (start != end) { 1348 while (start != end) {
1155 if (!tlb_start_valid) {
1156 tlb_start = start;
1157 tlb_start_valid = 1;
1158 }
1159
1160 if (unlikely(is_vm_hugetlb_page(vma))) { 1349 if (unlikely(is_vm_hugetlb_page(vma))) {
1161 /* 1350 /*
1162 * It is undesirable to test vma->vm_file as it 1351 * It is undesirable to test vma->vm_file as it
@@ -1169,39 +1358,15 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
1169 * Since no pte has actually been setup, it is 1358 * Since no pte has actually been setup, it is
1170 * safe to do nothing in this case. 1359 * safe to do nothing in this case.
1171 */ 1360 */
1172 if (vma->vm_file) { 1361 if (vma->vm_file)
1173 unmap_hugepage_range(vma, start, end, NULL); 1362 unmap_hugepage_range(vma, start, end, NULL);
1174 zap_work -= (end - start) /
1175 pages_per_huge_page(hstate_vma(vma));
1176 }
1177 1363
1178 start = end; 1364 start = end;
1179 } else 1365 } else
1180 start = unmap_page_range(*tlbp, vma, 1366 start = unmap_page_range(tlb, vma, start, end, details);
1181 start, end, &zap_work, details);
1182
1183 if (zap_work > 0) {
1184 BUG_ON(start != end);
1185 break;
1186 }
1187
1188 tlb_finish_mmu(*tlbp, tlb_start, start);
1189
1190 if (need_resched() ||
1191 (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
1192 if (i_mmap_lock) {
1193 *tlbp = NULL;
1194 goto out;
1195 }
1196 cond_resched();
1197 }
1198
1199 *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
1200 tlb_start_valid = 0;
1201 zap_work = ZAP_BLOCK_SIZE;
1202 } 1367 }
1203 } 1368 }
1204out: 1369
1205 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); 1370 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1206 return start; /* which is now the end (or restart) address */ 1371 return start; /* which is now the end (or restart) address */
1207} 1372}
@@ -1217,16 +1382,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
1217 unsigned long size, struct zap_details *details) 1382 unsigned long size, struct zap_details *details)
1218{ 1383{
1219 struct mm_struct *mm = vma->vm_mm; 1384 struct mm_struct *mm = vma->vm_mm;
1220 struct mmu_gather *tlb; 1385 struct mmu_gather tlb;
1221 unsigned long end = address + size; 1386 unsigned long end = address + size;
1222 unsigned long nr_accounted = 0; 1387 unsigned long nr_accounted = 0;
1223 1388
1224 lru_add_drain(); 1389 lru_add_drain();
1225 tlb = tlb_gather_mmu(mm, 0); 1390 tlb_gather_mmu(&tlb, mm, 0);
1226 update_hiwater_rss(mm); 1391 update_hiwater_rss(mm);
1227 end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details); 1392 end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
1228 if (tlb) 1393 tlb_finish_mmu(&tlb, address, end);
1229 tlb_finish_mmu(tlb, address, end);
1230 return end; 1394 return end;
1231} 1395}
1232 1396
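zap_page_range() now keeps the mmu_gather on the caller's stack and lets tlb_gather_mmu() fill it in, instead of receiving a pointer from an allocator-style helper. A minimal sketch of that calling convention; the struct contents below are invented.

#include <stdio.h>
#include <string.h>

struct gather {
	void *mm;
	int fullmm;
	int need_flush;
};

static void gather_init(struct gather *g, void *mm, int fullmm)
{
	memset(g, 0, sizeof(*g));	/* callee initializes caller's storage */
	g->mm = mm;
	g->fullmm = fullmm;
}

static void gather_finish(struct gather *g)
{
	if (g->need_flush)
		puts("flush");
}

int main(void)
{
	int fake_mm;
	struct gather tlb;		/* lives on the caller's stack */

	gather_init(&tlb, &fake_mm, 0);
	tlb.need_flush = 1;
	gather_finish(&tlb);
	return 0;
}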
@@ -2535,96 +2699,11 @@ unwritable_page:
2535 return ret; 2699 return ret;
2536} 2700}
2537 2701
2538/* 2702static void unmap_mapping_range_vma(struct vm_area_struct *vma,
2539 * Helper functions for unmap_mapping_range().
2540 *
2541 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
2542 *
2543 * We have to restart searching the prio_tree whenever we drop the lock,
2544 * since the iterator is only valid while the lock is held, and anyway
2545 * a later vma might be split and reinserted earlier while lock dropped.
2546 *
2547 * The list of nonlinear vmas could be handled more efficiently, using
2548 * a placeholder, but handle it in the same way until a need is shown.
2549 * It is important to search the prio_tree before nonlinear list: a vma
2550 * may become nonlinear and be shifted from prio_tree to nonlinear list
2551 * while the lock is dropped; but never shifted from list to prio_tree.
2552 *
2553 * In order to make forward progress despite restarting the search,
2554 * vm_truncate_count is used to mark a vma as now dealt with, so we can
2555 * quickly skip it next time around. Since the prio_tree search only
2556 * shows us those vmas affected by unmapping the range in question, we
2557 * can't efficiently keep all vmas in step with mapping->truncate_count:
2558 * so instead reset them all whenever it wraps back to 0 (then go to 1).
2559 * mapping->truncate_count and vma->vm_truncate_count are protected by
2560 * i_mmap_lock.
2561 *
2562 * In order to make forward progress despite repeatedly restarting some
2563 * large vma, note the restart_addr from unmap_vmas when it breaks out:
2564 * and restart from that address when we reach that vma again. It might
2565 * have been split or merged, shrunk or extended, but never shifted: so
2566 * restart_addr remains valid so long as it remains in the vma's range.
2567 * unmap_mapping_range forces truncate_count to leap over page-aligned
2568 * values so we can save vma's restart_addr in its truncate_count field.
2569 */
2570#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
2571
2572static void reset_vma_truncate_counts(struct address_space *mapping)
2573{
2574 struct vm_area_struct *vma;
2575 struct prio_tree_iter iter;
2576
2577 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
2578 vma->vm_truncate_count = 0;
2579 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
2580 vma->vm_truncate_count = 0;
2581}
2582
2583static int unmap_mapping_range_vma(struct vm_area_struct *vma,
2584 unsigned long start_addr, unsigned long end_addr, 2703 unsigned long start_addr, unsigned long end_addr,
2585 struct zap_details *details) 2704 struct zap_details *details)
2586{ 2705{
2587 unsigned long restart_addr; 2706 zap_page_range(vma, start_addr, end_addr - start_addr, details);
2588 int need_break;
2589
2590 /*
2591 * files that support invalidating or truncating portions of the
2592 * file from under mmaped areas must have their ->fault function
2593 * return a locked page (and set VM_FAULT_LOCKED in the return).
2594 * This provides synchronisation against concurrent unmapping here.
2595 */
2596
2597again:
2598 restart_addr = vma->vm_truncate_count;
2599 if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
2600 start_addr = restart_addr;
2601 if (start_addr >= end_addr) {
2602 /* Top of vma has been split off since last time */
2603 vma->vm_truncate_count = details->truncate_count;
2604 return 0;
2605 }
2606 }
2607
2608 restart_addr = zap_page_range(vma, start_addr,
2609 end_addr - start_addr, details);
2610 need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
2611
2612 if (restart_addr >= end_addr) {
2613 /* We have now completed this vma: mark it so */
2614 vma->vm_truncate_count = details->truncate_count;
2615 if (!need_break)
2616 return 0;
2617 } else {
2618 /* Note restart_addr in vma's truncate_count field */
2619 vma->vm_truncate_count = restart_addr;
2620 if (!need_break)
2621 goto again;
2622 }
2623
2624 spin_unlock(details->i_mmap_lock);
2625 cond_resched();
2626 spin_lock(details->i_mmap_lock);
2627 return -EINTR;
2628} 2707}
2629 2708
2630static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 2709static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
@@ -2634,12 +2713,8 @@ static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
2634 struct prio_tree_iter iter; 2713 struct prio_tree_iter iter;
2635 pgoff_t vba, vea, zba, zea; 2714 pgoff_t vba, vea, zba, zea;
2636 2715
2637restart:
2638 vma_prio_tree_foreach(vma, &iter, root, 2716 vma_prio_tree_foreach(vma, &iter, root,
2639 details->first_index, details->last_index) { 2717 details->first_index, details->last_index) {
2640 /* Skip quickly over those we have already dealt with */
2641 if (vma->vm_truncate_count == details->truncate_count)
2642 continue;
2643 2718
2644 vba = vma->vm_pgoff; 2719 vba = vma->vm_pgoff;
2645 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 2720 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
@@ -2651,11 +2726,10 @@ restart:
2651 if (zea > vea) 2726 if (zea > vea)
2652 zea = vea; 2727 zea = vea;
2653 2728
2654 if (unmap_mapping_range_vma(vma, 2729 unmap_mapping_range_vma(vma,
2655 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 2730 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2656 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 2731 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2657 details) < 0) 2732 details);
2658 goto restart;
2659 } 2733 }
2660} 2734}
2661 2735
@@ -2670,15 +2744,9 @@ static inline void unmap_mapping_range_list(struct list_head *head,
2670 * across *all* the pages in each nonlinear VMA, not just the pages 2744 * across *all* the pages in each nonlinear VMA, not just the pages
2671 * whose virtual address lies outside the file truncation point. 2745 * whose virtual address lies outside the file truncation point.
2672 */ 2746 */
2673restart:
2674 list_for_each_entry(vma, head, shared.vm_set.list) { 2747 list_for_each_entry(vma, head, shared.vm_set.list) {
2675 /* Skip quickly over those we have already dealt with */
2676 if (vma->vm_truncate_count == details->truncate_count)
2677 continue;
2678 details->nonlinear_vma = vma; 2748 details->nonlinear_vma = vma;
2679 if (unmap_mapping_range_vma(vma, vma->vm_start, 2749 unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
2680 vma->vm_end, details) < 0)
2681 goto restart;
2682 } 2750 }
2683} 2751}
2684 2752
@@ -2717,26 +2785,14 @@ void unmap_mapping_range(struct address_space *mapping,
2717 details.last_index = hba + hlen - 1; 2785 details.last_index = hba + hlen - 1;
2718 if (details.last_index < details.first_index) 2786 if (details.last_index < details.first_index)
2719 details.last_index = ULONG_MAX; 2787 details.last_index = ULONG_MAX;
2720 details.i_mmap_lock = &mapping->i_mmap_lock;
2721 2788
2722 mutex_lock(&mapping->unmap_mutex);
2723 spin_lock(&mapping->i_mmap_lock);
2724
2725 /* Protect against endless unmapping loops */
2726 mapping->truncate_count++;
2727 if (unlikely(is_restart_addr(mapping->truncate_count))) {
2728 if (mapping->truncate_count == 0)
2729 reset_vma_truncate_counts(mapping);
2730 mapping->truncate_count++;
2731 }
2732 details.truncate_count = mapping->truncate_count;
2733 2789
2790 mutex_lock(&mapping->i_mmap_mutex);
2734 if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 2791 if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
2735 unmap_mapping_range_tree(&mapping->i_mmap, &details); 2792 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2736 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2793 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2737 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2794 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2738 spin_unlock(&mapping->i_mmap_lock); 2795 mutex_unlock(&mapping->i_mmap_mutex);
2739 mutex_unlock(&mapping->unmap_mutex);
2740} 2796}
2741EXPORT_SYMBOL(unmap_mapping_range); 2797EXPORT_SYMBOL(unmap_mapping_range);
2742 2798
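With i_mmap protected by a mutex, unmap_mapping_range() can call straight into zap_page_range() and sleep while holding the lock, which is why the truncate_count/restart machinery could be deleted. A hedged sketch of the simpler shape; the vma walk is reduced to a loop and usleep() stands in for blocking work.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t i_mmap_mutex = PTHREAD_MUTEX_INITIALIZER;

static void zap_one(int idx)
{
	/* may sleep: fine under a mutex, forbidden under a spinlock */
	usleep(1000);
	printf("zapped vma %d\n", idx);
}

static void unmap_mapping_range_demo(int nr_vmas)
{
	pthread_mutex_lock(&i_mmap_mutex);
	for (int i = 0; i < nr_vmas; i++)
		zap_one(i);		/* no restart logic required */
	pthread_mutex_unlock(&i_mmap_mutex);
}

int main(void)
{
	unmap_mapping_range_demo(3);
	return 0;
}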
@@ -2818,6 +2874,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2818 /* Had to read the page from swap area: Major fault */ 2874 /* Had to read the page from swap area: Major fault */
2819 ret = VM_FAULT_MAJOR; 2875 ret = VM_FAULT_MAJOR;
2820 count_vm_event(PGMAJFAULT); 2876 count_vm_event(PGMAJFAULT);
2877 mem_cgroup_count_vm_event(mm, PGMAJFAULT);
2821 } else if (PageHWPoison(page)) { 2878 } else if (PageHWPoison(page)) {
2822 /* 2879 /*
2823 * hwpoisoned dirty swapcache pages are kept for killing 2880 * hwpoisoned dirty swapcache pages are kept for killing
@@ -2966,7 +3023,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
2966 if (prev && prev->vm_end == address) 3023 if (prev && prev->vm_end == address)
2967 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; 3024 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
2968 3025
2969 expand_stack(vma, address - PAGE_SIZE); 3026 expand_downwards(vma, address - PAGE_SIZE);
2970 } 3027 }
2971 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { 3028 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
2972 struct vm_area_struct *next = vma->vm_next; 3029 struct vm_area_struct *next = vma->vm_next;
@@ -3357,6 +3414,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3357 __set_current_state(TASK_RUNNING); 3414 __set_current_state(TASK_RUNNING);
3358 3415
3359 count_vm_event(PGFAULT); 3416 count_vm_event(PGFAULT);
3417 mem_cgroup_count_vm_event(mm, PGFAULT);
3360 3418
3361 /* do counter updates before entering really critical section. */ 3419 /* do counter updates before entering really critical section. */
3362 check_sync_rss_stat(current); 3420 check_sync_rss_stat(current);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9ca1d604f7cd..9f646374e32f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -374,10 +374,6 @@ void online_page(struct page *page)
374 totalhigh_pages++; 374 totalhigh_pages++;
375#endif 375#endif
376 376
377#ifdef CONFIG_FLATMEM
378 max_mapnr = max(pfn, max_mapnr);
379#endif
380
381 ClearPageReserved(page); 377 ClearPageReserved(page);
382 init_page_count(page); 378 init_page_count(page);
383 __free_page(page); 379 __free_page(page);
@@ -400,7 +396,7 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
400} 396}
401 397
402 398
403int online_pages(unsigned long pfn, unsigned long nr_pages) 399int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
404{ 400{
405 unsigned long onlined_pages = 0; 401 unsigned long onlined_pages = 0;
406 struct zone *zone; 402 struct zone *zone;
@@ -459,8 +455,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
459 zone_pcp_update(zone); 455 zone_pcp_update(zone);
460 456
461 mutex_unlock(&zonelists_mutex); 457 mutex_unlock(&zonelists_mutex);
462 setup_per_zone_wmarks(); 458
463 calculate_zone_inactive_ratio(zone); 459 init_per_zone_wmark_min();
460
464 if (onlined_pages) { 461 if (onlined_pages) {
465 kswapd_run(zone_to_nid(zone)); 462 kswapd_run(zone_to_nid(zone));
466 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY); 463 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -705,7 +702,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
705 if (!pfn_valid(pfn)) 702 if (!pfn_valid(pfn))
706 continue; 703 continue;
707 page = pfn_to_page(pfn); 704 page = pfn_to_page(pfn);
708 if (!page_count(page)) 705 if (!get_page_unless_zero(page))
709 continue; 706 continue;
710 /* 707 /*
711 * We can skip free pages. And we can only deal with pages on 708 * We can skip free pages. And we can only deal with pages on
@@ -713,6 +710,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
713 */ 710 */
714 ret = isolate_lru_page(page); 711 ret = isolate_lru_page(page);
715 if (!ret) { /* Success */ 712 if (!ret) { /* Success */
713 put_page(page);
716 list_add_tail(&page->lru, &source); 714 list_add_tail(&page->lru, &source);
717 move_pages--; 715 move_pages--;
718 inc_zone_page_state(page, NR_ISOLATED_ANON + 716 inc_zone_page_state(page, NR_ISOLATED_ANON +
@@ -724,6 +722,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
724 pfn); 722 pfn);
725 dump_page(page); 723 dump_page(page);
726#endif 724#endif
725 put_page(page);
727 /* Because we don't have the big zone->lock, we should 726
728 check this again here. */ 727 check this again here. */
729 if (page_count(page)) { 728 if (page_count(page)) {
@@ -795,7 +794,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
795 return offlined; 794 return offlined;
796} 795}
797 796
798static int offline_pages(unsigned long start_pfn, 797static int __ref offline_pages(unsigned long start_pfn,
799 unsigned long end_pfn, unsigned long timeout) 798 unsigned long end_pfn, unsigned long timeout)
800{ 799{
801 unsigned long pfn, nr_pages, expire; 800 unsigned long pfn, nr_pages, expire;
@@ -893,8 +892,8 @@ repeat:
893 zone->zone_pgdat->node_present_pages -= offlined_pages; 892 zone->zone_pgdat->node_present_pages -= offlined_pages;
894 totalram_pages -= offlined_pages; 893 totalram_pages -= offlined_pages;
895 894
896 setup_per_zone_wmarks(); 895 init_per_zone_wmark_min();
897 calculate_zone_inactive_ratio(zone); 896
898 if (!node_present_pages(node)) { 897 if (!node_present_pages(node)) {
899 node_clear_state(node, N_HIGH_MEMORY); 898 node_clear_state(node, N_HIGH_MEMORY);
900 kswapd_stop(node); 899 kswapd_stop(node);
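do_migrate_range() now pins the page with get_page_unless_zero() before isolating it, so a page whose count has already dropped to zero is never touched. A userspace sketch of that primitive using C11 atomics; the real kernel helper has additional checks this omits.

#include <stdatomic.h>
#include <stdio.h>

struct page { atomic_int count; };

/* Only take a reference if the count is already non-zero, so we never
 * resurrect a page that is in the middle of being freed. */
static int get_page_unless_zero(struct page *p)
{
	int old = atomic_load(&p->count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&p->count, &old, old + 1))
			return 1;	/* got a reference */
	}
	return 0;			/* page already had a zero count */
}

static void put_page(struct page *p)
{
	atomic_fetch_sub(&p->count, 1);
}

int main(void)
{
	struct page busy = { .count = 2 }, freeing = { .count = 0 };

	if (get_page_unless_zero(&busy)) {
		printf("pinned busy page, count=%d\n",
		       atomic_load(&busy.count));
		put_page(&busy);
	}
	if (!get_page_unless_zero(&freeing))
		puts("skipped page with zero count");
	return 0;
}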
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 959a8b8c7350..e7fb9d25c54e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -99,7 +99,6 @@
99/* Internal flags */ 99/* Internal flags */
100#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ 100#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
101#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ 101#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
102#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
103 102
104static struct kmem_cache *policy_cache; 103static struct kmem_cache *policy_cache;
105static struct kmem_cache *sn_cache; 104static struct kmem_cache *sn_cache;
@@ -457,7 +456,6 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
457 }, 456 },
458}; 457};
459 458
460static void gather_stats(struct page *, void *, int pte_dirty);
461static void migrate_page_add(struct page *page, struct list_head *pagelist, 459static void migrate_page_add(struct page *page, struct list_head *pagelist,
462 unsigned long flags); 460 unsigned long flags);
463 461
@@ -492,9 +490,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
492 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 490 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
493 continue; 491 continue;
494 492
495 if (flags & MPOL_MF_STATS) 493 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
496 gather_stats(page, private, pte_dirty(*pte));
497 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
498 migrate_page_add(page, private, flags); 494 migrate_page_add(page, private, flags);
499 else 495 else
500 break; 496 break;
@@ -1489,7 +1485,7 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1489 * freeing by another task. It is the caller's responsibility to free the 1485 * freeing by another task. It is the caller's responsibility to free the
1490 * extra reference for shared policies. 1486 * extra reference for shared policies.
1491 */ 1487 */
1492static struct mempolicy *get_vma_policy(struct task_struct *task, 1488struct mempolicy *get_vma_policy(struct task_struct *task,
1493 struct vm_area_struct *vma, unsigned long addr) 1489 struct vm_area_struct *vma, unsigned long addr)
1494{ 1490{
1495 struct mempolicy *pol = task->mempolicy; 1491 struct mempolicy *pol = task->mempolicy;
@@ -2529,159 +2525,3 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2529 } 2525 }
2530 return p - buffer; 2526 return p - buffer;
2531} 2527}
2532
2533struct numa_maps {
2534 unsigned long pages;
2535 unsigned long anon;
2536 unsigned long active;
2537 unsigned long writeback;
2538 unsigned long mapcount_max;
2539 unsigned long dirty;
2540 unsigned long swapcache;
2541 unsigned long node[MAX_NUMNODES];
2542};
2543
2544static void gather_stats(struct page *page, void *private, int pte_dirty)
2545{
2546 struct numa_maps *md = private;
2547 int count = page_mapcount(page);
2548
2549 md->pages++;
2550 if (pte_dirty || PageDirty(page))
2551 md->dirty++;
2552
2553 if (PageSwapCache(page))
2554 md->swapcache++;
2555
2556 if (PageActive(page) || PageUnevictable(page))
2557 md->active++;
2558
2559 if (PageWriteback(page))
2560 md->writeback++;
2561
2562 if (PageAnon(page))
2563 md->anon++;
2564
2565 if (count > md->mapcount_max)
2566 md->mapcount_max = count;
2567
2568 md->node[page_to_nid(page)]++;
2569}
2570
2571#ifdef CONFIG_HUGETLB_PAGE
2572static void check_huge_range(struct vm_area_struct *vma,
2573 unsigned long start, unsigned long end,
2574 struct numa_maps *md)
2575{
2576 unsigned long addr;
2577 struct page *page;
2578 struct hstate *h = hstate_vma(vma);
2579 unsigned long sz = huge_page_size(h);
2580
2581 for (addr = start; addr < end; addr += sz) {
2582 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2583 addr & huge_page_mask(h));
2584 pte_t pte;
2585
2586 if (!ptep)
2587 continue;
2588
2589 pte = *ptep;
2590 if (pte_none(pte))
2591 continue;
2592
2593 page = pte_page(pte);
2594 if (!page)
2595 continue;
2596
2597 gather_stats(page, md, pte_dirty(*ptep));
2598 }
2599}
2600#else
2601static inline void check_huge_range(struct vm_area_struct *vma,
2602 unsigned long start, unsigned long end,
2603 struct numa_maps *md)
2604{
2605}
2606#endif
2607
2608/*
2609 * Display pages allocated per node and memory policy via /proc.
2610 */
2611int show_numa_map(struct seq_file *m, void *v)
2612{
2613 struct proc_maps_private *priv = m->private;
2614 struct vm_area_struct *vma = v;
2615 struct numa_maps *md;
2616 struct file *file = vma->vm_file;
2617 struct mm_struct *mm = vma->vm_mm;
2618 struct mempolicy *pol;
2619 int n;
2620 char buffer[50];
2621
2622 if (!mm)
2623 return 0;
2624
2625 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2626 if (!md)
2627 return 0;
2628
2629 pol = get_vma_policy(priv->task, vma, vma->vm_start);
2630 mpol_to_str(buffer, sizeof(buffer), pol, 0);
2631 mpol_cond_put(pol);
2632
2633 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2634
2635 if (file) {
2636 seq_printf(m, " file=");
2637 seq_path(m, &file->f_path, "\n\t= ");
2638 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2639 seq_printf(m, " heap");
2640 } else if (vma->vm_start <= mm->start_stack &&
2641 vma->vm_end >= mm->start_stack) {
2642 seq_printf(m, " stack");
2643 }
2644
2645 if (is_vm_hugetlb_page(vma)) {
2646 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2647 seq_printf(m, " huge");
2648 } else {
2649 check_pgd_range(vma, vma->vm_start, vma->vm_end,
2650 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2651 }
2652
2653 if (!md->pages)
2654 goto out;
2655
2656 if (md->anon)
2657 seq_printf(m," anon=%lu",md->anon);
2658
2659 if (md->dirty)
2660 seq_printf(m," dirty=%lu",md->dirty);
2661
2662 if (md->pages != md->anon && md->pages != md->dirty)
2663 seq_printf(m, " mapped=%lu", md->pages);
2664
2665 if (md->mapcount_max > 1)
2666 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2667
2668 if (md->swapcache)
2669 seq_printf(m," swapcache=%lu", md->swapcache);
2670
2671 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2672 seq_printf(m," active=%lu", md->active);
2673
2674 if (md->writeback)
2675 seq_printf(m," writeback=%lu", md->writeback);
2676
2677 for_each_node_state(n, N_HIGH_MEMORY)
2678 if (md->node[n])
2679 seq_printf(m, " N%d=%lu", n, md->node[n]);
2680out:
2681 seq_putc(m, '\n');
2682 kfree(md);
2683
2684 if (m->count < m->size)
2685 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2686 return 0;
2687}
diff --git a/mm/migrate.c b/mm/migrate.c
index 34132f8e9109..e4a5c912983d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -721,15 +721,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
721 * Only page_lock_anon_vma() understands the subtleties of 721 * Only page_lock_anon_vma() understands the subtleties of
722 * getting a hold on an anon_vma from outside one of its mms. 722 * getting a hold on an anon_vma from outside one of its mms.
723 */ 723 */
724 anon_vma = page_lock_anon_vma(page); 724 anon_vma = page_get_anon_vma(page);
725 if (anon_vma) { 725 if (anon_vma) {
726 /* 726 /*
727 * Take a reference count on the anon_vma if the 727 * Anon page
728 * page is mapped so that it is guaranteed to
729 * exist when the page is remapped later
730 */ 728 */
731 get_anon_vma(anon_vma);
732 page_unlock_anon_vma(anon_vma);
733 } else if (PageSwapCache(page)) { 729 } else if (PageSwapCache(page)) {
734 /* 730 /*
735 * We cannot be sure that the anon_vma of an unmapped 731 * We cannot be sure that the anon_vma of an unmapped
@@ -857,13 +853,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
857 lock_page(hpage); 853 lock_page(hpage);
858 } 854 }
859 855
860 if (PageAnon(hpage)) { 856 if (PageAnon(hpage))
861 anon_vma = page_lock_anon_vma(hpage); 857 anon_vma = page_get_anon_vma(hpage);
862 if (anon_vma) {
863 get_anon_vma(anon_vma);
864 page_unlock_anon_vma(anon_vma);
865 }
866 }
867 858
868 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 859 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
869 860
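The migrate.c hunks fold the lock/take-reference/unlock sequence into a single page_get_anon_vma() call. A small analogue of that helper shape; the anon_vma layout and locking here are invented, and only the take-a-reference-then-drop-the-lock pattern is the point.

#include <pthread.h>
#include <stdio.h>

struct anon_vma {
	pthread_mutex_t lock;
	int refcount;
};

static struct anon_vma *anon_vma_get(struct anon_vma *av)
{
	if (!av)
		return NULL;
	pthread_mutex_lock(&av->lock);
	av->refcount++;			/* the reference outlives the lock */
	pthread_mutex_unlock(&av->lock);
	return av;
}

static void anon_vma_put(struct anon_vma *av)
{
	pthread_mutex_lock(&av->lock);
	av->refcount--;
	pthread_mutex_unlock(&av->lock);
}

int main(void)
{
	struct anon_vma av = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct anon_vma *held = anon_vma_get(&av);

	if (held) {
		printf("refcount=%d while migrating\n", held->refcount);
		anon_vma_put(held);
	}
	return 0;
}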
diff --git a/mm/mlock.c b/mm/mlock.c
index 516b2c2ddd5a..048260c4e02e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -307,13 +307,13 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
307 * For vmas that pass the filters, merge/split as appropriate. 307 * For vmas that pass the filters, merge/split as appropriate.
308 */ 308 */
309static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, 309static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
310 unsigned long start, unsigned long end, unsigned int newflags) 310 unsigned long start, unsigned long end, vm_flags_t newflags)
311{ 311{
312 struct mm_struct *mm = vma->vm_mm; 312 struct mm_struct *mm = vma->vm_mm;
313 pgoff_t pgoff; 313 pgoff_t pgoff;
314 int nr_pages; 314 int nr_pages;
315 int ret = 0; 315 int ret = 0;
316 int lock = newflags & VM_LOCKED; 316 int lock = !!(newflags & VM_LOCKED);
317 317
318 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || 318 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
319 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) 319 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
@@ -385,7 +385,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
385 prev = vma; 385 prev = vma;
386 386
387 for (nstart = start ; ; ) { 387 for (nstart = start ; ; ) {
388 unsigned int newflags; 388 vm_flags_t newflags;
389 389
390 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ 390 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
391 391
@@ -524,7 +524,7 @@ static int do_mlockall(int flags)
524 goto out; 524 goto out;
525 525
526 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { 526 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
527 unsigned int newflags; 527 vm_flags_t newflags;
528 528
529 newflags = vma->vm_flags | VM_LOCKED; 529 newflags = vma->vm_flags | VM_LOCKED;
530 if (!(flags & MCL_CURRENT)) 530 if (!(flags & MCL_CURRENT))
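mlock_fixup() now normalizes the VM_LOCKED test with '!!' because newflags is a (potentially 64-bit) vm_flags_t and a naked bit test could be truncated when stored in an int. A sketch of the truncation; the flag value below is deliberately placed above bit 31 for demonstration and is not VM_LOCKED's real value.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_flags_t;
#define DEMO_FLAG ((vm_flags_t)1 << 40)	/* invented high bit */

int main(void)
{
	vm_flags_t newflags = DEMO_FLAG;

	/* implementation-defined conversion: on common ABIs the low 32 bits
	 * are kept, so bit 40 is simply lost */
	int truncated = (int)(newflags & DEMO_FLAG);
	int normalized = !!(newflags & DEMO_FLAG);	/* always 0 or 1 */

	printf("truncated=%d normalized=%d\n", truncated, normalized);
	return 0;
}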
diff --git a/mm/mmap.c b/mm/mmap.c
index 772140c53ab1..bbdc9af5e117 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -84,10 +84,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
84} 84}
85EXPORT_SYMBOL(vm_get_page_prot); 85EXPORT_SYMBOL(vm_get_page_prot);
86 86
87int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ 87int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
88int sysctl_overcommit_ratio = 50; /* default is 50% */ 88int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
89int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; 89int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
90struct percpu_counter vm_committed_as; 90/*
91 * Keep vm_committed_as in its own cacheline so it is not shared with
92 * other variables; it can be updated frequently by several CPUs.
93 */
94struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
91 95
92/* 96/*
93 * Check that a process has enough memory to allocate a new virtual 97 * Check that a process has enough memory to allocate a new virtual
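vm_committed_as gains ____cacheline_aligned_in_smp so the frequently updated counter does not false-share a cacheline with neighbouring variables. A userspace analogue with C11 alignas, assuming a 64-byte cacheline.

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64	/* assumed cacheline size */

struct counters {
	long rarely_written;
	alignas(CACHELINE) long vm_committed_as;	/* own cacheline */
};

int main(void)
{
	struct counters c = { 0 };

	printf("offset of vm_committed_as: %zu (cacheline %d)\n",
	       offsetof(struct counters, vm_committed_as), CACHELINE);
	(void)c;
	return 0;
}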
@@ -190,7 +194,7 @@ error:
190} 194}
191 195
192/* 196/*
193 * Requires inode->i_mapping->i_mmap_lock 197 * Requires inode->i_mapping->i_mmap_mutex
194 */ 198 */
195static void __remove_shared_vm_struct(struct vm_area_struct *vma, 199static void __remove_shared_vm_struct(struct vm_area_struct *vma,
196 struct file *file, struct address_space *mapping) 200 struct file *file, struct address_space *mapping)
@@ -218,9 +222,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
218 222
219 if (file) { 223 if (file) {
220 struct address_space *mapping = file->f_mapping; 224 struct address_space *mapping = file->f_mapping;
221 spin_lock(&mapping->i_mmap_lock); 225 mutex_lock(&mapping->i_mmap_mutex);
222 __remove_shared_vm_struct(vma, file, mapping); 226 __remove_shared_vm_struct(vma, file, mapping);
223 spin_unlock(&mapping->i_mmap_lock); 227 mutex_unlock(&mapping->i_mmap_mutex);
224 } 228 }
225} 229}
226 230
@@ -394,29 +398,6 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
394 return vma; 398 return vma;
395} 399}
396 400
397static inline void
398__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
399 struct vm_area_struct *prev, struct rb_node *rb_parent)
400{
401 struct vm_area_struct *next;
402
403 vma->vm_prev = prev;
404 if (prev) {
405 next = prev->vm_next;
406 prev->vm_next = vma;
407 } else {
408 mm->mmap = vma;
409 if (rb_parent)
410 next = rb_entry(rb_parent,
411 struct vm_area_struct, vm_rb);
412 else
413 next = NULL;
414 }
415 vma->vm_next = next;
416 if (next)
417 next->vm_prev = vma;
418}
419
420void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, 401void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
421 struct rb_node **rb_link, struct rb_node *rb_parent) 402 struct rb_node **rb_link, struct rb_node *rb_parent)
422{ 403{
@@ -464,16 +445,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
464 if (vma->vm_file) 445 if (vma->vm_file)
465 mapping = vma->vm_file->f_mapping; 446 mapping = vma->vm_file->f_mapping;
466 447
467 if (mapping) { 448 if (mapping)
468 spin_lock(&mapping->i_mmap_lock); 449 mutex_lock(&mapping->i_mmap_mutex);
469 vma->vm_truncate_count = mapping->truncate_count;
470 }
471 450
472 __vma_link(mm, vma, prev, rb_link, rb_parent); 451 __vma_link(mm, vma, prev, rb_link, rb_parent);
473 __vma_link_file(vma); 452 __vma_link_file(vma);
474 453
475 if (mapping) 454 if (mapping)
476 spin_unlock(&mapping->i_mmap_lock); 455 mutex_unlock(&mapping->i_mmap_mutex);
477 456
478 mm->map_count++; 457 mm->map_count++;
479 validate_mm(mm); 458 validate_mm(mm);
@@ -576,17 +555,8 @@ again: remove_next = 1 + (end > next->vm_end);
576 mapping = file->f_mapping; 555 mapping = file->f_mapping;
577 if (!(vma->vm_flags & VM_NONLINEAR)) 556 if (!(vma->vm_flags & VM_NONLINEAR))
578 root = &mapping->i_mmap; 557 root = &mapping->i_mmap;
579 spin_lock(&mapping->i_mmap_lock); 558 mutex_lock(&mapping->i_mmap_mutex);
580 if (importer &&
581 vma->vm_truncate_count != next->vm_truncate_count) {
582 /*
583 * unmap_mapping_range might be in progress:
584 * ensure that the expanding vma is rescanned.
585 */
586 importer->vm_truncate_count = 0;
587 }
588 if (insert) { 559 if (insert) {
589 insert->vm_truncate_count = vma->vm_truncate_count;
590 /* 560 /*
591 * Put into prio_tree now, so instantiated pages 561 * Put into prio_tree now, so instantiated pages
592 * are visible to arm/parisc __flush_dcache_page 562 * are visible to arm/parisc __flush_dcache_page
@@ -605,7 +575,7 @@ again: remove_next = 1 + (end > next->vm_end);
605 * lock may be shared between many sibling processes. Skipping 575 * lock may be shared between many sibling processes. Skipping
606 * the lock for brk adjustments makes a difference sometimes. 576 * the lock for brk adjustments makes a difference sometimes.
607 */ 577 */
608 if (vma->anon_vma && (insert || importer || start != vma->vm_start)) { 578 if (vma->anon_vma && (importer || start != vma->vm_start)) {
609 anon_vma = vma->anon_vma; 579 anon_vma = vma->anon_vma;
610 anon_vma_lock(anon_vma); 580 anon_vma_lock(anon_vma);
611 } 581 }
@@ -652,7 +622,7 @@ again: remove_next = 1 + (end > next->vm_end);
652 if (anon_vma) 622 if (anon_vma)
653 anon_vma_unlock(anon_vma); 623 anon_vma_unlock(anon_vma);
654 if (mapping) 624 if (mapping)
655 spin_unlock(&mapping->i_mmap_lock); 625 mutex_unlock(&mapping->i_mmap_mutex);
656 626
657 if (remove_next) { 627 if (remove_next) {
658 if (file) { 628 if (file) {
@@ -699,9 +669,17 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
699} 669}
700 670
701static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, 671static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
702 struct anon_vma *anon_vma2) 672 struct anon_vma *anon_vma2,
673 struct vm_area_struct *vma)
703{ 674{
704 return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2); 675 /*
676 * The list_is_singular() test is to avoid merging VMAs cloned from
677 * parents; this improves scalability by reducing anon_vma lock contention.
678 */
679 if ((!anon_vma1 || !anon_vma2) && (!vma ||
680 list_is_singular(&vma->anon_vma_chain)))
681 return 1;
682 return anon_vma1 == anon_vma2;
705} 683}
706 684
707/* 685/*
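The new is_mergeable_anon_vma() only treats a NULL anon_vma as compatible when the vma's anon_vma_chain is singular, i.e. the anon_vma was not inherited from a parent. A sketch of that test with the list reduced to a length field; the real chain carries more state than shown.

#include <stdio.h>

struct vma {
	void *anon_vma;
	int anon_vma_chain_len;		/* stand-in for list_is_singular() */
};

static int list_is_singular(const struct vma *vma)
{
	return vma->anon_vma_chain_len == 1;
}

static int is_mergeable_anon_vma(void *anon_vma1, void *anon_vma2,
				 const struct vma *vma)
{
	if ((!anon_vma1 || !anon_vma2) &&
	    (!vma || list_is_singular(vma)))
		return 1;
	return anon_vma1 == anon_vma2;
}

int main(void)
{
	struct vma own = { .anon_vma = &own, .anon_vma_chain_len = 1 };
	struct vma forked = { .anon_vma = &own, .anon_vma_chain_len = 2 };

	printf("own anon_vma vs NULL: %d\n",
	       is_mergeable_anon_vma(own.anon_vma, NULL, &own));
	printf("inherited anon_vma vs NULL: %d\n",
	       is_mergeable_anon_vma(forked.anon_vma, NULL, &forked));
	return 0;
}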
@@ -720,7 +698,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
720 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) 698 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
721{ 699{
722 if (is_mergeable_vma(vma, file, vm_flags) && 700 if (is_mergeable_vma(vma, file, vm_flags) &&
723 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { 701 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
724 if (vma->vm_pgoff == vm_pgoff) 702 if (vma->vm_pgoff == vm_pgoff)
725 return 1; 703 return 1;
726 } 704 }
@@ -739,7 +717,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
739 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) 717 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
740{ 718{
741 if (is_mergeable_vma(vma, file, vm_flags) && 719 if (is_mergeable_vma(vma, file, vm_flags) &&
742 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { 720 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
743 pgoff_t vm_pglen; 721 pgoff_t vm_pglen;
744 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 722 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
745 if (vma->vm_pgoff + vm_pglen == vm_pgoff) 723 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
@@ -817,7 +795,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
817 can_vma_merge_before(next, vm_flags, 795 can_vma_merge_before(next, vm_flags,
818 anon_vma, file, pgoff+pglen) && 796 anon_vma, file, pgoff+pglen) &&
819 is_mergeable_anon_vma(prev->anon_vma, 797 is_mergeable_anon_vma(prev->anon_vma,
820 next->anon_vma)) { 798 next->anon_vma, NULL)) {
821 /* cases 1, 6 */ 799 /* cases 1, 6 */
822 err = vma_adjust(prev, prev->vm_start, 800 err = vma_adjust(prev, prev->vm_start,
823 next->vm_end, prev->vm_pgoff, NULL); 801 next->vm_end, prev->vm_pgoff, NULL);
@@ -982,7 +960,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
982{ 960{
983 struct mm_struct * mm = current->mm; 961 struct mm_struct * mm = current->mm;
984 struct inode *inode; 962 struct inode *inode;
985 unsigned int vm_flags; 963 vm_flags_t vm_flags;
986 int error; 964 int error;
987 unsigned long reqprot = prot; 965 unsigned long reqprot = prot;
988 966
@@ -1187,7 +1165,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1187 */ 1165 */
1188int vma_wants_writenotify(struct vm_area_struct *vma) 1166int vma_wants_writenotify(struct vm_area_struct *vma)
1189{ 1167{
1190 unsigned int vm_flags = vma->vm_flags; 1168 vm_flags_t vm_flags = vma->vm_flags;
1191 1169
1192 /* If it was private or non-writable, the write bit is already clear */ 1170 /* If it was private or non-writable, the write bit is already clear */
1193 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) 1171 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
@@ -1215,7 +1193,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
1215 * We account for memory if it's a private writeable mapping, 1193 * We account for memory if it's a private writeable mapping,
1216 * not hugepages and VM_NORESERVE wasn't set. 1194 * not hugepages and VM_NORESERVE wasn't set.
1217 */ 1195 */
1218static inline int accountable_mapping(struct file *file, unsigned int vm_flags) 1196static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1219{ 1197{
1220 /* 1198 /*
1221 * hugetlb has its own accounting separate from the core VM 1199 * hugetlb has its own accounting separate from the core VM
@@ -1229,7 +1207,7 @@ static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
1229 1207
1230unsigned long mmap_region(struct file *file, unsigned long addr, 1208unsigned long mmap_region(struct file *file, unsigned long addr,
1231 unsigned long len, unsigned long flags, 1209 unsigned long len, unsigned long flags,
1232 unsigned int vm_flags, unsigned long pgoff) 1210 vm_flags_t vm_flags, unsigned long pgoff)
1233{ 1211{
1234 struct mm_struct *mm = current->mm; 1212 struct mm_struct *mm = current->mm;
1235 struct vm_area_struct *vma, *prev; 1213 struct vm_area_struct *vma, *prev;
@@ -1785,7 +1763,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1785/* 1763/*
1786 * vma is the first one with address < vma->vm_start. Have to extend vma. 1764 * vma is the first one with address < vma->vm_start. Have to extend vma.
1787 */ 1765 */
1788static int expand_downwards(struct vm_area_struct *vma, 1766int expand_downwards(struct vm_area_struct *vma,
1789 unsigned long address) 1767 unsigned long address)
1790{ 1768{
1791 int error; 1769 int error;
@@ -1832,11 +1810,6 @@ static int expand_downwards(struct vm_area_struct *vma,
1832 return error; 1810 return error;
1833} 1811}
1834 1812
1835int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
1836{
1837 return expand_downwards(vma, address);
1838}
1839
1840#ifdef CONFIG_STACK_GROWSUP 1813#ifdef CONFIG_STACK_GROWSUP
1841int expand_stack(struct vm_area_struct *vma, unsigned long address) 1814int expand_stack(struct vm_area_struct *vma, unsigned long address)
1842{ 1815{
@@ -1919,17 +1892,17 @@ static void unmap_region(struct mm_struct *mm,
1919 unsigned long start, unsigned long end) 1892 unsigned long start, unsigned long end)
1920{ 1893{
1921 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; 1894 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1922 struct mmu_gather *tlb; 1895 struct mmu_gather tlb;
1923 unsigned long nr_accounted = 0; 1896 unsigned long nr_accounted = 0;
1924 1897
1925 lru_add_drain(); 1898 lru_add_drain();
1926 tlb = tlb_gather_mmu(mm, 0); 1899 tlb_gather_mmu(&tlb, mm, 0);
1927 update_hiwater_rss(mm); 1900 update_hiwater_rss(mm);
1928 unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); 1901 unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1929 vm_unacct_memory(nr_accounted); 1902 vm_unacct_memory(nr_accounted);
1930 free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, 1903 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
1931 next? next->vm_start: 0); 1904 next ? next->vm_start : 0);
1932 tlb_finish_mmu(tlb, start, end); 1905 tlb_finish_mmu(&tlb, start, end);
1933} 1906}
1934 1907
1935/* 1908/*
@@ -2271,7 +2244,7 @@ EXPORT_SYMBOL(do_brk);
2271/* Release all mmaps. */ 2244/* Release all mmaps. */
2272void exit_mmap(struct mm_struct *mm) 2245void exit_mmap(struct mm_struct *mm)
2273{ 2246{
2274 struct mmu_gather *tlb; 2247 struct mmu_gather tlb;
2275 struct vm_area_struct *vma; 2248 struct vm_area_struct *vma;
2276 unsigned long nr_accounted = 0; 2249 unsigned long nr_accounted = 0;
2277 unsigned long end; 2250 unsigned long end;
@@ -2296,14 +2269,14 @@ void exit_mmap(struct mm_struct *mm)
2296 2269
2297 lru_add_drain(); 2270 lru_add_drain();
2298 flush_cache_mm(mm); 2271 flush_cache_mm(mm);
2299 tlb = tlb_gather_mmu(mm, 1); 2272 tlb_gather_mmu(&tlb, mm, 1);
2300 /* update_hiwater_rss(mm) here? but nobody should be looking */ 2273 /* update_hiwater_rss(mm) here? but nobody should be looking */
2301 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2274 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2302 end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); 2275 end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2303 vm_unacct_memory(nr_accounted); 2276 vm_unacct_memory(nr_accounted);
2304 2277
2305 free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0); 2278 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
2306 tlb_finish_mmu(tlb, 0, end); 2279 tlb_finish_mmu(&tlb, 0, end);
2307 2280
2308 /* 2281 /*
2309 * Walk the list again, actually closing and freeing it, 2282 * Walk the list again, actually closing and freeing it,
@@ -2317,7 +2290,7 @@ void exit_mmap(struct mm_struct *mm)
2317 2290
2318/* Insert vm structure into process list sorted by address 2291/* Insert vm structure into process list sorted by address
2319 * and into the inode's i_mmap tree. If vm_file is non-NULL 2292 * and into the inode's i_mmap tree. If vm_file is non-NULL
2320 * then i_mmap_lock is taken here. 2293 * then i_mmap_mutex is taken here.
2321 */ 2294 */
2322int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) 2295int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
2323{ 2296{
@@ -2529,15 +2502,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2529 * The LSB of head.next can't change from under us 2502 * The LSB of head.next can't change from under us
2530 * because we hold the mm_all_locks_mutex. 2503 * because we hold the mm_all_locks_mutex.
2531 */ 2504 */
2532 spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem); 2505 mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
2533 /* 2506 /*
2534 * We can safely modify head.next after taking the 2507 * We can safely modify head.next after taking the
2535 * anon_vma->root->lock. If some other vma in this mm shares 2508 * anon_vma->root->mutex. If some other vma in this mm shares
2536 * the same anon_vma we won't take it again. 2509 * the same anon_vma we won't take it again.
2537 * 2510 *
2538 * No need of atomic instructions here, head.next 2511 * No need of atomic instructions here, head.next
2539 * can't change from under us thanks to the 2512 * can't change from under us thanks to the
2540 * anon_vma->root->lock. 2513 * anon_vma->root->mutex.
2541 */ 2514 */
2542 if (__test_and_set_bit(0, (unsigned long *) 2515 if (__test_and_set_bit(0, (unsigned long *)
2543 &anon_vma->root->head.next)) 2516 &anon_vma->root->head.next))
@@ -2559,7 +2532,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2559 */ 2532 */
2560 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 2533 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2561 BUG(); 2534 BUG();
2562 spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem); 2535 mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
2563 } 2536 }
2564} 2537}
2565 2538
@@ -2586,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2586 * vma in this mm is backed by the same anon_vma or address_space. 2559 * vma in this mm is backed by the same anon_vma or address_space.
2587 * 2560 *
2588 * We can take all the locks in random order because the VM code 2561 * We can take all the locks in random order because the VM code
2589 * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never 2562 * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
2590 * takes more than one of them in a row. Secondly we're protected 2563 * takes more than one of them in a row. Secondly we're protected
2591 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. 2564 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2592 * 2565 *
@@ -2642,7 +2615,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2642 * 2615 *
2643 * No need of atomic instructions here, head.next 2616 * No need of atomic instructions here, head.next
2644 * can't change from under us until we release the 2617 * can't change from under us until we release the
2645 * anon_vma->root->lock. 2618 * anon_vma->root->mutex.
2646 */ 2619 */
2647 if (!__test_and_clear_bit(0, (unsigned long *) 2620 if (!__test_and_clear_bit(0, (unsigned long *)
2648 &anon_vma->root->head.next)) 2621 &anon_vma->root->head.next))
@@ -2658,7 +2631,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
2658 * AS_MM_ALL_LOCKS can't change to 0 from under us 2631 * AS_MM_ALL_LOCKS can't change to 0 from under us
2659 * because we hold the mm_all_locks_mutex. 2632 * because we hold the mm_all_locks_mutex.
2660 */ 2633 */
2661 spin_unlock(&mapping->i_mmap_lock); 2634 mutex_unlock(&mapping->i_mmap_mutex);
2662 if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 2635 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2663 &mapping->flags)) 2636 &mapping->flags))
2664 BUG(); 2637 BUG();
diff --git a/mm/mremap.c b/mm/mremap.c
index a7c1f9f9b941..506fa44403df 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -93,8 +93,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
93 * and we propagate stale pages into the dst afterward. 93 * and we propagate stale pages into the dst afterward.
94 */ 94 */
95 mapping = vma->vm_file->f_mapping; 95 mapping = vma->vm_file->f_mapping;
96 spin_lock(&mapping->i_mmap_lock); 96 mutex_lock(&mapping->i_mmap_mutex);
97 new_vma->vm_truncate_count = 0;
98 } 97 }
99 98
100 /* 99 /*
@@ -123,7 +122,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
123 pte_unmap(new_pte - 1); 122 pte_unmap(new_pte - 1);
124 pte_unmap_unlock(old_pte - 1, old_ptl); 123 pte_unmap_unlock(old_pte - 1, old_ptl);
125 if (mapping) 124 if (mapping)
126 spin_unlock(&mapping->i_mmap_lock); 125 mutex_unlock(&mapping->i_mmap_mutex);
127 mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end); 126 mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
128} 127}
129 128
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 9109049f0bbc..6e93dc7f2586 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -307,30 +307,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
307void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, 307void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
308 unsigned long align, unsigned long goal) 308 unsigned long align, unsigned long goal)
309{ 309{
310#ifdef MAX_DMA32_PFN
311 unsigned long end_pfn;
312
313 if (WARN_ON_ONCE(slab_is_available()))
314 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
315
316 /* update goal according ...MAX_DMA32_PFN */
317 end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
318
319 if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
320 (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
321 void *ptr;
322 unsigned long new_goal;
323
324 new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
325 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
326 new_goal, -1ULL);
327 if (ptr)
328 return ptr;
329 }
330#endif
331
332 return __alloc_bootmem_node(pgdat, size, align, goal); 310 return __alloc_bootmem_node(pgdat, size, align, goal);
333
334} 311}
335 312
336#ifdef CONFIG_SPARSEMEM 313#ifdef CONFIG_SPARSEMEM
diff --git a/mm/nommu.c b/mm/nommu.c
index c4c542c736a9..1fd0c51b10a6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -680,9 +680,9 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
680 */ 680 */
681static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) 681static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
682{ 682{
683 struct vm_area_struct *pvma, **pp, *next; 683 struct vm_area_struct *pvma, *prev;
684 struct address_space *mapping; 684 struct address_space *mapping;
685 struct rb_node **p, *parent; 685 struct rb_node **p, *parent, *rb_prev;
686 686
687 kenter(",%p", vma); 687 kenter(",%p", vma);
688 688
@@ -703,7 +703,7 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
703 } 703 }
704 704
705 /* add the VMA to the tree */ 705 /* add the VMA to the tree */
706 parent = NULL; 706 parent = rb_prev = NULL;
707 p = &mm->mm_rb.rb_node; 707 p = &mm->mm_rb.rb_node;
708 while (*p) { 708 while (*p) {
709 parent = *p; 709 parent = *p;
@@ -713,17 +713,20 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
713 * (the latter is necessary as we may get identical VMAs) */ 713 * (the latter is necessary as we may get identical VMAs) */
714 if (vma->vm_start < pvma->vm_start) 714 if (vma->vm_start < pvma->vm_start)
715 p = &(*p)->rb_left; 715 p = &(*p)->rb_left;
716 else if (vma->vm_start > pvma->vm_start) 716 else if (vma->vm_start > pvma->vm_start) {
717 rb_prev = parent;
717 p = &(*p)->rb_right; 718 p = &(*p)->rb_right;
718 else if (vma->vm_end < pvma->vm_end) 719 } else if (vma->vm_end < pvma->vm_end)
719 p = &(*p)->rb_left; 720 p = &(*p)->rb_left;
720 else if (vma->vm_end > pvma->vm_end) 721 else if (vma->vm_end > pvma->vm_end) {
722 rb_prev = parent;
721 p = &(*p)->rb_right; 723 p = &(*p)->rb_right;
722 else if (vma < pvma) 724 } else if (vma < pvma)
723 p = &(*p)->rb_left; 725 p = &(*p)->rb_left;
724 else if (vma > pvma) 726 else if (vma > pvma) {
727 rb_prev = parent;
725 p = &(*p)->rb_right; 728 p = &(*p)->rb_right;
726 else 729 } else
727 BUG(); 730 BUG();
728 } 731 }
729 732
@@ -731,20 +734,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
731 rb_insert_color(&vma->vm_rb, &mm->mm_rb); 734 rb_insert_color(&vma->vm_rb, &mm->mm_rb);
732 735
733 /* add VMA to the VMA list also */ 736 /* add VMA to the VMA list also */
734 for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) { 737 prev = NULL;
735 if (pvma->vm_start > vma->vm_start) 738 if (rb_prev)
736 break; 739 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
737 if (pvma->vm_start < vma->vm_start)
738 continue;
739 if (pvma->vm_end < vma->vm_end)
740 break;
741 }
742 740
743 next = *pp; 741 __vma_link_list(mm, vma, prev, parent);
744 *pp = vma;
745 vma->vm_next = next;
746 if (next)
747 next->vm_prev = vma;
748} 742}
749 743
750/* 744/*
@@ -752,7 +746,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
752 */ 746 */
753static void delete_vma_from_mm(struct vm_area_struct *vma) 747static void delete_vma_from_mm(struct vm_area_struct *vma)
754{ 748{
755 struct vm_area_struct **pp;
756 struct address_space *mapping; 749 struct address_space *mapping;
757 struct mm_struct *mm = vma->vm_mm; 750 struct mm_struct *mm = vma->vm_mm;
758 751
@@ -775,12 +768,14 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
775 768
776 /* remove from the MM's tree and list */ 769 /* remove from the MM's tree and list */
777 rb_erase(&vma->vm_rb, &mm->mm_rb); 770 rb_erase(&vma->vm_rb, &mm->mm_rb);
778 for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) { 771
779 if (*pp == vma) { 772 if (vma->vm_prev)
780 *pp = vma->vm_next; 773 vma->vm_prev->vm_next = vma->vm_next;
781 break; 774 else
782 } 775 mm->mmap = vma->vm_next;
783 } 776
777 if (vma->vm_next)
778 vma->vm_next->vm_prev = vma->vm_prev;
784 779
785 vma->vm_mm = NULL; 780 vma->vm_mm = NULL;
786} 781}
@@ -809,17 +804,15 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
809struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 804struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
810{ 805{
811 struct vm_area_struct *vma; 806 struct vm_area_struct *vma;
812 struct rb_node *n = mm->mm_rb.rb_node;
813 807
814 /* check the cache first */ 808 /* check the cache first */
815 vma = mm->mmap_cache; 809 vma = mm->mmap_cache;
816 if (vma && vma->vm_start <= addr && vma->vm_end > addr) 810 if (vma && vma->vm_start <= addr && vma->vm_end > addr)
817 return vma; 811 return vma;
818 812
819 /* trawl the tree (there may be multiple mappings in which addr 813 /* trawl the list (there may be multiple mappings in which addr
820 * resides) */ 814 * resides) */
821 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { 815 for (vma = mm->mmap; vma; vma = vma->vm_next) {
822 vma = rb_entry(n, struct vm_area_struct, vm_rb);
823 if (vma->vm_start > addr) 816 if (vma->vm_start > addr)
824 return NULL; 817 return NULL;
825 if (vma->vm_end > addr) { 818 if (vma->vm_end > addr) {
@@ -859,7 +852,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
859 unsigned long len) 852 unsigned long len)
860{ 853{
861 struct vm_area_struct *vma; 854 struct vm_area_struct *vma;
862 struct rb_node *n = mm->mm_rb.rb_node;
863 unsigned long end = addr + len; 855 unsigned long end = addr + len;
864 856
865 /* check the cache first */ 857 /* check the cache first */
@@ -867,10 +859,9 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
867 if (vma && vma->vm_start == addr && vma->vm_end == end) 859 if (vma && vma->vm_start == addr && vma->vm_end == end)
868 return vma; 860 return vma;
869 861
870 /* trawl the tree (there may be multiple mappings in which addr 862 /* trawl the list (there may be multiple mappings in which addr
871 * resides) */ 863 * resides) */
872 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { 864 for (vma = mm->mmap; vma; vma = vma->vm_next) {
873 vma = rb_entry(n, struct vm_area_struct, vm_rb);
874 if (vma->vm_start < addr) 865 if (vma->vm_start < addr)
875 continue; 866 continue;
876 if (vma->vm_start > addr) 867 if (vma->vm_start > addr)
@@ -1133,7 +1124,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
1133 unsigned long capabilities) 1124 unsigned long capabilities)
1134{ 1125{
1135 struct page *pages; 1126 struct page *pages;
1136 unsigned long total, point, n, rlen; 1127 unsigned long total, point, n;
1137 void *base; 1128 void *base;
1138 int ret, order; 1129 int ret, order;
1139 1130
@@ -1157,13 +1148,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
1157 * make a private copy of the data and map that instead */ 1148 * make a private copy of the data and map that instead */
1158 } 1149 }
1159 1150
1160 rlen = PAGE_ALIGN(len);
1161 1151
1162 /* allocate some memory to hold the mapping 1152 /* allocate some memory to hold the mapping
1163 * - note that this may not return a page-aligned address if the object 1153 * - note that this may not return a page-aligned address if the object
1164 * we're allocating is smaller than a page 1154 * we're allocating is smaller than a page
1165 */ 1155 */
1166 order = get_order(rlen); 1156 order = get_order(len);
1167 kdebug("alloc order %d for %lx", order, len); 1157 kdebug("alloc order %d for %lx", order, len);
1168 1158
1169 pages = alloc_pages(GFP_KERNEL, order); 1159 pages = alloc_pages(GFP_KERNEL, order);
@@ -1173,7 +1163,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
1173 total = 1 << order; 1163 total = 1 << order;
1174 atomic_long_add(total, &mmap_pages_allocated); 1164 atomic_long_add(total, &mmap_pages_allocated);
1175 1165
1176 point = rlen >> PAGE_SHIFT; 1166 point = len >> PAGE_SHIFT;
1177 1167
1178 /* we allocated a power-of-2 sized page set, so we may want to trim off 1168 /* we allocated a power-of-2 sized page set, so we may want to trim off
1179 * the excess */ 1169 * the excess */
@@ -1195,7 +1185,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
1195 base = page_address(pages); 1185 base = page_address(pages);
1196 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; 1186 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1197 region->vm_start = (unsigned long) base; 1187 region->vm_start = (unsigned long) base;
1198 region->vm_end = region->vm_start + rlen; 1188 region->vm_end = region->vm_start + len;
1199 region->vm_top = region->vm_start + (total << PAGE_SHIFT); 1189 region->vm_top = region->vm_start + (total << PAGE_SHIFT);
1200 1190
1201 vma->vm_start = region->vm_start; 1191 vma->vm_start = region->vm_start;
@@ -1211,22 +1201,22 @@ static int do_mmap_private(struct vm_area_struct *vma,
1211 1201
1212 old_fs = get_fs(); 1202 old_fs = get_fs();
1213 set_fs(KERNEL_DS); 1203 set_fs(KERNEL_DS);
1214 ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos); 1204 ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
1215 set_fs(old_fs); 1205 set_fs(old_fs);
1216 1206
1217 if (ret < 0) 1207 if (ret < 0)
1218 goto error_free; 1208 goto error_free;
1219 1209
1220 /* clear the last little bit */ 1210 /* clear the last little bit */
1221 if (ret < rlen) 1211 if (ret < len)
1222 memset(base + ret, 0, rlen - ret); 1212 memset(base + ret, 0, len - ret);
1223 1213
1224 } 1214 }
1225 1215
1226 return 0; 1216 return 0;
1227 1217
1228error_free: 1218error_free:
1229 free_page_series(region->vm_start, region->vm_end); 1219 free_page_series(region->vm_start, region->vm_top);
1230 region->vm_start = vma->vm_start = 0; 1220 region->vm_start = vma->vm_start = 0;
1231 region->vm_end = vma->vm_end = 0; 1221 region->vm_end = vma->vm_end = 0;
1232 region->vm_top = 0; 1222 region->vm_top = 0;
@@ -1235,7 +1225,7 @@ error_free:
1235enomem: 1225enomem:
1236 printk("Allocation of length %lu from process %d (%s) failed\n", 1226 printk("Allocation of length %lu from process %d (%s) failed\n",
1237 len, current->pid, current->comm); 1227 len, current->pid, current->comm);
1238 show_free_areas(); 1228 show_free_areas(0);
1239 return -ENOMEM; 1229 return -ENOMEM;
1240} 1230}
1241 1231
@@ -1268,6 +1258,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1268 1258
1269 /* we ignore the address hint */ 1259 /* we ignore the address hint */
1270 addr = 0; 1260 addr = 0;
1261 len = PAGE_ALIGN(len);
1271 1262
1272 /* we've determined that we can make the mapping, now translate what we 1263 /* we've determined that we can make the mapping, now translate what we
1273 * now know into VMA flags */ 1264 * now know into VMA flags */
@@ -1385,15 +1376,15 @@ unsigned long do_mmap_pgoff(struct file *file,
1385 if (capabilities & BDI_CAP_MAP_DIRECT) { 1376 if (capabilities & BDI_CAP_MAP_DIRECT) {
1386 addr = file->f_op->get_unmapped_area(file, addr, len, 1377 addr = file->f_op->get_unmapped_area(file, addr, len,
1387 pgoff, flags); 1378 pgoff, flags);
1388 if (IS_ERR((void *) addr)) { 1379 if (IS_ERR_VALUE(addr)) {
1389 ret = addr; 1380 ret = addr;
1390 if (ret != (unsigned long) -ENOSYS) 1381 if (ret != -ENOSYS)
1391 goto error_just_free; 1382 goto error_just_free;
1392 1383
1393 /* the driver refused to tell us where to site 1384 /* the driver refused to tell us where to site
1394 * the mapping so we'll have to attempt to copy 1385 * the mapping so we'll have to attempt to copy
1395 * it */ 1386 * it */
1396 ret = (unsigned long) -ENODEV; 1387 ret = -ENODEV;
1397 if (!(capabilities & BDI_CAP_MAP_COPY)) 1388 if (!(capabilities & BDI_CAP_MAP_COPY))
1398 goto error_just_free; 1389 goto error_just_free;
1399 1390
@@ -1468,14 +1459,14 @@ error_getting_vma:
1468 printk(KERN_WARNING "Allocation of vma for %lu byte allocation" 1459 printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
1469 " from process %d failed\n", 1460 " from process %d failed\n",
1470 len, current->pid); 1461 len, current->pid);
1471 show_free_areas(); 1462 show_free_areas(0);
1472 return -ENOMEM; 1463 return -ENOMEM;
1473 1464
1474error_getting_region: 1465error_getting_region:
1475 printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" 1466 printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
1476 " from process %d failed\n", 1467 " from process %d failed\n",
1477 len, current->pid); 1468 len, current->pid);
1478 show_free_areas(); 1469 show_free_areas(0);
1479 return -ENOMEM; 1470 return -ENOMEM;
1480} 1471}
1481EXPORT_SYMBOL(do_mmap_pgoff); 1472EXPORT_SYMBOL(do_mmap_pgoff);
@@ -1644,15 +1635,17 @@ static int shrink_vma(struct mm_struct *mm,
1644int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) 1635int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1645{ 1636{
1646 struct vm_area_struct *vma; 1637 struct vm_area_struct *vma;
1647 struct rb_node *rb; 1638 unsigned long end;
1648 unsigned long end = start + len;
1649 int ret; 1639 int ret;
1650 1640
1651 kenter(",%lx,%zx", start, len); 1641 kenter(",%lx,%zx", start, len);
1652 1642
1643 len = PAGE_ALIGN(len);
1653 if (len == 0) 1644 if (len == 0)
1654 return -EINVAL; 1645 return -EINVAL;
1655 1646
1647 end = start + len;
1648
1656 /* find the first potentially overlapping VMA */ 1649 /* find the first potentially overlapping VMA */
1657 vma = find_vma(mm, start); 1650 vma = find_vma(mm, start);
1658 if (!vma) { 1651 if (!vma) {
@@ -1677,9 +1670,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1677 } 1670 }
1678 if (end == vma->vm_end) 1671 if (end == vma->vm_end)
1679 goto erase_whole_vma; 1672 goto erase_whole_vma;
1680 rb = rb_next(&vma->vm_rb); 1673 vma = vma->vm_next;
1681 vma = rb_entry(rb, struct vm_area_struct, vm_rb); 1674 } while (vma);
1682 } while (rb);
1683 kleave(" = -EINVAL [split file]"); 1675 kleave(" = -EINVAL [split file]");
1684 return -EINVAL; 1676 return -EINVAL;
1685 } else { 1677 } else {
@@ -1773,6 +1765,8 @@ unsigned long do_mremap(unsigned long addr,
1773 struct vm_area_struct *vma; 1765 struct vm_area_struct *vma;
1774 1766
1775 /* insanity checks first */ 1767 /* insanity checks first */
1768 old_len = PAGE_ALIGN(old_len);
1769 new_len = PAGE_ALIGN(new_len);
1776 if (old_len == 0 || new_len == 0) 1770 if (old_len == 0 || new_len == 0)
1777 return (unsigned long) -EINVAL; 1771 return (unsigned long) -EINVAL;
1778 1772
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f52e85c80e8d..e4b0991ca351 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -38,6 +38,33 @@ int sysctl_oom_kill_allocating_task;
38int sysctl_oom_dump_tasks = 1; 38int sysctl_oom_dump_tasks = 1;
39static DEFINE_SPINLOCK(zone_scan_lock); 39static DEFINE_SPINLOCK(zone_scan_lock);
40 40
41/**
42 * test_set_oom_score_adj() - set current's oom_score_adj and return old value
43 * @new_val: new oom_score_adj value
44 *
45 * Sets the oom_score_adj value for current to @new_val with proper
46 * synchronization and returns the old value. Usually used to temporarily
47 * set a value, save the old value in the caller, and then reinstate it later.
48 */
49int test_set_oom_score_adj(int new_val)
50{
51 struct sighand_struct *sighand = current->sighand;
52 int old_val;
53
54 spin_lock_irq(&sighand->siglock);
55 old_val = current->signal->oom_score_adj;
56 if (new_val != old_val) {
57 if (new_val == OOM_SCORE_ADJ_MIN)
58 atomic_inc(&current->mm->oom_disable_count);
59 else if (old_val == OOM_SCORE_ADJ_MIN)
60 atomic_dec(&current->mm->oom_disable_count);
61 current->signal->oom_score_adj = new_val;
62 }
63 spin_unlock_irq(&sighand->siglock);
64
65 return old_val;
66}
67
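For context, a minimal caller sketch of the save/restore pattern the kernel-doc above describes; the helper name is hypothetical and not part of this diff:

	static void example_raise_oom_priority(void)
	{
		int old_adj;

		/* temporarily make current the preferred OOM victim */
		old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);

		/* ... memory-hungry work that may trigger the OOM killer ... */

		/* reinstate the saved value */
		test_set_oom_score_adj(old_adj);
	}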
41#ifdef CONFIG_NUMA 68#ifdef CONFIG_NUMA
42/** 69/**
 43 * has_intersects_mems_allowed() - check task eligibility for kill 70 * has_intersects_mems_allowed() - check task eligibility for kill
@@ -155,15 +182,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
155 } 182 }
156 183
157 /* 184 /*
158 * When the PF_OOM_ORIGIN bit is set, it indicates the task should have
159 * priority for oom killing.
160 */
161 if (p->flags & PF_OOM_ORIGIN) {
162 task_unlock(p);
163 return 1000;
164 }
165
166 /*
167 * The memory controller may have a limit of 0 bytes, so avoid a divide 185 * The memory controller may have a limit of 0 bytes, so avoid a divide
168 * by zero, if necessary. 186 * by zero, if necessary.
169 */ 187 */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9d5498e2d0f5..a4e1db3f1981 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -30,6 +30,7 @@
30#include <linux/pagevec.h> 30#include <linux/pagevec.h>
31#include <linux/blkdev.h> 31#include <linux/blkdev.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/ratelimit.h>
33#include <linux/oom.h> 34#include <linux/oom.h>
34#include <linux/notifier.h> 35#include <linux/notifier.h>
35#include <linux/topology.h> 36#include <linux/topology.h>
@@ -39,6 +40,7 @@
39#include <linux/memory_hotplug.h> 40#include <linux/memory_hotplug.h>
40#include <linux/nodemask.h> 41#include <linux/nodemask.h>
41#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/vmstat.h>
42#include <linux/mempolicy.h> 44#include <linux/mempolicy.h>
43#include <linux/stop_machine.h> 45#include <linux/stop_machine.h>
44#include <linux/sort.h> 46#include <linux/sort.h>
@@ -1735,6 +1737,45 @@ static inline bool should_suppress_show_mem(void)
1735 return ret; 1737 return ret;
1736} 1738}
1737 1739
1740static DEFINE_RATELIMIT_STATE(nopage_rs,
1741 DEFAULT_RATELIMIT_INTERVAL,
1742 DEFAULT_RATELIMIT_BURST);
1743
1744void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1745{
1746 va_list args;
1747 unsigned int filter = SHOW_MEM_FILTER_NODES;
1748
1749 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
1750 return;
1751
1752 /*
1753 * This documents exceptions given to allocations in certain
1754 * contexts that are allowed to allocate outside current's set
1755 * of allowed nodes.
1756 */
1757 if (!(gfp_mask & __GFP_NOMEMALLOC))
1758 if (test_thread_flag(TIF_MEMDIE) ||
1759 (current->flags & (PF_MEMALLOC | PF_EXITING)))
1760 filter &= ~SHOW_MEM_FILTER_NODES;
1761 if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
1762 filter &= ~SHOW_MEM_FILTER_NODES;
1763
1764 if (fmt) {
1765 printk(KERN_WARNING);
1766 va_start(args, fmt);
1767 vprintk(fmt, args);
1768 va_end(args);
1769 }
1770
1771 pr_warning("%s: page allocation failure: order:%d, mode:0x%x\n",
1772 current->comm, order, gfp_mask);
1773
1774 dump_stack();
1775 if (!should_suppress_show_mem())
1776 show_mem(filter);
1777}
1778
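A hedged sketch of a caller of the new helper; the message is illustrative only (the consolidated nopage: path further down simply passes NULL for fmt):

	/* hypothetical failure path in an allocator */
	if (!page) {
		/* rate-limited, and suppressed entirely for __GFP_NOWARN */
		warn_alloc_failed(gfp_mask, order, "while growing the foo cache\n");
		return NULL;
	}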
1738static inline int 1779static inline int
1739should_alloc_retry(gfp_t gfp_mask, unsigned int order, 1780should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1740 unsigned long pages_reclaimed) 1781 unsigned long pages_reclaimed)
@@ -2065,6 +2106,7 @@ restart:
2065 first_zones_zonelist(zonelist, high_zoneidx, NULL, 2106 first_zones_zonelist(zonelist, high_zoneidx, NULL,
2066 &preferred_zone); 2107 &preferred_zone);
2067 2108
2109rebalance:
2068 /* This is the last chance, in general, before the goto nopage. */ 2110 /* This is the last chance, in general, before the goto nopage. */
2069 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 2111 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2070 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, 2112 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2072,7 +2114,6 @@ restart:
2072 if (page) 2114 if (page)
2073 goto got_pg; 2115 goto got_pg;
2074 2116
2075rebalance:
2076 /* Allocate without watermarks if the context allows */ 2117 /* Allocate without watermarks if the context allows */
2077 if (alloc_flags & ALLOC_NO_WATERMARKS) { 2118 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2078 page = __alloc_pages_high_priority(gfp_mask, order, 2119 page = __alloc_pages_high_priority(gfp_mask, order,
@@ -2106,7 +2147,7 @@ rebalance:
2106 sync_migration); 2147 sync_migration);
2107 if (page) 2148 if (page)
2108 goto got_pg; 2149 goto got_pg;
2109 sync_migration = !(gfp_mask & __GFP_NO_KSWAPD); 2150 sync_migration = true;
2110 2151
2111 /* Try direct reclaim and then allocating */ 2152 /* Try direct reclaim and then allocating */
2112 page = __alloc_pages_direct_reclaim(gfp_mask, order, 2153 page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2177,27 +2218,7 @@ rebalance:
2177 } 2218 }
2178 2219
2179nopage: 2220nopage:
2180 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 2221 warn_alloc_failed(gfp_mask, order, NULL);
2181 unsigned int filter = SHOW_MEM_FILTER_NODES;
2182
2183 /*
2184 * This documents exceptions given to allocations in certain
2185 * contexts that are allowed to allocate outside current's set
2186 * of allowed nodes.
2187 */
2188 if (!(gfp_mask & __GFP_NOMEMALLOC))
2189 if (test_thread_flag(TIF_MEMDIE) ||
2190 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2191 filter &= ~SHOW_MEM_FILTER_NODES;
2192 if (in_interrupt() || !wait)
2193 filter &= ~SHOW_MEM_FILTER_NODES;
2194
2195 pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
2196 current->comm, order, gfp_mask);
2197 dump_stack();
2198 if (!should_suppress_show_mem())
2199 show_mem(filter);
2200 }
2201 return page; 2222 return page;
2202got_pg: 2223got_pg:
2203 if (kmemcheck_enabled) 2224 if (kmemcheck_enabled)
@@ -2226,6 +2247,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2226 2247
2227 if (should_fail_alloc_page(gfp_mask, order)) 2248 if (should_fail_alloc_page(gfp_mask, order))
2228 return NULL; 2249 return NULL;
2250#ifndef CONFIG_ZONE_DMA
2251 if (WARN_ON_ONCE(gfp_mask & __GFP_DMA))
2252 return NULL;
2253#endif
2229 2254
2230 /* 2255 /*
2231 * Check the zones suitable for the gfp_mask contain at least one 2256 * Check the zones suitable for the gfp_mask contain at least one
@@ -2473,10 +2498,10 @@ void si_meminfo_node(struct sysinfo *val, int nid)
2473#endif 2498#endif
2474 2499
2475/* 2500/*
2476 * Determine whether the zone's node should be displayed or not, depending on 2501 * Determine whether the node should be displayed or not, depending on whether
2477 * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas(). 2502 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2478 */ 2503 */
2479static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone) 2504bool skip_free_areas_node(unsigned int flags, int nid)
2480{ 2505{
2481 bool ret = false; 2506 bool ret = false;
2482 2507
@@ -2484,8 +2509,7 @@ static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
2484 goto out; 2509 goto out;
2485 2510
2486 get_mems_allowed(); 2511 get_mems_allowed();
2487 ret = !node_isset(zone->zone_pgdat->node_id, 2512 ret = !node_isset(nid, cpuset_current_mems_allowed);
2488 cpuset_current_mems_allowed);
2489 put_mems_allowed(); 2513 put_mems_allowed();
2490out: 2514out:
2491 return ret; 2515 return ret;
@@ -2500,13 +2524,13 @@ out:
2500 * Suppresses nodes that are not allowed by current's cpuset if 2524 * Suppresses nodes that are not allowed by current's cpuset if
2501 * SHOW_MEM_FILTER_NODES is passed. 2525 * SHOW_MEM_FILTER_NODES is passed.
2502 */ 2526 */
2503void __show_free_areas(unsigned int filter) 2527void show_free_areas(unsigned int filter)
2504{ 2528{
2505 int cpu; 2529 int cpu;
2506 struct zone *zone; 2530 struct zone *zone;
2507 2531
2508 for_each_populated_zone(zone) { 2532 for_each_populated_zone(zone) {
2509 if (skip_free_areas_zone(filter, zone)) 2533 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2510 continue; 2534 continue;
2511 show_node(zone); 2535 show_node(zone);
2512 printk("%s per-cpu:\n", zone->name); 2536 printk("%s per-cpu:\n", zone->name);
@@ -2549,7 +2573,7 @@ void __show_free_areas(unsigned int filter)
2549 for_each_populated_zone(zone) { 2573 for_each_populated_zone(zone) {
2550 int i; 2574 int i;
2551 2575
2552 if (skip_free_areas_zone(filter, zone)) 2576 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2553 continue; 2577 continue;
2554 show_node(zone); 2578 show_node(zone);
2555 printk("%s" 2579 printk("%s"
@@ -2618,7 +2642,7 @@ void __show_free_areas(unsigned int filter)
2618 for_each_populated_zone(zone) { 2642 for_each_populated_zone(zone) {
2619 unsigned long nr[MAX_ORDER], flags, order, total = 0; 2643 unsigned long nr[MAX_ORDER], flags, order, total = 0;
2620 2644
2621 if (skip_free_areas_zone(filter, zone)) 2645 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2622 continue; 2646 continue;
2623 show_node(zone); 2647 show_node(zone);
2624 printk("%s: ", zone->name); 2648 printk("%s: ", zone->name);
@@ -2639,11 +2663,6 @@ void __show_free_areas(unsigned int filter)
2639 show_swap_cache_info(); 2663 show_swap_cache_info();
2640} 2664}
2641 2665
2642void show_free_areas(void)
2643{
2644 __show_free_areas(0);
2645}
2646
2647static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 2666static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2648{ 2667{
2649 zoneref->zone = zone; 2668 zoneref->zone = zone;
@@ -3314,6 +3333,20 @@ static inline unsigned long wait_table_bits(unsigned long size)
3314#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 3333#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3315 3334
3316/* 3335/*
3336 * Check if a pageblock contains reserved pages
3337 */
3338static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3339{
3340 unsigned long pfn;
3341
3342 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3343 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3344 return 1;
3345 }
3346 return 0;
3347}
3348
3349/*
3317 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 3350 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3318 * of blocks reserved is based on min_wmark_pages(zone). The memory within 3351 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3319 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes 3352 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
@@ -3322,7 +3355,7 @@ static inline unsigned long wait_table_bits(unsigned long size)
3322 */ 3355 */
3323static void setup_zone_migrate_reserve(struct zone *zone) 3356static void setup_zone_migrate_reserve(struct zone *zone)
3324{ 3357{
3325 unsigned long start_pfn, pfn, end_pfn; 3358 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3326 struct page *page; 3359 struct page *page;
3327 unsigned long block_migratetype; 3360 unsigned long block_migratetype;
3328 int reserve; 3361 int reserve;
@@ -3352,7 +3385,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
3352 continue; 3385 continue;
3353 3386
3354 /* Blocks with reserved pages will never free, skip them. */ 3387 /* Blocks with reserved pages will never free, skip them. */
3355 if (PageReserved(page)) 3388 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3389 if (pageblock_is_reserved(pfn, block_end_pfn))
3356 continue; 3390 continue;
3357 3391
3358 block_migratetype = get_pageblock_migratetype(page); 3392 block_migratetype = get_pageblock_migratetype(page);
@@ -4289,10 +4323,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4289 zone->zone_pgdat = pgdat; 4323 zone->zone_pgdat = pgdat;
4290 4324
4291 zone_pcp_init(zone); 4325 zone_pcp_init(zone);
4292 for_each_lru(l) { 4326 for_each_lru(l)
4293 INIT_LIST_HEAD(&zone->lru[l].list); 4327 INIT_LIST_HEAD(&zone->lru[l].list);
4294 zone->reclaim_stat.nr_saved_scan[l] = 0;
4295 }
4296 zone->reclaim_stat.recent_rotated[0] = 0; 4328 zone->reclaim_stat.recent_rotated[0] = 0;
4297 zone->reclaim_stat.recent_rotated[1] = 0; 4329 zone->reclaim_stat.recent_rotated[1] = 0;
4298 zone->reclaim_stat.recent_scanned[0] = 0; 4330 zone->reclaim_stat.recent_scanned[0] = 0;
@@ -5100,7 +5132,7 @@ void setup_per_zone_wmarks(void)
5100 * 1TB 101 10GB 5132 * 1TB 101 10GB
5101 * 10TB 320 32GB 5133 * 10TB 320 32GB
5102 */ 5134 */
5103void calculate_zone_inactive_ratio(struct zone *zone) 5135static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5104{ 5136{
5105 unsigned int gb, ratio; 5137 unsigned int gb, ratio;
5106 5138
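Worked numbers for the table above, assuming the body of this function (not shown in the hunk) still computes ratio = int_sqrt(10 * gb): a 1 TB zone is 1024 GB, so ratio = int_sqrt(10240) = 101; a 10 TB zone gives int_sqrt(102400) = 320, i.e. roughly 10 GB and 32 GB of inactive anon respectively.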
@@ -5114,7 +5146,7 @@ void calculate_zone_inactive_ratio(struct zone *zone)
5114 zone->inactive_ratio = ratio; 5146 zone->inactive_ratio = ratio;
5115} 5147}
5116 5148
5117static void __init setup_per_zone_inactive_ratio(void) 5149static void __meminit setup_per_zone_inactive_ratio(void)
5118{ 5150{
5119 struct zone *zone; 5151 struct zone *zone;
5120 5152
@@ -5146,7 +5178,7 @@ static void __init setup_per_zone_inactive_ratio(void)
5146 * 8192MB: 11584k 5178 * 8192MB: 11584k
5147 * 16384MB: 16384k 5179 * 16384MB: 16384k
5148 */ 5180 */
5149static int __init init_per_zone_wmark_min(void) 5181int __meminit init_per_zone_wmark_min(void)
5150{ 5182{
5151 unsigned long lowmem_kbytes; 5183 unsigned long lowmem_kbytes;
5152 5184
@@ -5158,6 +5190,7 @@ static int __init init_per_zone_wmark_min(void)
5158 if (min_free_kbytes > 65536) 5190 if (min_free_kbytes > 65536)
5159 min_free_kbytes = 65536; 5191 min_free_kbytes = 65536;
5160 setup_per_zone_wmarks(); 5192 setup_per_zone_wmarks();
5193 refresh_zone_stat_thresholds();
5161 setup_per_zone_lowmem_reserve(); 5194 setup_per_zone_lowmem_reserve();
5162 setup_per_zone_inactive_ratio(); 5195 setup_per_zone_inactive_ratio();
5163 return 0; 5196 return 0;
@@ -5508,10 +5541,8 @@ int set_migratetype_isolate(struct page *page)
5508 struct memory_isolate_notify arg; 5541 struct memory_isolate_notify arg;
5509 int notifier_ret; 5542 int notifier_ret;
5510 int ret = -EBUSY; 5543 int ret = -EBUSY;
5511 int zone_idx;
5512 5544
5513 zone = page_zone(page); 5545 zone = page_zone(page);
5514 zone_idx = zone_idx(zone);
5515 5546
5516 spin_lock_irqsave(&zone->lock, flags); 5547 spin_lock_irqsave(&zone->lock, flags);
5517 5548
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 2daadc322ba6..74ccff61d1be 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -130,7 +130,7 @@ struct page *lookup_cgroup_page(struct page_cgroup *pc)
130 return page; 130 return page;
131} 131}
132 132
133static void *__init_refok alloc_page_cgroup(size_t size, int nid) 133static void *__meminit alloc_page_cgroup(size_t size, int nid)
134{ 134{
135 void *addr = NULL; 135 void *addr = NULL;
136 136
@@ -162,7 +162,7 @@ static void free_page_cgroup(void *addr)
162} 162}
163#endif 163#endif
164 164
165static int __init_refok init_section_page_cgroup(unsigned long pfn) 165static int __meminit init_section_page_cgroup(unsigned long pfn)
166{ 166{
167 struct page_cgroup *base, *pc; 167 struct page_cgroup *base, *pc;
168 struct mem_section *section; 168 struct mem_section *section;
@@ -475,7 +475,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
475 if (!do_swap_account) 475 if (!do_swap_account)
476 return 0; 476 return 0;
477 477
478 length = ((max_pages/SC_PER_PAGE) + 1); 478 length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
479 array_size = length * sizeof(void *); 479 array_size = length * sizeof(void *);
480 480
481 array = vmalloc(array_size); 481 array = vmalloc(array_size);
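The DIV_ROUND_UP() form only rounds up when there is a remainder. With an illustrative SC_PER_PAGE of 2048, max_pages = 4096 now needs DIV_ROUND_UP(4096, 2048) = 2 map pages where the old (max_pages/SC_PER_PAGE) + 1 expression allocated 3, while max_pages = 4097 still gets 3.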
@@ -492,8 +492,8 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
492 /* memory shortage */ 492 /* memory shortage */
493 ctrl->map = NULL; 493 ctrl->map = NULL;
494 ctrl->length = 0; 494 ctrl->length = 0;
495 vfree(array);
496 mutex_unlock(&swap_cgroup_mutex); 495 mutex_unlock(&swap_cgroup_mutex);
496 vfree(array);
497 goto nomem; 497 goto nomem;
498 } 498 }
499 mutex_unlock(&swap_cgroup_mutex); 499 mutex_unlock(&swap_cgroup_mutex);
@@ -508,7 +508,8 @@ nomem:
508 508
509void swap_cgroup_swapoff(int type) 509void swap_cgroup_swapoff(int type)
510{ 510{
511 int i; 511 struct page **map;
512 unsigned long i, length;
512 struct swap_cgroup_ctrl *ctrl; 513 struct swap_cgroup_ctrl *ctrl;
513 514
514 if (!do_swap_account) 515 if (!do_swap_account)
@@ -516,17 +517,20 @@ void swap_cgroup_swapoff(int type)
516 517
517 mutex_lock(&swap_cgroup_mutex); 518 mutex_lock(&swap_cgroup_mutex);
518 ctrl = &swap_cgroup_ctrl[type]; 519 ctrl = &swap_cgroup_ctrl[type];
519 if (ctrl->map) { 520 map = ctrl->map;
520 for (i = 0; i < ctrl->length; i++) { 521 length = ctrl->length;
521 struct page *page = ctrl->map[i]; 522 ctrl->map = NULL;
523 ctrl->length = 0;
524 mutex_unlock(&swap_cgroup_mutex);
525
526 if (map) {
527 for (i = 0; i < length; i++) {
528 struct page *page = map[i];
522 if (page) 529 if (page)
523 __free_page(page); 530 __free_page(page);
524 } 531 }
525 vfree(ctrl->map); 532 vfree(map);
526 ctrl->map = NULL;
527 ctrl->length = 0;
528 } 533 }
529 mutex_unlock(&swap_cgroup_mutex);
530} 534}
531 535
532#endif 536#endif
diff --git a/mm/readahead.c b/mm/readahead.c
index 2c0cc489e288..867f9dd82dcd 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -180,7 +180,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
180 if (page) 180 if (page)
181 continue; 181 continue;
182 182
183 page = page_cache_alloc_cold(mapping); 183 page = page_cache_alloc_readahead(mapping);
184 if (!page) 184 if (!page)
185 break; 185 break;
186 page->index = page_offset; 186 page->index = page_offset;
diff --git a/mm/rmap.c b/mm/rmap.c
index 522e4a93cadd..3a39b518a653 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -24,8 +24,8 @@
24 * inode->i_alloc_sem (vmtruncate_range) 24 * inode->i_alloc_sem (vmtruncate_range)
25 * mm->mmap_sem 25 * mm->mmap_sem
26 * page->flags PG_locked (lock_page) 26 * page->flags PG_locked (lock_page)
27 * mapping->i_mmap_lock 27 * mapping->i_mmap_mutex
28 * anon_vma->lock 28 * anon_vma->mutex
29 * mm->page_table_lock or pte_lock 29 * mm->page_table_lock or pte_lock
30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
31 * swap_lock (in swap_duplicate, swap_info_get) 31 * swap_lock (in swap_duplicate, swap_info_get)
@@ -40,7 +40,7 @@
40 * 40 *
41 * (code doesn't rely on that order so it could be switched around) 41 * (code doesn't rely on that order so it could be switched around)
42 * ->tasklist_lock 42 * ->tasklist_lock
43 * anon_vma->lock (memory_failure, collect_procs_anon) 43 * anon_vma->mutex (memory_failure, collect_procs_anon)
44 * pte map lock 44 * pte map lock
45 */ 45 */
46 46
@@ -86,6 +86,29 @@ static inline struct anon_vma *anon_vma_alloc(void)
86static inline void anon_vma_free(struct anon_vma *anon_vma) 86static inline void anon_vma_free(struct anon_vma *anon_vma)
87{ 87{
88 VM_BUG_ON(atomic_read(&anon_vma->refcount)); 88 VM_BUG_ON(atomic_read(&anon_vma->refcount));
89
90 /*
91 * Synchronize against page_lock_anon_vma() such that
92 * we can safely hold the lock without the anon_vma getting
93 * freed.
94 *
95 * Relies on the full mb implied by the atomic_dec_and_test() from
96 * put_anon_vma() against the acquire barrier implied by
97 * mutex_trylock() from page_lock_anon_vma(). This orders:
98 *
99 * page_lock_anon_vma() VS put_anon_vma()
100 * mutex_trylock() atomic_dec_and_test()
101 * LOCK MB
102 * atomic_read() mutex_is_locked()
103 *
104 * LOCK should suffice since the actual taking of the lock must
105 * happen _before_ what follows.
106 */
107 if (mutex_is_locked(&anon_vma->root->mutex)) {
108 anon_vma_lock(anon_vma);
109 anon_vma_unlock(anon_vma);
110 }
111
89 kmem_cache_free(anon_vma_cachep, anon_vma); 112 kmem_cache_free(anon_vma_cachep, anon_vma);
90} 113}
91 114
@@ -307,7 +330,7 @@ static void anon_vma_ctor(void *data)
307{ 330{
308 struct anon_vma *anon_vma = data; 331 struct anon_vma *anon_vma = data;
309 332
310 spin_lock_init(&anon_vma->lock); 333 mutex_init(&anon_vma->mutex);
311 atomic_set(&anon_vma->refcount, 0); 334 atomic_set(&anon_vma->refcount, 0);
312 INIT_LIST_HEAD(&anon_vma->head); 335 INIT_LIST_HEAD(&anon_vma->head);
313} 336}
@@ -320,12 +343,26 @@ void __init anon_vma_init(void)
320} 343}
321 344
322/* 345/*
323 * Getting a lock on a stable anon_vma from a page off the LRU is 346 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
324 * tricky: page_lock_anon_vma rely on RCU to guard against the races. 347 *
 348 * Since there is no serialization whatsoever against page_remove_rmap()
349 * the best this function can do is return a locked anon_vma that might
350 * have been relevant to this page.
351 *
352 * The page might have been remapped to a different anon_vma or the anon_vma
353 * returned may already be freed (and even reused).
354 *
355 * All users of this function must be very careful when walking the anon_vma
356 * chain and verify that the page in question is indeed mapped in it
357 * [ something equivalent to page_mapped_in_vma() ].
358 *
359 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
360 * that the anon_vma pointer from page->mapping is valid if there is a
361 * mapcount, we can dereference the anon_vma after observing those.
325 */ 362 */
326struct anon_vma *__page_lock_anon_vma(struct page *page) 363struct anon_vma *page_get_anon_vma(struct page *page)
327{ 364{
328 struct anon_vma *anon_vma, *root_anon_vma; 365 struct anon_vma *anon_vma = NULL;
329 unsigned long anon_mapping; 366 unsigned long anon_mapping;
330 367
331 rcu_read_lock(); 368 rcu_read_lock();
@@ -336,32 +373,97 @@ struct anon_vma *__page_lock_anon_vma(struct page *page)
336 goto out; 373 goto out;
337 374
338 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 375 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
339 root_anon_vma = ACCESS_ONCE(anon_vma->root); 376 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
340 spin_lock(&root_anon_vma->lock); 377 anon_vma = NULL;
378 goto out;
379 }
341 380
342 /* 381 /*
343 * If this page is still mapped, then its anon_vma cannot have been 382 * If this page is still mapped, then its anon_vma cannot have been
344 * freed. But if it has been unmapped, we have no security against 383 * freed. But if it has been unmapped, we have no security against the
345 * the anon_vma structure being freed and reused (for another anon_vma: 384 * anon_vma structure being freed and reused (for another anon_vma:
346 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot 385 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
347 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting 386 * above cannot corrupt).
348 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
349 */ 387 */
350 if (page_mapped(page)) 388 if (!page_mapped(page)) {
351 return anon_vma; 389 put_anon_vma(anon_vma);
390 anon_vma = NULL;
391 }
392out:
393 rcu_read_unlock();
394
395 return anon_vma;
396}
397
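A minimal caller sketch for the reference-only variant, following the rules laid out in the comment above (the caller is hypothetical, not part of this diff):

	static void inspect_page_anon_vma(struct page *page)
	{
		struct anon_vma *anon_vma = page_get_anon_vma(page);

		if (!anon_vma)
			return;		/* not anonymous, or already torn down */

		/*
		 * anon_vma is pinned but NOT locked here; the chain may no
		 * longer be relevant, so re-check (e.g. page_mapped_in_vma())
		 * before trusting anything found while walking it.
		 */

		put_anon_vma(anon_vma);	/* drop the reference */
	}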
398/*
399 * Similar to page_get_anon_vma() except it locks the anon_vma.
400 *
 401 * It's a little more complex as it tries to keep the fast path to a single
402 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
403 * reference like with page_get_anon_vma() and then block on the mutex.
404 */
405struct anon_vma *page_lock_anon_vma(struct page *page)
406{
407 struct anon_vma *anon_vma = NULL;
408 unsigned long anon_mapping;
409
410 rcu_read_lock();
411 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
412 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
413 goto out;
414 if (!page_mapped(page))
415 goto out;
416
417 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
418 if (mutex_trylock(&anon_vma->root->mutex)) {
419 /*
420 * If we observe a !0 refcount, then holding the lock ensures
421 * the anon_vma will not go away, see __put_anon_vma().
422 */
423 if (!atomic_read(&anon_vma->refcount)) {
424 anon_vma_unlock(anon_vma);
425 anon_vma = NULL;
426 }
427 goto out;
428 }
429
430 /* trylock failed, we got to sleep */
431 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
432 anon_vma = NULL;
433 goto out;
434 }
435
436 if (!page_mapped(page)) {
437 put_anon_vma(anon_vma);
438 anon_vma = NULL;
439 goto out;
440 }
441
 442 /* we pinned the anon_vma, it's safe to sleep */
443 rcu_read_unlock();
444 anon_vma_lock(anon_vma);
445
446 if (atomic_dec_and_test(&anon_vma->refcount)) {
447 /*
448 * Oops, we held the last refcount, release the lock
449 * and bail -- can't simply use put_anon_vma() because
450 * we'll deadlock on the anon_vma_lock() recursion.
451 */
452 anon_vma_unlock(anon_vma);
453 __put_anon_vma(anon_vma);
454 anon_vma = NULL;
455 }
456
457 return anon_vma;
352 458
353 spin_unlock(&root_anon_vma->lock);
354out: 459out:
355 rcu_read_unlock(); 460 rcu_read_unlock();
356 return NULL; 461 return anon_vma;
357} 462}
358 463
359void page_unlock_anon_vma(struct anon_vma *anon_vma) 464void page_unlock_anon_vma(struct anon_vma *anon_vma)
360 __releases(&anon_vma->root->lock)
361 __releases(RCU)
362{ 465{
363 anon_vma_unlock(anon_vma); 466 anon_vma_unlock(anon_vma);
364 rcu_read_unlock();
365} 467}
366 468
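And a matching sketch for the locking variant and its unlock pairing (again a hypothetical caller):

	static void walk_page_anon_vma(struct page *page)
	{
		struct anon_vma *anon_vma = page_lock_anon_vma(page);

		if (!anon_vma)
			return;

		/* anon_vma->root->mutex is held: safe to walk anon_vma->head */

		page_unlock_anon_vma(anon_vma);
	}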
367/* 469/*
@@ -646,14 +748,14 @@ static int page_referenced_file(struct page *page,
646 * The page lock not only makes sure that page->mapping cannot 748 * The page lock not only makes sure that page->mapping cannot
647 * suddenly be NULLified by truncation, it makes sure that the 749 * suddenly be NULLified by truncation, it makes sure that the
648 * structure at mapping cannot be freed and reused yet, 750 * structure at mapping cannot be freed and reused yet,
649 * so we can safely take mapping->i_mmap_lock. 751 * so we can safely take mapping->i_mmap_mutex.
650 */ 752 */
651 BUG_ON(!PageLocked(page)); 753 BUG_ON(!PageLocked(page));
652 754
653 spin_lock(&mapping->i_mmap_lock); 755 mutex_lock(&mapping->i_mmap_mutex);
654 756
655 /* 757 /*
656 * i_mmap_lock does not stabilize mapcount at all, but mapcount 758 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
657 * is more likely to be accurate if we note it after spinning. 759 * is more likely to be accurate if we note it after spinning.
658 */ 760 */
659 mapcount = page_mapcount(page); 761 mapcount = page_mapcount(page);
@@ -675,7 +777,7 @@ static int page_referenced_file(struct page *page,
675 break; 777 break;
676 } 778 }
677 779
678 spin_unlock(&mapping->i_mmap_lock); 780 mutex_unlock(&mapping->i_mmap_mutex);
679 return referenced; 781 return referenced;
680} 782}
681 783
@@ -762,7 +864,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
762 864
763 BUG_ON(PageAnon(page)); 865 BUG_ON(PageAnon(page));
764 866
765 spin_lock(&mapping->i_mmap_lock); 867 mutex_lock(&mapping->i_mmap_mutex);
766 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 868 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
767 if (vma->vm_flags & VM_SHARED) { 869 if (vma->vm_flags & VM_SHARED) {
768 unsigned long address = vma_address(page, vma); 870 unsigned long address = vma_address(page, vma);
@@ -771,7 +873,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
771 ret += page_mkclean_one(page, vma, address); 873 ret += page_mkclean_one(page, vma, address);
772 } 874 }
773 } 875 }
774 spin_unlock(&mapping->i_mmap_lock); 876 mutex_unlock(&mapping->i_mmap_mutex);
775 return ret; 877 return ret;
776} 878}
777 879
@@ -1119,7 +1221,7 @@ out_mlock:
1119 /* 1221 /*
1120 * We need mmap_sem locking, Otherwise VM_LOCKED check makes 1222 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
1121 * unstable result and race. Plus, We can't wait here because 1223 * unstable result and race. Plus, We can't wait here because
1122 * we now hold anon_vma->lock or mapping->i_mmap_lock. 1224 * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
1123 * if trylock failed, the page remain in evictable lru and later 1225 * if trylock failed, the page remain in evictable lru and later
1124 * vmscan could retry to move the page to unevictable lru if the 1226 * vmscan could retry to move the page to unevictable lru if the
1125 * page is actually mlocked. 1227 * page is actually mlocked.
@@ -1345,7 +1447,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1345 unsigned long max_nl_size = 0; 1447 unsigned long max_nl_size = 0;
1346 unsigned int mapcount; 1448 unsigned int mapcount;
1347 1449
1348 spin_lock(&mapping->i_mmap_lock); 1450 mutex_lock(&mapping->i_mmap_mutex);
1349 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1451 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1350 unsigned long address = vma_address(page, vma); 1452 unsigned long address = vma_address(page, vma);
1351 if (address == -EFAULT) 1453 if (address == -EFAULT)
@@ -1391,7 +1493,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1391 mapcount = page_mapcount(page); 1493 mapcount = page_mapcount(page);
1392 if (!mapcount) 1494 if (!mapcount)
1393 goto out; 1495 goto out;
1394 cond_resched_lock(&mapping->i_mmap_lock); 1496 cond_resched();
1395 1497
1396 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; 1498 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
1397 if (max_nl_cursor == 0) 1499 if (max_nl_cursor == 0)
@@ -1413,7 +1515,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1413 } 1515 }
1414 vma->vm_private_data = (void *) max_nl_cursor; 1516 vma->vm_private_data = (void *) max_nl_cursor;
1415 } 1517 }
1416 cond_resched_lock(&mapping->i_mmap_lock); 1518 cond_resched();
1417 max_nl_cursor += CLUSTER_SIZE; 1519 max_nl_cursor += CLUSTER_SIZE;
1418 } while (max_nl_cursor <= max_nl_size); 1520 } while (max_nl_cursor <= max_nl_size);
1419 1521
@@ -1425,7 +1527,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1425 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 1527 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1426 vma->vm_private_data = NULL; 1528 vma->vm_private_data = NULL;
1427out: 1529out:
1428 spin_unlock(&mapping->i_mmap_lock); 1530 mutex_unlock(&mapping->i_mmap_mutex);
1429 return ret; 1531 return ret;
1430} 1532}
1431 1533
@@ -1544,7 +1646,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1544 1646
1545 if (!mapping) 1647 if (!mapping)
1546 return ret; 1648 return ret;
1547 spin_lock(&mapping->i_mmap_lock); 1649 mutex_lock(&mapping->i_mmap_mutex);
1548 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1650 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1549 unsigned long address = vma_address(page, vma); 1651 unsigned long address = vma_address(page, vma);
1550 if (address == -EFAULT) 1652 if (address == -EFAULT)
@@ -1558,7 +1660,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1558 * never contain migration ptes. Decide what to do about this 1660 * never contain migration ptes. Decide what to do about this
1559 * limitation to linear when we need rmap_walk() on nonlinear. 1661 * limitation to linear when we need rmap_walk() on nonlinear.
1560 */ 1662 */
1561 spin_unlock(&mapping->i_mmap_lock); 1663 mutex_unlock(&mapping->i_mmap_mutex);
1562 return ret; 1664 return ret;
1563} 1665}
1564 1666
diff --git a/mm/shmem.c b/mm/shmem.c
index ba4ad28b7db6..1acfb2687bfa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -99,6 +99,13 @@ static struct vfsmount *shm_mnt;
99/* Pretend that each entry is of this size in directory's i_size */ 99/* Pretend that each entry is of this size in directory's i_size */
100#define BOGO_DIRENT_SIZE 20 100#define BOGO_DIRENT_SIZE 20
101 101
102struct shmem_xattr {
103 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
104 char *name; /* xattr name */
105 size_t size;
106 char value[0];
107};
108
102/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ 109/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
103enum sgp_type { 110enum sgp_type {
104 SGP_READ, /* don't exceed i_size, don't allocate page */ 111 SGP_READ, /* don't exceed i_size, don't allocate page */
@@ -822,6 +829,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
822static void shmem_evict_inode(struct inode *inode) 829static void shmem_evict_inode(struct inode *inode)
823{ 830{
824 struct shmem_inode_info *info = SHMEM_I(inode); 831 struct shmem_inode_info *info = SHMEM_I(inode);
832 struct shmem_xattr *xattr, *nxattr;
825 833
826 if (inode->i_mapping->a_ops == &shmem_aops) { 834 if (inode->i_mapping->a_ops == &shmem_aops) {
827 truncate_inode_pages(inode->i_mapping, 0); 835 truncate_inode_pages(inode->i_mapping, 0);
@@ -834,6 +842,11 @@ static void shmem_evict_inode(struct inode *inode)
834 mutex_unlock(&shmem_swaplist_mutex); 842 mutex_unlock(&shmem_swaplist_mutex);
835 } 843 }
836 } 844 }
845
846 list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
847 kfree(xattr->name);
848 kfree(xattr);
849 }
837 BUG_ON(inode->i_blocks); 850 BUG_ON(inode->i_blocks);
838 shmem_free_inode(inode->i_sb); 851 shmem_free_inode(inode->i_sb);
839 end_writeback(inode); 852 end_writeback(inode);
@@ -1292,12 +1305,10 @@ repeat:
1292 swappage = lookup_swap_cache(swap); 1305 swappage = lookup_swap_cache(swap);
1293 if (!swappage) { 1306 if (!swappage) {
1294 shmem_swp_unmap(entry); 1307 shmem_swp_unmap(entry);
1308 spin_unlock(&info->lock);
1295 /* here we actually do the io */ 1309 /* here we actually do the io */
1296 if (type && !(*type & VM_FAULT_MAJOR)) { 1310 if (type)
1297 __count_vm_event(PGMAJFAULT);
1298 *type |= VM_FAULT_MAJOR; 1311 *type |= VM_FAULT_MAJOR;
1299 }
1300 spin_unlock(&info->lock);
1301 swappage = shmem_swapin(swap, gfp, info, idx); 1312 swappage = shmem_swapin(swap, gfp, info, idx);
1302 if (!swappage) { 1313 if (!swappage) {
1303 spin_lock(&info->lock); 1314 spin_lock(&info->lock);
@@ -1536,7 +1547,10 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1536 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1547 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1537 if (error) 1548 if (error)
1538 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); 1549 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1539 1550 if (ret & VM_FAULT_MAJOR) {
1551 count_vm_event(PGMAJFAULT);
1552 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1553 }
1540 return ret | VM_FAULT_LOCKED; 1554 return ret | VM_FAULT_LOCKED;
1541} 1555}
1542 1556
@@ -1615,6 +1629,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
1615 spin_lock_init(&info->lock); 1629 spin_lock_init(&info->lock);
1616 info->flags = flags & VM_NORESERVE; 1630 info->flags = flags & VM_NORESERVE;
1617 INIT_LIST_HEAD(&info->swaplist); 1631 INIT_LIST_HEAD(&info->swaplist);
1632 INIT_LIST_HEAD(&info->xattr_list);
1618 cache_no_acl(inode); 1633 cache_no_acl(inode);
1619 1634
1620 switch (mode & S_IFMT) { 1635 switch (mode & S_IFMT) {
@@ -2014,9 +2029,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2014 2029
2015 info = SHMEM_I(inode); 2030 info = SHMEM_I(inode);
2016 inode->i_size = len-1; 2031 inode->i_size = len-1;
2017 if (len <= (char *)inode - (char *)info) { 2032 if (len <= SHMEM_SYMLINK_INLINE_LEN) {
2018 /* do it inline */ 2033 /* do it inline */
2019 memcpy(info, symname, len); 2034 memcpy(info->inline_symlink, symname, len);
2020 inode->i_op = &shmem_symlink_inline_operations; 2035 inode->i_op = &shmem_symlink_inline_operations;
2021 } else { 2036 } else {
2022 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 2037 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
@@ -2042,7 +2057,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2042 2057
2043static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) 2058static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
2044{ 2059{
2045 nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); 2060 nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
2046 return NULL; 2061 return NULL;
2047} 2062}
2048 2063
@@ -2066,63 +2081,253 @@ static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *co
2066 } 2081 }
2067} 2082}
2068 2083
2069static const struct inode_operations shmem_symlink_inline_operations = { 2084#ifdef CONFIG_TMPFS_XATTR
2070 .readlink = generic_readlink,
2071 .follow_link = shmem_follow_link_inline,
2072};
2073
2074static const struct inode_operations shmem_symlink_inode_operations = {
2075 .readlink = generic_readlink,
2076 .follow_link = shmem_follow_link,
2077 .put_link = shmem_put_link,
2078};
2079
2080#ifdef CONFIG_TMPFS_POSIX_ACL
2081/* 2085/*
2082 * Superblocks without xattr inode operations will get security.* xattr 2086 * Superblocks without xattr inode operations may get some security.* xattr
2083 * support from the VFS "for free". As soon as we have any other xattrs 2087 * support from the LSM "for free". As soon as we have any other xattrs
2084 * like ACLs, we also need to implement the security.* handlers at 2088 * like ACLs, we also need to implement the security.* handlers at
2085 * filesystem level, though. 2089 * filesystem level, though.
2086 */ 2090 */
2087 2091
2088static size_t shmem_xattr_security_list(struct dentry *dentry, char *list, 2092static int shmem_xattr_get(struct dentry *dentry, const char *name,
2089 size_t list_len, const char *name, 2093 void *buffer, size_t size)
2090 size_t name_len, int handler_flags)
2091{ 2094{
2092 return security_inode_listsecurity(dentry->d_inode, list, list_len); 2095 struct shmem_inode_info *info;
2093} 2096 struct shmem_xattr *xattr;
2097 int ret = -ENODATA;
2094 2098
2095static int shmem_xattr_security_get(struct dentry *dentry, const char *name, 2099 info = SHMEM_I(dentry->d_inode);
2096 void *buffer, size_t size, int handler_flags) 2100
2097{ 2101 spin_lock(&info->lock);
2098 if (strcmp(name, "") == 0) 2102 list_for_each_entry(xattr, &info->xattr_list, list) {
2099 return -EINVAL; 2103 if (strcmp(name, xattr->name))
2100 return xattr_getsecurity(dentry->d_inode, name, buffer, size); 2104 continue;
2105
2106 ret = xattr->size;
2107 if (buffer) {
2108 if (size < xattr->size)
2109 ret = -ERANGE;
2110 else
2111 memcpy(buffer, xattr->value, xattr->size);
2112 }
2113 break;
2114 }
2115 spin_unlock(&info->lock);
2116 return ret;
2101} 2117}
2102 2118
2103static int shmem_xattr_security_set(struct dentry *dentry, const char *name, 2119static int shmem_xattr_set(struct dentry *dentry, const char *name,
2104 const void *value, size_t size, int flags, int handler_flags) 2120 const void *value, size_t size, int flags)
2105{ 2121{
2106 if (strcmp(name, "") == 0) 2122 struct inode *inode = dentry->d_inode;
2107 return -EINVAL; 2123 struct shmem_inode_info *info = SHMEM_I(inode);
2108 return security_inode_setsecurity(dentry->d_inode, name, value, 2124 struct shmem_xattr *xattr;
2109 size, flags); 2125 struct shmem_xattr *new_xattr = NULL;
2126 size_t len;
2127 int err = 0;
2128
2129 /* value == NULL means remove */
2130 if (value) {
2131 /* wrap around? */
2132 len = sizeof(*new_xattr) + size;
2133 if (len <= sizeof(*new_xattr))
2134 return -ENOMEM;
2135
2136 new_xattr = kmalloc(len, GFP_KERNEL);
2137 if (!new_xattr)
2138 return -ENOMEM;
2139
2140 new_xattr->name = kstrdup(name, GFP_KERNEL);
2141 if (!new_xattr->name) {
2142 kfree(new_xattr);
2143 return -ENOMEM;
2144 }
2145
2146 new_xattr->size = size;
2147 memcpy(new_xattr->value, value, size);
2148 }
2149
2150 spin_lock(&info->lock);
2151 list_for_each_entry(xattr, &info->xattr_list, list) {
2152 if (!strcmp(name, xattr->name)) {
2153 if (flags & XATTR_CREATE) {
2154 xattr = new_xattr;
2155 err = -EEXIST;
2156 } else if (new_xattr) {
2157 list_replace(&xattr->list, &new_xattr->list);
2158 } else {
2159 list_del(&xattr->list);
2160 }
2161 goto out;
2162 }
2163 }
2164 if (flags & XATTR_REPLACE) {
2165 xattr = new_xattr;
2166 err = -ENODATA;
2167 } else {
2168 list_add(&new_xattr->list, &info->xattr_list);
2169 xattr = NULL;
2170 }
2171out:
2172 spin_unlock(&info->lock);
2173 if (xattr)
2174 kfree(xattr->name);
2175 kfree(xattr);
2176 return err;
2110} 2177}
2111 2178
2112static const struct xattr_handler shmem_xattr_security_handler = {
2113 .prefix = XATTR_SECURITY_PREFIX,
2114 .list = shmem_xattr_security_list,
2115 .get = shmem_xattr_security_get,
2116 .set = shmem_xattr_security_set,
2117};
2118 2179
2119static const struct xattr_handler *shmem_xattr_handlers[] = { 2180static const struct xattr_handler *shmem_xattr_handlers[] = {
2181#ifdef CONFIG_TMPFS_POSIX_ACL
2120 &generic_acl_access_handler, 2182 &generic_acl_access_handler,
2121 &generic_acl_default_handler, 2183 &generic_acl_default_handler,
2122 &shmem_xattr_security_handler, 2184#endif
2123 NULL 2185 NULL
2124}; 2186};
2187
2188static int shmem_xattr_validate(const char *name)
2189{
2190 struct { const char *prefix; size_t len; } arr[] = {
2191 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2192 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2193 };
2194 int i;
2195
2196 for (i = 0; i < ARRAY_SIZE(arr); i++) {
2197 size_t preflen = arr[i].len;
2198 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2199 if (!name[preflen])
2200 return -EINVAL;
2201 return 0;
2202 }
2203 }
2204 return -EOPNOTSUPP;
2205}
2206
2207static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2208 void *buffer, size_t size)
2209{
2210 int err;
2211
2212 /*
2213 * If this is a request for a synthetic attribute in the system.*
2214 * namespace use the generic infrastructure to resolve a handler
2215 * for it via sb->s_xattr.
2216 */
2217 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2218 return generic_getxattr(dentry, name, buffer, size);
2219
2220 err = shmem_xattr_validate(name);
2221 if (err)
2222 return err;
2223
2224 return shmem_xattr_get(dentry, name, buffer, size);
2225}
2226
2227static int shmem_setxattr(struct dentry *dentry, const char *name,
2228 const void *value, size_t size, int flags)
2229{
2230 int err;
2231
2232 /*
2233 * If this is a request for a synthetic attribute in the system.*
2234 * namespace use the generic infrastructure to resolve a handler
2235 * for it via sb->s_xattr.
2236 */
2237 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2238 return generic_setxattr(dentry, name, value, size, flags);
2239
2240 err = shmem_xattr_validate(name);
2241 if (err)
2242 return err;
2243
2244 if (size == 0)
2245 value = ""; /* empty EA, do not remove */
2246
2247 return shmem_xattr_set(dentry, name, value, size, flags);
2248
2249}
2250
2251static int shmem_removexattr(struct dentry *dentry, const char *name)
2252{
2253 int err;
2254
2255 /*
2256 * If this is a request for a synthetic attribute in the system.*
2257 * namespace use the generic infrastructure to resolve a handler
2258 * for it via sb->s_xattr.
2259 */
2260 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2261 return generic_removexattr(dentry, name);
2262
2263 err = shmem_xattr_validate(name);
2264 if (err)
2265 return err;
2266
2267 return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
2268}
2269
2270static bool xattr_is_trusted(const char *name)
2271{
2272 return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
2273}
2274
2275static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2276{
2277 bool trusted = capable(CAP_SYS_ADMIN);
2278 struct shmem_xattr *xattr;
2279 struct shmem_inode_info *info;
2280 size_t used = 0;
2281
2282 info = SHMEM_I(dentry->d_inode);
2283
2284 spin_lock(&info->lock);
2285 list_for_each_entry(xattr, &info->xattr_list, list) {
2286 size_t len;
2287
2288 /* skip "trusted." attributes for unprivileged callers */
2289 if (!trusted && xattr_is_trusted(xattr->name))
2290 continue;
2291
2292 len = strlen(xattr->name) + 1;
2293 used += len;
2294 if (buffer) {
2295 if (size < used) {
2296 used = -ERANGE;
2297 break;
2298 }
2299 memcpy(buffer, xattr->name, len);
2300 buffer += len;
2301 }
2302 }
2303 spin_unlock(&info->lock);
2304
2305 return used;
2306}
2307#endif /* CONFIG_TMPFS_XATTR */
2308
2309static const struct inode_operations shmem_symlink_inline_operations = {
2310 .readlink = generic_readlink,
2311 .follow_link = shmem_follow_link_inline,
2312#ifdef CONFIG_TMPFS_XATTR
2313 .setxattr = shmem_setxattr,
2314 .getxattr = shmem_getxattr,
2315 .listxattr = shmem_listxattr,
2316 .removexattr = shmem_removexattr,
2125#endif 2317#endif
2318};
2319
2320static const struct inode_operations shmem_symlink_inode_operations = {
2321 .readlink = generic_readlink,
2322 .follow_link = shmem_follow_link,
2323 .put_link = shmem_put_link,
2324#ifdef CONFIG_TMPFS_XATTR
2325 .setxattr = shmem_setxattr,
2326 .getxattr = shmem_getxattr,
2327 .listxattr = shmem_listxattr,
2328 .removexattr = shmem_removexattr,
2329#endif
2330};
2126 2331
2127static struct dentry *shmem_get_parent(struct dentry *child) 2332static struct dentry *shmem_get_parent(struct dentry *child)
2128{ 2333{
@@ -2402,8 +2607,10 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
2402 sb->s_magic = TMPFS_MAGIC; 2607 sb->s_magic = TMPFS_MAGIC;
2403 sb->s_op = &shmem_ops; 2608 sb->s_op = &shmem_ops;
2404 sb->s_time_gran = 1; 2609 sb->s_time_gran = 1;
2405#ifdef CONFIG_TMPFS_POSIX_ACL 2610#ifdef CONFIG_TMPFS_XATTR
2406 sb->s_xattr = shmem_xattr_handlers; 2611 sb->s_xattr = shmem_xattr_handlers;
2612#endif
2613#ifdef CONFIG_TMPFS_POSIX_ACL
2407 sb->s_flags |= MS_POSIXACL; 2614 sb->s_flags |= MS_POSIXACL;
2408#endif 2615#endif
2409 2616
@@ -2501,11 +2708,13 @@ static const struct file_operations shmem_file_operations = {
2501static const struct inode_operations shmem_inode_operations = { 2708static const struct inode_operations shmem_inode_operations = {
2502 .setattr = shmem_notify_change, 2709 .setattr = shmem_notify_change,
2503 .truncate_range = shmem_truncate_range, 2710 .truncate_range = shmem_truncate_range,
2711#ifdef CONFIG_TMPFS_XATTR
2712 .setxattr = shmem_setxattr,
2713 .getxattr = shmem_getxattr,
2714 .listxattr = shmem_listxattr,
2715 .removexattr = shmem_removexattr,
2716#endif
2504#ifdef CONFIG_TMPFS_POSIX_ACL 2717#ifdef CONFIG_TMPFS_POSIX_ACL
2505 .setxattr = generic_setxattr,
2506 .getxattr = generic_getxattr,
2507 .listxattr = generic_listxattr,
2508 .removexattr = generic_removexattr,
2509 .check_acl = generic_check_acl, 2718 .check_acl = generic_check_acl,
2510#endif 2719#endif
2511 2720
@@ -2523,23 +2732,27 @@ static const struct inode_operations shmem_dir_inode_operations = {
2523 .mknod = shmem_mknod, 2732 .mknod = shmem_mknod,
2524 .rename = shmem_rename, 2733 .rename = shmem_rename,
2525#endif 2734#endif
2735#ifdef CONFIG_TMPFS_XATTR
2736 .setxattr = shmem_setxattr,
2737 .getxattr = shmem_getxattr,
2738 .listxattr = shmem_listxattr,
2739 .removexattr = shmem_removexattr,
2740#endif
2526#ifdef CONFIG_TMPFS_POSIX_ACL 2741#ifdef CONFIG_TMPFS_POSIX_ACL
2527 .setattr = shmem_notify_change, 2742 .setattr = shmem_notify_change,
2528 .setxattr = generic_setxattr,
2529 .getxattr = generic_getxattr,
2530 .listxattr = generic_listxattr,
2531 .removexattr = generic_removexattr,
2532 .check_acl = generic_check_acl, 2743 .check_acl = generic_check_acl,
2533#endif 2744#endif
2534}; 2745};
2535 2746
2536static const struct inode_operations shmem_special_inode_operations = { 2747static const struct inode_operations shmem_special_inode_operations = {
2748#ifdef CONFIG_TMPFS_XATTR
2749 .setxattr = shmem_setxattr,
2750 .getxattr = shmem_getxattr,
2751 .listxattr = shmem_listxattr,
2752 .removexattr = shmem_removexattr,
2753#endif
2537#ifdef CONFIG_TMPFS_POSIX_ACL 2754#ifdef CONFIG_TMPFS_POSIX_ACL
2538 .setattr = shmem_notify_change, 2755 .setattr = shmem_notify_change,
2539 .setxattr = generic_setxattr,
2540 .getxattr = generic_getxattr,
2541 .listxattr = generic_listxattr,
2542 .removexattr = generic_removexattr,
2543 .check_acl = generic_check_acl, 2756 .check_acl = generic_check_acl,
2544#endif 2757#endif
2545}; 2758};
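The shmem.c hunks above replace the old security-only xattr handler with a simple in-inode list (struct shmem_xattr) that backs trusted.* and security.* attributes, while system.* requests still go through the generic sb->s_xattr handlers. A minimal user-space sketch of the expected behaviour follows; the file path and attribute name are made up, and /tmp is assumed to be a tmpfs mount.

/* xattr_demo.c - exercise tmpfs extended attributes (sketch; /tmp assumed to be tmpfs) */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/tmp/xattr_demo_file";   /* hypothetical test file */
	char buf[256];
	ssize_t len;

	int fd = open(path, O_CREAT | O_WRONLY, 0600);
	if (fd < 0) { perror("open"); return 1; }
	close(fd);

	/* trusted.* needs CAP_SYS_ADMIN; an unprivileged run is expected to fail here */
	if (setxattr(path, "trusted.demo", "hello", 5, 0) != 0)
		perror("setxattr trusted.demo");

	len = getxattr(path, "trusted.demo", buf, sizeof(buf));
	if (len >= 0)
		printf("trusted.demo = %.*s\n", (int)len, buf);

	/* shmem_listxattr() hides trusted.* from unprivileged callers */
	len = listxattr(path, buf, sizeof(buf));
	if (len > 0) {
		for (char *p = buf; p < buf + len; p += strlen(p) + 1)
			printf("name: %s\n", p);
	}

	unlink(path);
	return 0;
}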
diff --git a/mm/slub.c b/mm/slub.c
index 4ea7f1a22a94..7be0223531b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1831,7 +1831,6 @@ load_freelist:
1831 page->inuse = page->objects; 1831 page->inuse = page->objects;
1832 page->freelist = NULL; 1832 page->freelist = NULL;
1833 1833
1834unlock_out:
1835 slab_unlock(page); 1834 slab_unlock(page);
1836 c->tid = next_tid(c->tid); 1835 c->tid = next_tid(c->tid);
1837 local_irq_restore(flags); 1836 local_irq_restore(flags);
@@ -1884,7 +1883,8 @@ debug:
1884 deactivate_slab(s, c); 1883 deactivate_slab(s, c);
1885 c->page = NULL; 1884 c->page = NULL;
1886 c->node = NUMA_NO_NODE; 1885 c->node = NUMA_NO_NODE;
1887 goto unlock_out; 1886 local_irq_restore(flags);
1887 return object;
1888} 1888}
1889 1889
1890/* 1890/*
diff --git a/mm/swap.c b/mm/swap.c
index 5602f1a1b1e7..3a442f18b0b3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -272,14 +272,10 @@ static void update_page_reclaim_stat(struct zone *zone, struct page *page,
272 memcg_reclaim_stat->recent_rotated[file]++; 272 memcg_reclaim_stat->recent_rotated[file]++;
273} 273}
274 274
275/* 275static void __activate_page(struct page *page, void *arg)
276 * FIXME: speed this up?
277 */
278void activate_page(struct page *page)
279{ 276{
280 struct zone *zone = page_zone(page); 277 struct zone *zone = page_zone(page);
281 278
282 spin_lock_irq(&zone->lru_lock);
283 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 279 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
284 int file = page_is_file_cache(page); 280 int file = page_is_file_cache(page);
285 int lru = page_lru_base_type(page); 281 int lru = page_lru_base_type(page);
@@ -292,8 +288,45 @@ void activate_page(struct page *page)
292 288
293 update_page_reclaim_stat(zone, page, file, 1); 289 update_page_reclaim_stat(zone, page, file, 1);
294 } 290 }
291}
292
293#ifdef CONFIG_SMP
294static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
295
296static void activate_page_drain(int cpu)
297{
298 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
299
300 if (pagevec_count(pvec))
301 pagevec_lru_move_fn(pvec, __activate_page, NULL);
302}
303
304void activate_page(struct page *page)
305{
306 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
307 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
308
309 page_cache_get(page);
310 if (!pagevec_add(pvec, page))
311 pagevec_lru_move_fn(pvec, __activate_page, NULL);
312 put_cpu_var(activate_page_pvecs);
313 }
314}
315
316#else
317static inline void activate_page_drain(int cpu)
318{
319}
320
321void activate_page(struct page *page)
322{
323 struct zone *zone = page_zone(page);
324
325 spin_lock_irq(&zone->lru_lock);
326 __activate_page(page, NULL);
295 spin_unlock_irq(&zone->lru_lock); 327 spin_unlock_irq(&zone->lru_lock);
296} 328}
329#endif
297 330
298/* 331/*
299 * Mark a page as having seen activity. 332 * Mark a page as having seen activity.
@@ -464,6 +497,8 @@ static void drain_cpu_pagevecs(int cpu)
464 pvec = &per_cpu(lru_deactivate_pvecs, cpu); 497 pvec = &per_cpu(lru_deactivate_pvecs, cpu);
465 if (pagevec_count(pvec)) 498 if (pagevec_count(pvec))
466 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); 499 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
500
501 activate_page_drain(cpu);
467} 502}
468 503
469/** 504/**
@@ -476,6 +511,13 @@ static void drain_cpu_pagevecs(int cpu)
476 */ 511 */
477void deactivate_page(struct page *page) 512void deactivate_page(struct page *page)
478{ 513{
514 /*
515	 * In a workload with many unevictable pages, such as one using mprotect,
516	 * unevictable page deactivation for accelerating reclaim is pointless.
517 */
518 if (PageUnevictable(page))
519 return;
520
479 if (likely(get_page_unless_zero(page))) { 521 if (likely(get_page_unless_zero(page))) {
480 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); 522 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
481 523
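The swap.c change turns activate_page() from a take-zone->lru_lock-per-page operation into per-CPU batching: pages are collected in activate_page_pvecs and moved by pagevec_lru_move_fn() when the pagevec fills or the CPU's pagevecs are drained. Below is a stand-alone sketch of that batching pattern in plain C, not kernel code; the batch size of 14 mirrors PAGEVEC_SIZE but is otherwise an assumption.

/* batch_demo.c - buffer work items and process them in one locked pass */
#include <stdio.h>

#define BATCH 14                     /* mirrors PAGEVEC_SIZE; illustrative only */

static int batch[BATCH];
static int batch_count;

static void drain_locked(void)
{
	/* stands in for pagevec_lru_move_fn(): one lock acquisition per batch */
	printf("lock; moving %d items; unlock\n", batch_count);
	batch_count = 0;
}

static void activate(int item)
{
	batch[batch_count++] = item;
	if (batch_count == BATCH)        /* like !pagevec_add(): drain when full */
		drain_locked();
}

int main(void)
{
	for (int i = 0; i < 40; i++)
		activate(i);
	if (batch_count)                 /* like activate_page_drain() for this CPU */
		drain_locked();
	return 0;
}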
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8c6b3ce38f09..d537d29e9b7b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -31,6 +31,7 @@
31#include <linux/syscalls.h> 31#include <linux/syscalls.h>
32#include <linux/memcontrol.h> 32#include <linux/memcontrol.h>
33#include <linux/poll.h> 33#include <linux/poll.h>
34#include <linux/oom.h>
34 35
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
36#include <asm/tlbflush.h> 37#include <asm/tlbflush.h>
@@ -1555,6 +1556,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1555 struct address_space *mapping; 1556 struct address_space *mapping;
1556 struct inode *inode; 1557 struct inode *inode;
1557 char *pathname; 1558 char *pathname;
1559 int oom_score_adj;
1558 int i, type, prev; 1560 int i, type, prev;
1559 int err; 1561 int err;
1560 1562
@@ -1613,9 +1615,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1613 p->flags &= ~SWP_WRITEOK; 1615 p->flags &= ~SWP_WRITEOK;
1614 spin_unlock(&swap_lock); 1616 spin_unlock(&swap_lock);
1615 1617
1616 current->flags |= PF_OOM_ORIGIN; 1618 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1617 err = try_to_unuse(type); 1619 err = try_to_unuse(type);
1618 current->flags &= ~PF_OOM_ORIGIN; 1620 test_set_oom_score_adj(oom_score_adj);
1619 1621
1620 if (err) { 1622 if (err) {
1621 /* 1623 /*
diff --git a/mm/truncate.c b/mm/truncate.c
index a95667529135..3a29a6180212 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -19,6 +19,7 @@
19#include <linux/task_io_accounting_ops.h> 19#include <linux/task_io_accounting_ops.h>
20#include <linux/buffer_head.h> /* grr. try_to_release_page, 20#include <linux/buffer_head.h> /* grr. try_to_release_page,
21 do_invalidatepage */ 21 do_invalidatepage */
22#include <linux/cleancache.h>
22#include "internal.h" 23#include "internal.h"
23 24
24 25
@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
51static inline void truncate_partial_page(struct page *page, unsigned partial) 52static inline void truncate_partial_page(struct page *page, unsigned partial)
52{ 53{
53 zero_user_segment(page, partial, PAGE_CACHE_SIZE); 54 zero_user_segment(page, partial, PAGE_CACHE_SIZE);
55 cleancache_flush_page(page->mapping, page);
54 if (page_has_private(page)) 56 if (page_has_private(page))
55 do_invalidatepage(page, partial); 57 do_invalidatepage(page, partial);
56} 58}
@@ -214,6 +216,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
214 pgoff_t next; 216 pgoff_t next;
215 int i; 217 int i;
216 218
219 cleancache_flush_inode(mapping);
217 if (mapping->nrpages == 0) 220 if (mapping->nrpages == 0)
218 return; 221 return;
219 222
@@ -291,6 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
291 pagevec_release(&pvec); 294 pagevec_release(&pvec);
292 mem_cgroup_uncharge_end(); 295 mem_cgroup_uncharge_end();
293 } 296 }
297 cleancache_flush_inode(mapping);
294} 298}
295EXPORT_SYMBOL(truncate_inode_pages_range); 299EXPORT_SYMBOL(truncate_inode_pages_range);
296 300
@@ -440,6 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
440 int did_range_unmap = 0; 444 int did_range_unmap = 0;
441 int wrapped = 0; 445 int wrapped = 0;
442 446
447 cleancache_flush_inode(mapping);
443 pagevec_init(&pvec, 0); 448 pagevec_init(&pvec, 0);
444 next = start; 449 next = start;
445 while (next <= end && !wrapped && 450 while (next <= end && !wrapped &&
@@ -498,6 +503,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
498 mem_cgroup_uncharge_end(); 503 mem_cgroup_uncharge_end();
499 cond_resched(); 504 cond_resched();
500 } 505 }
506 cleancache_flush_inode(mapping);
501 return ret; 507 return ret;
502} 508}
503EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); 509EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
diff --git a/mm/util.c b/mm/util.c
index e7b103a6fd21..88ea1bd661c0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -6,6 +6,8 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <asm/uaccess.h> 7#include <asm/uaccess.h>
8 8
9#include "internal.h"
10
9#define CREATE_TRACE_POINTS 11#define CREATE_TRACE_POINTS
10#include <trace/events/kmem.h> 12#include <trace/events/kmem.h>
11 13
@@ -215,6 +217,28 @@ char *strndup_user(const char __user *s, long n)
215} 217}
216EXPORT_SYMBOL(strndup_user); 218EXPORT_SYMBOL(strndup_user);
217 219
220void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
221 struct vm_area_struct *prev, struct rb_node *rb_parent)
222{
223 struct vm_area_struct *next;
224
225 vma->vm_prev = prev;
226 if (prev) {
227 next = prev->vm_next;
228 prev->vm_next = vma;
229 } else {
230 mm->mmap = vma;
231 if (rb_parent)
232 next = rb_entry(rb_parent,
233 struct vm_area_struct, vm_rb);
234 else
235 next = NULL;
236 }
237 vma->vm_next = next;
238 if (next)
239 next->vm_prev = vma;
240}
241
218#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) 242#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
219void arch_pick_mmap_layout(struct mm_struct *mm) 243void arch_pick_mmap_layout(struct mm_struct *mm)
220{ 244{
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5d6030235d7a..1d34d75366a7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -375,7 +375,7 @@ nocache:
375 /* find starting point for our search */ 375 /* find starting point for our search */
376 if (free_vmap_cache) { 376 if (free_vmap_cache) {
377 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); 377 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
378 addr = ALIGN(first->va_end + PAGE_SIZE, align); 378 addr = ALIGN(first->va_end, align);
379 if (addr < vstart) 379 if (addr < vstart)
380 goto nocache; 380 goto nocache;
381 if (addr + size - 1 < addr) 381 if (addr + size - 1 < addr)
@@ -406,10 +406,10 @@ nocache:
406 } 406 }
407 407
408 /* from the starting point, walk areas until a suitable hole is found */ 408 /* from the starting point, walk areas until a suitable hole is found */
409 while (addr + size >= first->va_start && addr + size <= vend) { 409 while (addr + size > first->va_start && addr + size <= vend) {
410 if (addr + cached_hole_size < first->va_start) 410 if (addr + cached_hole_size < first->va_start)
411 cached_hole_size = first->va_start - addr; 411 cached_hole_size = first->va_start - addr;
412 addr = ALIGN(first->va_end + PAGE_SIZE, align); 412 addr = ALIGN(first->va_end, align);
413 if (addr + size - 1 < addr) 413 if (addr + size - 1 < addr)
414 goto overflow; 414 goto overflow;
415 415
@@ -1534,6 +1534,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
1534static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 1534static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1535 pgprot_t prot, int node, void *caller) 1535 pgprot_t prot, int node, void *caller)
1536{ 1536{
1537 const int order = 0;
1537 struct page **pages; 1538 struct page **pages;
1538 unsigned int nr_pages, array_size, i; 1539 unsigned int nr_pages, array_size, i;
1539 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 1540 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
@@ -1560,11 +1561,12 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1560 1561
1561 for (i = 0; i < area->nr_pages; i++) { 1562 for (i = 0; i < area->nr_pages; i++) {
1562 struct page *page; 1563 struct page *page;
1564 gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
1563 1565
1564 if (node < 0) 1566 if (node < 0)
1565 page = alloc_page(gfp_mask); 1567 page = alloc_page(tmp_mask);
1566 else 1568 else
1567 page = alloc_pages_node(node, gfp_mask, 0); 1569 page = alloc_pages_node(node, tmp_mask, order);
1568 1570
1569 if (unlikely(!page)) { 1571 if (unlikely(!page)) {
1570 /* Successfully allocated i pages, free them in __vunmap() */ 1572 /* Successfully allocated i pages, free them in __vunmap() */
@@ -1579,6 +1581,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1579 return area->addr; 1581 return area->addr;
1580 1582
1581fail: 1583fail:
1584 warn_alloc_failed(gfp_mask, order, "vmalloc: allocation failure, "
1585 "allocated %ld of %ld bytes\n",
1586 (area->nr_pages*PAGE_SIZE), area->size);
1582 vfree(area->addr); 1587 vfree(area->addr);
1583 return NULL; 1588 return NULL;
1584} 1589}
@@ -2148,10 +2153,6 @@ struct vm_struct *alloc_vm_area(size_t size)
2148 return NULL; 2153 return NULL;
2149 } 2154 }
2150 2155
2151 /* Make sure the pagetables are constructed in process kernel
2152 mappings */
2153 vmalloc_sync_all();
2154
2155 return area; 2156 return area;
2156} 2157}
2157EXPORT_SYMBOL_GPL(alloc_vm_area); 2158EXPORT_SYMBOL_GPL(alloc_vm_area);
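The vmalloc.c hunks drop the extra PAGE_SIZE that used to be added before aligning the next search address, so the walk now starts at the aligned end of the previous vmap area instead of one page past it. A quick arithmetic sketch using the usual round-up definition of ALIGN(); the PAGE_SIZE, address, and alignment values below are made up.

/* align_demo.c - effect of dropping the extra PAGE_SIZE before ALIGN() */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))   /* round x up to a multiple of a */

int main(void)
{
	unsigned long va_end = 0x100000;   /* end of the previous vmap area (hypothetical) */
	unsigned long align  = 0x1000;

	printf("old: %#lx\n", ALIGN(va_end + PAGE_SIZE, align)); /* 0x101000: one-page hole */
	printf("new: %#lx\n", ALIGN(va_end, align));             /* 0x100000: packed */
	return 0;
}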
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c9177202c8ce..faa0a088f9cc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -173,7 +173,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
173 struct scan_control *sc, enum lru_list lru) 173 struct scan_control *sc, enum lru_list lru)
174{ 174{
175 if (!scanning_global_lru(sc)) 175 if (!scanning_global_lru(sc))
176 return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); 176 return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
177 177
178 return zone_page_state(zone, NR_LRU_BASE + lru); 178 return zone_page_state(zone, NR_LRU_BASE + lru);
179} 179}
@@ -202,6 +202,14 @@ void unregister_shrinker(struct shrinker *shrinker)
202} 202}
203EXPORT_SYMBOL(unregister_shrinker); 203EXPORT_SYMBOL(unregister_shrinker);
204 204
205static inline int do_shrinker_shrink(struct shrinker *shrinker,
206 struct shrink_control *sc,
207 unsigned long nr_to_scan)
208{
209 sc->nr_to_scan = nr_to_scan;
210 return (*shrinker->shrink)(shrinker, sc);
211}
212
205#define SHRINK_BATCH 128 213#define SHRINK_BATCH 128
206/* 214/*
207 * Call the shrink functions to age shrinkable caches 215 * Call the shrink functions to age shrinkable caches
@@ -222,25 +230,29 @@ EXPORT_SYMBOL(unregister_shrinker);
222 * 230 *
223 * Returns the number of slab objects which we shrunk. 231 * Returns the number of slab objects which we shrunk.
224 */ 232 */
225unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, 233unsigned long shrink_slab(struct shrink_control *shrink,
226 unsigned long lru_pages) 234 unsigned long nr_pages_scanned,
235 unsigned long lru_pages)
227{ 236{
228 struct shrinker *shrinker; 237 struct shrinker *shrinker;
229 unsigned long ret = 0; 238 unsigned long ret = 0;
230 239
231 if (scanned == 0) 240 if (nr_pages_scanned == 0)
232 scanned = SWAP_CLUSTER_MAX; 241 nr_pages_scanned = SWAP_CLUSTER_MAX;
233 242
234 if (!down_read_trylock(&shrinker_rwsem)) 243 if (!down_read_trylock(&shrinker_rwsem)) {
235 return 1; /* Assume we'll be able to shrink next time */ 244 /* Assume we'll be able to shrink next time */
245 ret = 1;
246 goto out;
247 }
236 248
237 list_for_each_entry(shrinker, &shrinker_list, list) { 249 list_for_each_entry(shrinker, &shrinker_list, list) {
238 unsigned long long delta; 250 unsigned long long delta;
239 unsigned long total_scan; 251 unsigned long total_scan;
240 unsigned long max_pass; 252 unsigned long max_pass;
241 253
242 max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask); 254 max_pass = do_shrinker_shrink(shrinker, shrink, 0);
243 delta = (4 * scanned) / shrinker->seeks; 255 delta = (4 * nr_pages_scanned) / shrinker->seeks;
244 delta *= max_pass; 256 delta *= max_pass;
245 do_div(delta, lru_pages + 1); 257 do_div(delta, lru_pages + 1);
246 shrinker->nr += delta; 258 shrinker->nr += delta;
@@ -267,9 +279,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
267 int shrink_ret; 279 int shrink_ret;
268 int nr_before; 280 int nr_before;
269 281
270 nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask); 282 nr_before = do_shrinker_shrink(shrinker, shrink, 0);
271 shrink_ret = (*shrinker->shrink)(shrinker, this_scan, 283 shrink_ret = do_shrinker_shrink(shrinker, shrink,
272 gfp_mask); 284 this_scan);
273 if (shrink_ret == -1) 285 if (shrink_ret == -1)
274 break; 286 break;
275 if (shrink_ret < nr_before) 287 if (shrink_ret < nr_before)
@@ -283,6 +295,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
283 shrinker->nr += total_scan; 295 shrinker->nr += total_scan;
284 } 296 }
285 up_read(&shrinker_rwsem); 297 up_read(&shrinker_rwsem);
298out:
299 cond_resched();
286 return ret; 300 return ret;
287} 301}
288 302
@@ -1202,13 +1216,16 @@ int isolate_lru_page(struct page *page)
1202{ 1216{
1203 int ret = -EBUSY; 1217 int ret = -EBUSY;
1204 1218
1219 VM_BUG_ON(!page_count(page));
1220
1205 if (PageLRU(page)) { 1221 if (PageLRU(page)) {
1206 struct zone *zone = page_zone(page); 1222 struct zone *zone = page_zone(page);
1207 1223
1208 spin_lock_irq(&zone->lru_lock); 1224 spin_lock_irq(&zone->lru_lock);
1209 if (PageLRU(page) && get_page_unless_zero(page)) { 1225 if (PageLRU(page)) {
1210 int lru = page_lru(page); 1226 int lru = page_lru(page);
1211 ret = 0; 1227 ret = 0;
1228 get_page(page);
1212 ClearPageLRU(page); 1229 ClearPageLRU(page);
1213 1230
1214 del_page_from_lru_list(zone, page, lru); 1231 del_page_from_lru_list(zone, page, lru);
@@ -1701,26 +1718,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1701} 1718}
1702 1719
1703/* 1720/*
1704 * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
1705 * until we collected @swap_cluster_max pages to scan.
1706 */
1707static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1708 unsigned long *nr_saved_scan)
1709{
1710 unsigned long nr;
1711
1712 *nr_saved_scan += nr_to_scan;
1713 nr = *nr_saved_scan;
1714
1715 if (nr >= SWAP_CLUSTER_MAX)
1716 *nr_saved_scan = 0;
1717 else
1718 nr = 0;
1719
1720 return nr;
1721}
1722
1723/*
1724 * Determine how aggressively the anon and file LRU lists should be 1721 * Determine how aggressively the anon and file LRU lists should be
1725 * scanned. The relative value of each set of LRU lists is determined 1722 * scanned. The relative value of each set of LRU lists is determined
1726 * by looking at the fraction of the pages scanned we did rotate back 1723 * by looking at the fraction of the pages scanned we did rotate back
@@ -1738,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1738 u64 fraction[2], denominator; 1735 u64 fraction[2], denominator;
1739 enum lru_list l; 1736 enum lru_list l;
1740 int noswap = 0; 1737 int noswap = 0;
1738 int force_scan = 0;
1739
1740
1741 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1742 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1743 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1744 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1745
1746 if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
1747		/* kswapd does zone balancing and needs to scan this zone */
1748 if (scanning_global_lru(sc) && current_is_kswapd())
1749 force_scan = 1;
1750		/* memcg may have a small limit and needs to avoid priority drops */
1751 if (!scanning_global_lru(sc))
1752 force_scan = 1;
1753 }
1741 1754
1742 /* If we have no swap space, do not bother scanning anon pages. */ 1755 /* If we have no swap space, do not bother scanning anon pages. */
1743 if (!sc->may_swap || (nr_swap_pages <= 0)) { 1756 if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1748,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
1748 goto out; 1761 goto out;
1749 } 1762 }
1750 1763
1751 anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
1752 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
1753 file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
1754 zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
1755
1756 if (scanning_global_lru(sc)) { 1764 if (scanning_global_lru(sc)) {
1757 free = zone_page_state(zone, NR_FREE_PAGES); 1765 free = zone_page_state(zone, NR_FREE_PAGES);
1758 /* If we have very few page cache pages, 1766 /* If we have very few page cache pages,
@@ -1819,8 +1827,23 @@ out:
1819 scan >>= priority; 1827 scan >>= priority;
1820 scan = div64_u64(scan * fraction[file], denominator); 1828 scan = div64_u64(scan * fraction[file], denominator);
1821 } 1829 }
1822 nr[l] = nr_scan_try_batch(scan, 1830
1823 &reclaim_stat->nr_saved_scan[l]); 1831 /*
1832		 * If the zone or the memcg is small, nr[l] can be 0.
1833		 * That results in no scan at this priority and the priority drops.
1834		 * Global direct reclaim can visit the next zone and tends
1835		 * not to have problems. Global kswapd, however, does zone
1836		 * balancing and needs to scan a small amount. When using
1837		 * memcg, a priority drop can cause big latency, so it is better
1838		 * to scan a small amount. See force_scan above.
1839 */
1840 if (!scan && force_scan) {
1841 if (file)
1842 scan = SWAP_CLUSTER_MAX;
1843 else if (!noswap)
1844 scan = SWAP_CLUSTER_MAX;
1845 }
1846 nr[l] = scan;
1824 } 1847 }
1825} 1848}
1826 1849
@@ -1960,11 +1983,14 @@ restart:
1960 * If a zone is deemed to be full of pinned pages then just give it a light 1983 * If a zone is deemed to be full of pinned pages then just give it a light
1961 * scan then give up on it. 1984 * scan then give up on it.
1962 */ 1985 */
1963static void shrink_zones(int priority, struct zonelist *zonelist, 1986static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
1964 struct scan_control *sc) 1987 struct scan_control *sc)
1965{ 1988{
1966 struct zoneref *z; 1989 struct zoneref *z;
1967 struct zone *zone; 1990 struct zone *zone;
1991 unsigned long nr_soft_reclaimed;
1992 unsigned long nr_soft_scanned;
1993 unsigned long total_scanned = 0;
1968 1994
1969 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1995 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1970 gfp_zone(sc->gfp_mask), sc->nodemask) { 1996 gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1981,8 +2007,17 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
1981 continue; /* Let kswapd poll it */ 2007 continue; /* Let kswapd poll it */
1982 } 2008 }
1983 2009
2010 nr_soft_scanned = 0;
2011 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2012 sc->order, sc->gfp_mask,
2013 &nr_soft_scanned);
2014 sc->nr_reclaimed += nr_soft_reclaimed;
2015 total_scanned += nr_soft_scanned;
2016
1984 shrink_zone(priority, zone, sc); 2017 shrink_zone(priority, zone, sc);
1985 } 2018 }
2019
2020 return total_scanned;
1986} 2021}
1987 2022
1988static bool zone_reclaimable(struct zone *zone) 2023static bool zone_reclaimable(struct zone *zone)
@@ -2027,7 +2062,8 @@ static bool all_unreclaimable(struct zonelist *zonelist,
2027 * else, the number of pages reclaimed 2062 * else, the number of pages reclaimed
2028 */ 2063 */
2029static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 2064static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2030 struct scan_control *sc) 2065 struct scan_control *sc,
2066 struct shrink_control *shrink)
2031{ 2067{
2032 int priority; 2068 int priority;
2033 unsigned long total_scanned = 0; 2069 unsigned long total_scanned = 0;
@@ -2046,7 +2082,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2046 sc->nr_scanned = 0; 2082 sc->nr_scanned = 0;
2047 if (!priority) 2083 if (!priority)
2048 disable_swap_token(); 2084 disable_swap_token();
2049 shrink_zones(priority, zonelist, sc); 2085 total_scanned += shrink_zones(priority, zonelist, sc);
2050 /* 2086 /*
2051 * Don't shrink slabs when reclaiming memory from 2087 * Don't shrink slabs when reclaiming memory from
2052 * over limit cgroups 2088 * over limit cgroups
@@ -2061,7 +2097,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2061 lru_pages += zone_reclaimable_pages(zone); 2097 lru_pages += zone_reclaimable_pages(zone);
2062 } 2098 }
2063 2099
2064 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); 2100 shrink_slab(shrink, sc->nr_scanned, lru_pages);
2065 if (reclaim_state) { 2101 if (reclaim_state) {
2066 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2102 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2067 reclaim_state->reclaimed_slab = 0; 2103 reclaim_state->reclaimed_slab = 0;
@@ -2133,12 +2169,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2133 .mem_cgroup = NULL, 2169 .mem_cgroup = NULL,
2134 .nodemask = nodemask, 2170 .nodemask = nodemask,
2135 }; 2171 };
2172 struct shrink_control shrink = {
2173 .gfp_mask = sc.gfp_mask,
2174 };
2136 2175
2137 trace_mm_vmscan_direct_reclaim_begin(order, 2176 trace_mm_vmscan_direct_reclaim_begin(order,
2138 sc.may_writepage, 2177 sc.may_writepage,
2139 gfp_mask); 2178 gfp_mask);
2140 2179
2141 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2180 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2142 2181
2143 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 2182 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2144 2183
@@ -2150,9 +2189,11 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2150unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 2189unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2151 gfp_t gfp_mask, bool noswap, 2190 gfp_t gfp_mask, bool noswap,
2152 unsigned int swappiness, 2191 unsigned int swappiness,
2153 struct zone *zone) 2192 struct zone *zone,
2193 unsigned long *nr_scanned)
2154{ 2194{
2155 struct scan_control sc = { 2195 struct scan_control sc = {
2196 .nr_scanned = 0,
2156 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2197 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2157 .may_writepage = !laptop_mode, 2198 .may_writepage = !laptop_mode,
2158 .may_unmap = 1, 2199 .may_unmap = 1,
@@ -2161,6 +2202,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2161 .order = 0, 2202 .order = 0,
2162 .mem_cgroup = mem, 2203 .mem_cgroup = mem,
2163 }; 2204 };
2205
2164 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2206 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2165 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2207 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2166 2208
@@ -2179,6 +2221,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
2179 2221
2180 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2222 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2181 2223
2224 *nr_scanned = sc.nr_scanned;
2182 return sc.nr_reclaimed; 2225 return sc.nr_reclaimed;
2183} 2226}
2184 2227
@@ -2189,6 +2232,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2189{ 2232{
2190 struct zonelist *zonelist; 2233 struct zonelist *zonelist;
2191 unsigned long nr_reclaimed; 2234 unsigned long nr_reclaimed;
2235 int nid;
2192 struct scan_control sc = { 2236 struct scan_control sc = {
2193 .may_writepage = !laptop_mode, 2237 .may_writepage = !laptop_mode,
2194 .may_unmap = 1, 2238 .may_unmap = 1,
@@ -2198,17 +2242,27 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2198 .order = 0, 2242 .order = 0,
2199 .mem_cgroup = mem_cont, 2243 .mem_cgroup = mem_cont,
2200 .nodemask = NULL, /* we don't care the placement */ 2244 .nodemask = NULL, /* we don't care the placement */
2245 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2246 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2247 };
2248 struct shrink_control shrink = {
2249 .gfp_mask = sc.gfp_mask,
2201 }; 2250 };
2202 2251
2203 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2252 /*
2204 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2253 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
2205	zonelist = NODE_DATA(numa_node_id())->node_zonelists;	2254	 * care from which node we get pages. So the node where we start the
2255 * scan does not need to be the current node.
2256 */
2257 nid = mem_cgroup_select_victim_node(mem_cont);
2258
2259 zonelist = NODE_DATA(nid)->node_zonelists;
2206 2260
2207 trace_mm_vmscan_memcg_reclaim_begin(0, 2261 trace_mm_vmscan_memcg_reclaim_begin(0,
2208 sc.may_writepage, 2262 sc.may_writepage,
2209 sc.gfp_mask); 2263 sc.gfp_mask);
2210 2264
2211 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2265 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2212 2266
2213 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2267 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2214 2268
@@ -2287,7 +2341,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
2287 * must be balanced 2341 * must be balanced
2288 */ 2342 */
2289 if (order) 2343 if (order)
2290 return pgdat_balanced(pgdat, balanced, classzone_idx); 2344 return !pgdat_balanced(pgdat, balanced, classzone_idx);
2291 else 2345 else
2292 return !all_zones_ok; 2346 return !all_zones_ok;
2293} 2347}
@@ -2323,6 +2377,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2323 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2377 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2324 unsigned long total_scanned; 2378 unsigned long total_scanned;
2325 struct reclaim_state *reclaim_state = current->reclaim_state; 2379 struct reclaim_state *reclaim_state = current->reclaim_state;
2380 unsigned long nr_soft_reclaimed;
2381 unsigned long nr_soft_scanned;
2326 struct scan_control sc = { 2382 struct scan_control sc = {
2327 .gfp_mask = GFP_KERNEL, 2383 .gfp_mask = GFP_KERNEL,
2328 .may_unmap = 1, 2384 .may_unmap = 1,
@@ -2336,6 +2392,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2336 .order = order, 2392 .order = order,
2337 .mem_cgroup = NULL, 2393 .mem_cgroup = NULL,
2338 }; 2394 };
2395 struct shrink_control shrink = {
2396 .gfp_mask = sc.gfp_mask,
2397 };
2339loop_again: 2398loop_again:
2340 total_scanned = 0; 2399 total_scanned = 0;
2341 sc.nr_reclaimed = 0; 2400 sc.nr_reclaimed = 0;
@@ -2412,11 +2471,15 @@ loop_again:
2412 2471
2413 sc.nr_scanned = 0; 2472 sc.nr_scanned = 0;
2414 2473
2474 nr_soft_scanned = 0;
2415 /* 2475 /*
2416 * Call soft limit reclaim before calling shrink_zone. 2476 * Call soft limit reclaim before calling shrink_zone.
2417 * For now we ignore the return value
2418 */ 2477 */
2419 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask); 2478 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2479 order, sc.gfp_mask,
2480 &nr_soft_scanned);
2481 sc.nr_reclaimed += nr_soft_reclaimed;
2482 total_scanned += nr_soft_scanned;
2420 2483
2421 /* 2484 /*
2422 * We put equal pressure on every zone, unless 2485 * We put equal pressure on every zone, unless
@@ -2435,8 +2498,7 @@ loop_again:
2435 end_zone, 0)) 2498 end_zone, 0))
2436 shrink_zone(priority, zone, &sc); 2499 shrink_zone(priority, zone, &sc);
2437 reclaim_state->reclaimed_slab = 0; 2500 reclaim_state->reclaimed_slab = 0;
2438 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 2501 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2439 lru_pages);
2440 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2502 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2441 total_scanned += sc.nr_scanned; 2503 total_scanned += sc.nr_scanned;
2442 2504
@@ -2788,7 +2850,10 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2788 .swappiness = vm_swappiness, 2850 .swappiness = vm_swappiness,
2789 .order = 0, 2851 .order = 0,
2790 }; 2852 };
2791 struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 2853 struct shrink_control shrink = {
2854 .gfp_mask = sc.gfp_mask,
2855 };
2856 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2792 struct task_struct *p = current; 2857 struct task_struct *p = current;
2793 unsigned long nr_reclaimed; 2858 unsigned long nr_reclaimed;
2794 2859
@@ -2797,7 +2862,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2797 reclaim_state.reclaimed_slab = 0; 2862 reclaim_state.reclaimed_slab = 0;
2798 p->reclaim_state = &reclaim_state; 2863 p->reclaim_state = &reclaim_state;
2799 2864
2800 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2865 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2801 2866
2802 p->reclaim_state = NULL; 2867 p->reclaim_state = NULL;
2803 lockdep_clear_current_reclaim_state(); 2868 lockdep_clear_current_reclaim_state();
@@ -2972,6 +3037,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2972 .swappiness = vm_swappiness, 3037 .swappiness = vm_swappiness,
2973 .order = order, 3038 .order = order,
2974 }; 3039 };
3040 struct shrink_control shrink = {
3041 .gfp_mask = sc.gfp_mask,
3042 };
2975 unsigned long nr_slab_pages0, nr_slab_pages1; 3043 unsigned long nr_slab_pages0, nr_slab_pages1;
2976 3044
2977 cond_resched(); 3045 cond_resched();
@@ -3013,7 +3081,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3013 unsigned long lru_pages = zone_reclaimable_pages(zone); 3081 unsigned long lru_pages = zone_reclaimable_pages(zone);
3014 3082
3015 /* No reclaimable slab or very low memory pressure */ 3083 /* No reclaimable slab or very low memory pressure */
3016 if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages)) 3084 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3017 break; 3085 break;
3018 3086
3019 /* Freed enough memory */ 3087 /* Freed enough memory */
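shrink_slab() now takes a struct shrink_control and calls every shrinker through the do_shrinker_shrink() wrapper, but the proportional arithmetic is unchanged: each shrinker is asked to scan a number of objects that scales with pages scanned over LRU pages, weighted by 1/seeks. A stand-alone sketch of that computation with made-up inputs:

/* shrink_math.c - the delta computation used by shrink_slab(), with toy numbers */
#include <stdio.h>

int main(void)
{
	unsigned long nr_pages_scanned = 1024;    /* pages this reclaim pass scanned (hypothetical) */
	unsigned long lru_pages        = 1 << 20; /* pages on the LRU lists (hypothetical) */
	unsigned long max_pass         = 50000;   /* objects the shrinker reports as shrinkable */
	unsigned long seeks            = 2;       /* shrinker->seeks: cost to recreate an object */

	/* delta = (4 * scanned / seeks) * max_pass / (lru_pages + 1), as in shrink_slab() */
	unsigned long long delta = (4ULL * nr_pages_scanned) / seeks;
	delta *= max_pass;
	delta /= lru_pages + 1;

	printf("ask the shrinker to scan about %llu objects this pass\n", delta);
	return 0;
}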
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 897ea9e88238..20c18b7694b2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -157,7 +157,7 @@ int calculate_normal_threshold(struct zone *zone)
157/* 157/*
158 * Refresh the thresholds for each zone. 158 * Refresh the thresholds for each zone.
159 */ 159 */
160static void refresh_zone_stat_thresholds(void) 160void refresh_zone_stat_thresholds(void)
161{ 161{
162 struct zone *zone; 162 struct zone *zone;
163 int cpu; 163 int cpu;
@@ -659,6 +659,138 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
659} 659}
660#endif 660#endif
661 661
662#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS)
663#ifdef CONFIG_ZONE_DMA
664#define TEXT_FOR_DMA(xx) xx "_dma",
665#else
666#define TEXT_FOR_DMA(xx)
667#endif
668
669#ifdef CONFIG_ZONE_DMA32
670#define TEXT_FOR_DMA32(xx) xx "_dma32",
671#else
672#define TEXT_FOR_DMA32(xx)
673#endif
674
675#ifdef CONFIG_HIGHMEM
676#define TEXT_FOR_HIGHMEM(xx) xx "_high",
677#else
678#define TEXT_FOR_HIGHMEM(xx)
679#endif
680
681#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
682 TEXT_FOR_HIGHMEM(xx) xx "_movable",
683
684const char * const vmstat_text[] = {
685 /* Zoned VM counters */
686 "nr_free_pages",
687 "nr_inactive_anon",
688 "nr_active_anon",
689 "nr_inactive_file",
690 "nr_active_file",
691 "nr_unevictable",
692 "nr_mlock",
693 "nr_anon_pages",
694 "nr_mapped",
695 "nr_file_pages",
696 "nr_dirty",
697 "nr_writeback",
698 "nr_slab_reclaimable",
699 "nr_slab_unreclaimable",
700 "nr_page_table_pages",
701 "nr_kernel_stack",
702 "nr_unstable",
703 "nr_bounce",
704 "nr_vmscan_write",
705 "nr_writeback_temp",
706 "nr_isolated_anon",
707 "nr_isolated_file",
708 "nr_shmem",
709 "nr_dirtied",
710 "nr_written",
711
712#ifdef CONFIG_NUMA
713 "numa_hit",
714 "numa_miss",
715 "numa_foreign",
716 "numa_interleave",
717 "numa_local",
718 "numa_other",
719#endif
720 "nr_anon_transparent_hugepages",
721 "nr_dirty_threshold",
722 "nr_dirty_background_threshold",
723
724#ifdef CONFIG_VM_EVENT_COUNTERS
725 "pgpgin",
726 "pgpgout",
727 "pswpin",
728 "pswpout",
729
730 TEXTS_FOR_ZONES("pgalloc")
731
732 "pgfree",
733 "pgactivate",
734 "pgdeactivate",
735
736 "pgfault",
737 "pgmajfault",
738
739 TEXTS_FOR_ZONES("pgrefill")
740 TEXTS_FOR_ZONES("pgsteal")
741 TEXTS_FOR_ZONES("pgscan_kswapd")
742 TEXTS_FOR_ZONES("pgscan_direct")
743
744#ifdef CONFIG_NUMA
745 "zone_reclaim_failed",
746#endif
747 "pginodesteal",
748 "slabs_scanned",
749 "kswapd_steal",
750 "kswapd_inodesteal",
751 "kswapd_low_wmark_hit_quickly",
752 "kswapd_high_wmark_hit_quickly",
753 "kswapd_skip_congestion_wait",
754 "pageoutrun",
755 "allocstall",
756
757 "pgrotated",
758
759#ifdef CONFIG_COMPACTION
760 "compact_blocks_moved",
761 "compact_pages_moved",
762 "compact_pagemigrate_failed",
763 "compact_stall",
764 "compact_fail",
765 "compact_success",
766#endif
767
768#ifdef CONFIG_HUGETLB_PAGE
769 "htlb_buddy_alloc_success",
770 "htlb_buddy_alloc_fail",
771#endif
772 "unevictable_pgs_culled",
773 "unevictable_pgs_scanned",
774 "unevictable_pgs_rescued",
775 "unevictable_pgs_mlocked",
776 "unevictable_pgs_munlocked",
777 "unevictable_pgs_cleared",
778 "unevictable_pgs_stranded",
779 "unevictable_pgs_mlockfreed",
780
781#ifdef CONFIG_TRANSPARENT_HUGEPAGE
782 "thp_fault_alloc",
783 "thp_fault_fallback",
784 "thp_collapse_alloc",
785 "thp_collapse_alloc_failed",
786 "thp_split",
787#endif
788
789#endif /* CONFIG_VM_EVENT_COUNTERS */
790};
791#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */
792
793
662#ifdef CONFIG_PROC_FS 794#ifdef CONFIG_PROC_FS
663static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, 795static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
664 struct zone *zone) 796 struct zone *zone)
@@ -831,135 +963,6 @@ static const struct file_operations pagetypeinfo_file_ops = {
831 .release = seq_release, 963 .release = seq_release,
832}; 964};
833 965
834#ifdef CONFIG_ZONE_DMA
835#define TEXT_FOR_DMA(xx) xx "_dma",
836#else
837#define TEXT_FOR_DMA(xx)
838#endif
839
840#ifdef CONFIG_ZONE_DMA32
841#define TEXT_FOR_DMA32(xx) xx "_dma32",
842#else
843#define TEXT_FOR_DMA32(xx)
844#endif
845
846#ifdef CONFIG_HIGHMEM
847#define TEXT_FOR_HIGHMEM(xx) xx "_high",
848#else
849#define TEXT_FOR_HIGHMEM(xx)
850#endif
851
852#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
853 TEXT_FOR_HIGHMEM(xx) xx "_movable",
854
855static const char * const vmstat_text[] = {
856 /* Zoned VM counters */
857 "nr_free_pages",
858 "nr_inactive_anon",
859 "nr_active_anon",
860 "nr_inactive_file",
861 "nr_active_file",
862 "nr_unevictable",
863 "nr_mlock",
864 "nr_anon_pages",
865 "nr_mapped",
866 "nr_file_pages",
867 "nr_dirty",
868 "nr_writeback",
869 "nr_slab_reclaimable",
870 "nr_slab_unreclaimable",
871 "nr_page_table_pages",
872 "nr_kernel_stack",
873 "nr_unstable",
874 "nr_bounce",
875 "nr_vmscan_write",
876 "nr_writeback_temp",
877 "nr_isolated_anon",
878 "nr_isolated_file",
879 "nr_shmem",
880 "nr_dirtied",
881 "nr_written",
882
883#ifdef CONFIG_NUMA
884 "numa_hit",
885 "numa_miss",
886 "numa_foreign",
887 "numa_interleave",
888 "numa_local",
889 "numa_other",
890#endif
891 "nr_anon_transparent_hugepages",
892 "nr_dirty_threshold",
893 "nr_dirty_background_threshold",
894
895#ifdef CONFIG_VM_EVENT_COUNTERS
896 "pgpgin",
897 "pgpgout",
898 "pswpin",
899 "pswpout",
900
901 TEXTS_FOR_ZONES("pgalloc")
902
903 "pgfree",
904 "pgactivate",
905 "pgdeactivate",
906
907 "pgfault",
908 "pgmajfault",
909
910 TEXTS_FOR_ZONES("pgrefill")
911 TEXTS_FOR_ZONES("pgsteal")
912 TEXTS_FOR_ZONES("pgscan_kswapd")
913 TEXTS_FOR_ZONES("pgscan_direct")
914
915#ifdef CONFIG_NUMA
916 "zone_reclaim_failed",
917#endif
918 "pginodesteal",
919 "slabs_scanned",
920 "kswapd_steal",
921 "kswapd_inodesteal",
922 "kswapd_low_wmark_hit_quickly",
923 "kswapd_high_wmark_hit_quickly",
924 "kswapd_skip_congestion_wait",
925 "pageoutrun",
926 "allocstall",
927
928 "pgrotated",
929
930#ifdef CONFIG_COMPACTION
931 "compact_blocks_moved",
932 "compact_pages_moved",
933 "compact_pagemigrate_failed",
934 "compact_stall",
935 "compact_fail",
936 "compact_success",
937#endif
938
939#ifdef CONFIG_HUGETLB_PAGE
940 "htlb_buddy_alloc_success",
941 "htlb_buddy_alloc_fail",
942#endif
943 "unevictable_pgs_culled",
944 "unevictable_pgs_scanned",
945 "unevictable_pgs_rescued",
946 "unevictable_pgs_mlocked",
947 "unevictable_pgs_munlocked",
948 "unevictable_pgs_cleared",
949 "unevictable_pgs_stranded",
950 "unevictable_pgs_mlockfreed",
951
952#ifdef CONFIG_TRANSPARENT_HUGEPAGE
953 "thp_fault_alloc",
954 "thp_fault_fallback",
955 "thp_collapse_alloc",
956 "thp_collapse_alloc_failed",
957 "thp_split",
958#endif
959
960#endif /* CONFIG_VM_EVENTS_COUNTERS */
961};
962
963static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 966static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
964 struct zone *zone) 967 struct zone *zone)
965{ 968{
@@ -1198,7 +1201,6 @@ static int __init setup_vmstat(void)
1198#ifdef CONFIG_SMP 1201#ifdef CONFIG_SMP
1199 int cpu; 1202 int cpu;
1200 1203
1201 refresh_zone_stat_thresholds();
1202 register_cpu_notifier(&vmstat_notifier); 1204 register_cpu_notifier(&vmstat_notifier);
1203 1205
1204 for_each_online_cpu(cpu) 1206 for_each_online_cpu(cpu)
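The vmstat.c hunks move vmstat_text out from under CONFIG_PROC_FS so the counter names are also built when only CONFIG_SYSFS is set; those names are the keys printed by /proc/vmstat. Below is a small user-space reader for pgmajfault, the counter the shmem fault path above now bumps (sketch; assumes procfs is mounted at /proc).

/* vmstat_read.c - print one /proc/vmstat counter by name */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long long value;

	if (!f) { perror("fopen"); return 1; }
	while (fscanf(f, "%63s %llu", name, &value) == 2) {
		if (strcmp(name, "pgmajfault") == 0)
			printf("pgmajfault %llu\n", value);
	}
	fclose(f);
	return 0;
}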
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 21cde8fd5795..db6baf7cf6e9 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -147,7 +147,6 @@ struct datalink_proto *register_snap_client(const unsigned char *desc,
147out: 147out:
148 spin_unlock_bh(&snap_lock); 148 spin_unlock_bh(&snap_lock);
149 149
150 synchronize_net();
151 return proto; 150 return proto;
152} 151}
153 152
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index c3408def8a19..9da07e30d1a2 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -118,11 +118,6 @@ extern void vlan_netlink_fini(void);
118 118
119extern struct rtnl_link_ops vlan_link_ops; 119extern struct rtnl_link_ops vlan_link_ops;
120 120
121static inline int is_vlan_dev(struct net_device *dev)
122{
123 return dev->priv_flags & IFF_802_1Q_VLAN;
124}
125
126extern int vlan_net_id; 121extern int vlan_net_id;
127 122
128struct proc_dir_entry; 123struct proc_dir_entry;
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 7ed75c7bd5d1..d9ea09b11cf8 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -3,8 +3,8 @@
3# 3#
4 4
5menuconfig NET_9P 5menuconfig NET_9P
6 depends on NET && EXPERIMENTAL 6 depends on NET
7 tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)" 7 tristate "Plan 9 Resource Sharing Support (9P2000)"
8 help 8 help
9 If you say Y here, you will get experimental support for 9 If you say Y here, you will get experimental support for
10 Plan 9 resource sharing via the 9P2000 protocol. 10 Plan 9 resource sharing via the 9P2000 protocol.
@@ -16,8 +16,8 @@ menuconfig NET_9P
16if NET_9P 16if NET_9P
17 17
18config NET_9P_VIRTIO 18config NET_9P_VIRTIO
19 depends on EXPERIMENTAL && VIRTIO 19 depends on VIRTIO
20 tristate "9P Virtio Transport (Experimental)" 20 tristate "9P Virtio Transport"
21 help 21 help
22 This builds support for a transports between 22 This builds support for a transports between
23 guest partitions and a host partition. 23 guest partitions and a host partition.
diff --git a/net/9p/client.c b/net/9p/client.c
index ceab943dfc49..9e3b0e640da1 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -92,9 +92,6 @@ static int get_protocol_version(const substring_t *name)
92 return version; 92 return version;
93} 93}
94 94
95static struct p9_req_t *
96p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
97
98/** 95/**
99 * parse_options - parse mount options into client structure 96 * parse_options - parse mount options into client structure
100 * @opts: options string passed from mount 97 * @opts: options string passed from mount
@@ -307,12 +304,13 @@ static int p9_tag_init(struct p9_client *c)
307 c->tagpool = p9_idpool_create(); 304 c->tagpool = p9_idpool_create();
308 if (IS_ERR(c->tagpool)) { 305 if (IS_ERR(c->tagpool)) {
309 err = PTR_ERR(c->tagpool); 306 err = PTR_ERR(c->tagpool);
310 c->tagpool = NULL;
311 goto error; 307 goto error;
312 } 308 }
313 309 err = p9_idpool_get(c->tagpool); /* reserve tag 0 */
314 p9_idpool_get(c->tagpool); /* reserve tag 0 */ 310 if (err < 0) {
315 311 p9_idpool_destroy(c->tagpool);
312 goto error;
313 }
316 c->max_tag = 0; 314 c->max_tag = 0;
317error: 315error:
318 return err; 316 return err;
@@ -518,12 +516,15 @@ out_err:
518 return err; 516 return err;
519} 517}
520 518
519static struct p9_req_t *
520p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
521
521/** 522/**
522 * p9_client_flush - flush (cancel) a request 523 * p9_client_flush - flush (cancel) a request
523 * @c: client state 524 * @c: client state
524 * @oldreq: request to cancel 525 * @oldreq: request to cancel
525 * 526 *
526 * This sends a flush for a particular requests and links	527 * This sends a flush for a particular request and links
527 * the flush request to the original request. The current 528 * the flush request to the original request. The current
528 * code only supports a single flush request although the protocol 529 * code only supports a single flush request although the protocol
529 * allows for multiple flush requests to be sent for a single request. 530 * allows for multiple flush requests to be sent for a single request.
@@ -789,11 +790,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
789 spin_lock_init(&clnt->lock); 790 spin_lock_init(&clnt->lock);
790 INIT_LIST_HEAD(&clnt->fidlist); 791 INIT_LIST_HEAD(&clnt->fidlist);
791 792
792 p9_tag_init(clnt); 793 err = p9_tag_init(clnt);
794 if (err < 0)
795 goto free_client;
793 796
794 err = parse_opts(options, clnt); 797 err = parse_opts(options, clnt);
795 if (err < 0) 798 if (err < 0)
796 goto free_client; 799 goto destroy_tagpool;
797 800
798 if (!clnt->trans_mod) 801 if (!clnt->trans_mod)
799 clnt->trans_mod = v9fs_get_default_trans(); 802 clnt->trans_mod = v9fs_get_default_trans();
@@ -802,13 +805,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
802 err = -EPROTONOSUPPORT; 805 err = -EPROTONOSUPPORT;
803 P9_DPRINTK(P9_DEBUG_ERROR, 806 P9_DPRINTK(P9_DEBUG_ERROR,
804 "No transport defined or default transport\n"); 807 "No transport defined or default transport\n");
805 goto free_client; 808 goto destroy_tagpool;
806 } 809 }
807 810
808 clnt->fidpool = p9_idpool_create(); 811 clnt->fidpool = p9_idpool_create();
809 if (IS_ERR(clnt->fidpool)) { 812 if (IS_ERR(clnt->fidpool)) {
810 err = PTR_ERR(clnt->fidpool); 813 err = PTR_ERR(clnt->fidpool);
811 clnt->fidpool = NULL;
812 goto put_trans; 814 goto put_trans;
813 } 815 }
814 816
@@ -834,6 +836,8 @@ destroy_fidpool:
834 p9_idpool_destroy(clnt->fidpool); 836 p9_idpool_destroy(clnt->fidpool);
835put_trans: 837put_trans:
836 v9fs_put_trans(clnt->trans_mod); 838 v9fs_put_trans(clnt->trans_mod);
839destroy_tagpool:
840 p9_idpool_destroy(clnt->tagpool);
837free_client: 841free_client:
838 kfree(clnt); 842 kfree(clnt);
839 return ERR_PTR(err); 843 return ERR_PTR(err);
@@ -1298,7 +1302,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1298 if (count < rsize) 1302 if (count < rsize)
1299 rsize = count; 1303 rsize = count;
1300 1304
1301 /* Don't bother zerocopy form small IO (< 1024) */ 1305 /* Don't bother zerocopy for small IO (< 1024) */
1302 if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) == 1306 if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
1303 P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) { 1307 P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
1304 req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset, 1308 req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
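The client.c hunks above make tag-pool setup failures propagate and extend the unwind chain with a destroy_tagpool label, so anything created before a later failure is released in reverse order. As a standalone illustration of that goto-unwind shape (not the real 9p API -- create_res()/destroy_res() are hypothetical stand-ins for p9_idpool_create() and friends), a minimal sketch:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for the tag pool, transport and
 * fid pool; malloc()/free() replace the real p9_idpool_* calls so the
 * sketch compiles on its own. */
static void *create_res(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "creating %s failed\n", name);
		return NULL;
	}
	printf("created %s\n", name);
	return malloc(1);
}

static void destroy_res(const char *name, void *res)
{
	printf("destroyed %s\n", name);
	free(res);
}

/* Every failure jumps to the label that unwinds exactly what has been
 * created so far, in reverse order -- the shape p9_client_create() now
 * has with its destroy_tagpool label. */
static int client_create(int fail_at)
{
	void *tagpool, *trans, *fidpool;
	int err = -1;

	tagpool = create_res("tagpool", fail_at == 1);
	if (!tagpool)
		goto out;

	trans = create_res("trans", fail_at == 2);
	if (!trans)
		goto destroy_tagpool;

	fidpool = create_res("fidpool", fail_at == 3);
	if (!fidpool)
		goto put_trans;

	/* Success: a real client keeps these; the demo releases them. */
	destroy_res("fidpool", fidpool);
	destroy_res("trans", trans);
	destroy_res("tagpool", tagpool);
	return 0;

put_trans:
	destroy_res("trans", trans);
destroy_tagpool:
	destroy_res("tagpool", tagpool);
out:
	return err;
}

int main(void)
{
	return (client_create(3) < 0 && client_create(0) == 0) ? 0 : 1;
}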
diff --git a/net/9p/mod.c b/net/9p/mod.c
index cf8a4128cd5c..72c398275051 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -139,7 +139,7 @@ void v9fs_put_trans(struct p9_trans_module *m)
139} 139}
140 140
141/** 141/**
142 * v9fs_init - Initialize module 142 * init_p9 - Initialize module
143 * 143 *
144 */ 144 */
145static int __init init_p9(void) 145static int __init init_p9(void)
@@ -154,7 +154,7 @@ static int __init init_p9(void)
154} 154}
155 155
156/** 156/**
157 * v9fs_init - shutdown module 157 * exit_p9 - shutdown module
158 * 158 *
159 */ 159 */
160 160
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4a9084395d35..fdfdb5747f63 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -916,8 +916,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
916 sin_server.sin_family = AF_INET; 916 sin_server.sin_family = AF_INET;
917 sin_server.sin_addr.s_addr = in_aton(addr); 917 sin_server.sin_addr.s_addr = in_aton(addr);
918 sin_server.sin_port = htons(opts.port); 918 sin_server.sin_port = htons(opts.port);
919 err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket); 919 err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
920 920 SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
921 if (err) { 921 if (err) {
922 P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n"); 922 P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
923 return err; 923 return err;
@@ -954,7 +954,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
954 954
955 sun_server.sun_family = PF_UNIX; 955 sun_server.sun_family = PF_UNIX;
956 strcpy(sun_server.sun_path, addr); 956 strcpy(sun_server.sun_path, addr);
957 err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket); 957 err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
958 SOCK_STREAM, 0, &csocket, 1);
958 if (err < 0) { 959 if (err < 0) {
959 P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n"); 960 P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
960 return err; 961 return err;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 844a7a5607e3..159c50f1c6bf 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
589 return -ENOMEM; 589 return -ENOMEM;
590 590
591 /* Create the RDMA CM ID */ 591 /* Create the RDMA CM ID */
592 rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP); 592 rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
593 IB_QPT_RC);
593 if (IS_ERR(rdma->cm_id)) 594 if (IS_ERR(rdma->cm_id))
594 goto error; 595 goto error;
595 596
diff --git a/net/9p/util.c b/net/9p/util.c
index da6af81e59d9..9c1c9348ac35 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -93,7 +93,7 @@ int p9_idpool_get(struct p9_idpool *p)
93 93
94retry: 94retry:
95 if (idr_pre_get(&p->pool, GFP_NOFS) == 0) 95 if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
96 return 0; 96 return -1;
97 97
98 spin_lock_irqsave(&p->lock, flags); 98 spin_lock_irqsave(&p->lock, flags);
99 99
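The util.c hunk changes p9_idpool_get()'s failure return from 0 to -1; since ids count up from 0 (and tag 0 is immediately reserved by p9_tag_init() above), 0 is a legitimate allocation result and cannot also mean failure. A toy standalone allocator making the same point -- the bitmap pool here is purely illustrative:

#include <stdio.h>

#define POOL_SIZE 8

/* Toy id allocator: valid ids start at 0, so 0 cannot double as a
 * failure value -- only a negative return is unambiguous. */
static int id_get(unsigned char *used)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (!used[i]) {
			used[i] = 1;
			return i;	/* the first id handed out is 0 */
		}
	}
	return -1;			/* pool exhausted */
}

int main(void)
{
	unsigned char used[POOL_SIZE] = { 0 };
	int id;

	while ((id = id_get(used)) >= 0)
		printf("got id %d\n", id);
	printf("allocation failed, return value %d\n", id);
	return 0;
}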
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 25073b6ef474..ba48daa68c1f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1171,7 +1171,7 @@ static int __init lane_module_init(void)
1171#endif 1171#endif
1172 1172
1173 register_atm_ioctl(&lane_ioctl_ops); 1173 register_atm_ioctl(&lane_ioctl_ops);
1174 pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n"); 1174 pr_info("lec.c: initialized\n");
1175 return 0; 1175 return 0;
1176} 1176}
1177 1177
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 644cdf071642..3ccca42e6f90 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1482,7 +1482,7 @@ static __init int atm_mpoa_init(void)
1482 if (mpc_proc_init() != 0) 1482 if (mpc_proc_init() != 0)
1483 pr_info("failed to initialize /proc/mpoa\n"); 1483 pr_info("failed to initialize /proc/mpoa\n");
1484 1484
1485 pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n"); 1485 pr_info("mpc.c: initialized\n");
1486 1486
1487 return 0; 1487 return 0;
1488} 1488}
diff --git a/net/atm/proc.c b/net/atm/proc.c
index f85da0779e5e..be3afdefec58 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -191,7 +191,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
191{ 191{
192 struct sock *sk = sk_atm(vcc); 192 struct sock *sk = sk_atm(vcc);
193 193
194 seq_printf(seq, "%p ", vcc); 194 seq_printf(seq, "%pK ", vcc);
195 if (!vcc->dev) 195 if (!vcc->dev)
196 seq_printf(seq, "Unassigned "); 196 seq_printf(seq, "Unassigned ");
197 else 197 else
@@ -218,7 +218,7 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
218{ 218{
219 if (!vcc->dev) 219 if (!vcc->dev)
220 seq_printf(seq, sizeof(void *) == 4 ? 220 seq_printf(seq, sizeof(void *) == 4 ?
221 "N/A@%p%10s" : "N/A@%p%2s", vcc, ""); 221 "N/A@%pK%10s" : "N/A@%pK%2s", vcc, "");
222 else 222 else
223 seq_printf(seq, "%3d %3d %5d ", 223 seq_printf(seq, "%3d %3d %5d ",
224 vcc->dev->number, vcc->vpi, vcc->vci); 224 vcc->dev->number, vcc->vpi, vcc->vci);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e1f5ec75e91c..3fa123185e89 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -117,6 +117,10 @@ static struct dst_ops fake_dst_ops = {
117 * ipt_REJECT needs it. Future netfilter modules might 117 * ipt_REJECT needs it. Future netfilter modules might
118 * require us to fill additional fields. 118 * require us to fill additional fields.
119 */ 119 */
120static const u32 br_dst_default_metrics[RTAX_MAX] = {
121 [RTAX_MTU - 1] = 1500,
122};
123
120void br_netfilter_rtable_init(struct net_bridge *br) 124void br_netfilter_rtable_init(struct net_bridge *br)
121{ 125{
122 struct rtable *rt = &br->fake_rtable; 126 struct rtable *rt = &br->fake_rtable;
@@ -124,7 +128,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
124 atomic_set(&rt->dst.__refcnt, 1); 128 atomic_set(&rt->dst.__refcnt, 1);
125 rt->dst.dev = br->dev; 129 rt->dst.dev = br->dev;
126 rt->dst.path = &rt->dst; 130 rt->dst.path = &rt->dst;
127 dst_metric_set(&rt->dst, RTAX_MTU, 1500); 131 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
128 rt->dst.flags = DST_NOXFRM; 132 rt->dst.flags = DST_NOXFRM;
129 rt->dst.ops = &fake_dst_ops; 133 rt->dst.ops = &fake_dst_ops;
130} 134}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index cced806098a9..184a6572b67e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -165,9 +165,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
165 struct bcm_sock *bo = bcm_sk(sk); 165 struct bcm_sock *bo = bcm_sk(sk);
166 struct bcm_op *op; 166 struct bcm_op *op;
167 167
168 seq_printf(m, ">>> socket %p", sk->sk_socket); 168 seq_printf(m, ">>> socket %pK", sk->sk_socket);
169 seq_printf(m, " / sk %p", sk); 169 seq_printf(m, " / sk %pK", sk);
170 seq_printf(m, " / bo %p", bo); 170 seq_printf(m, " / bo %pK", bo);
171 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); 171 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
172 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); 172 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
173 seq_printf(m, " <<<\n"); 173 seq_printf(m, " <<<\n");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index e15a82ccc05f..78b55f49de7c 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -76,7 +76,8 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
76 break; 76 break;
77 77
78 default: 78 default:
79 sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family); 79 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %d)",
80 (int)ss->ss_family);
80 } 81 }
81 82
82 return s; 83 return s;
@@ -598,7 +599,7 @@ static void prepare_write_keepalive(struct ceph_connection *con)
598 * Connection negotiation. 599 * Connection negotiation.
599 */ 600 */
600 601
601static void prepare_connect_authorizer(struct ceph_connection *con) 602static int prepare_connect_authorizer(struct ceph_connection *con)
602{ 603{
603 void *auth_buf; 604 void *auth_buf;
604 int auth_len = 0; 605 int auth_len = 0;
@@ -612,13 +613,20 @@ static void prepare_connect_authorizer(struct ceph_connection *con)
612 con->auth_retry); 613 con->auth_retry);
613 mutex_lock(&con->mutex); 614 mutex_lock(&con->mutex);
614 615
616 if (test_bit(CLOSED, &con->state) ||
617 test_bit(OPENING, &con->state))
618 return -EAGAIN;
619
615 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol); 620 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
616 con->out_connect.authorizer_len = cpu_to_le32(auth_len); 621 con->out_connect.authorizer_len = cpu_to_le32(auth_len);
617 622
618 con->out_kvec[con->out_kvec_left].iov_base = auth_buf; 623 if (auth_len) {
619 con->out_kvec[con->out_kvec_left].iov_len = auth_len; 624 con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
620 con->out_kvec_left++; 625 con->out_kvec[con->out_kvec_left].iov_len = auth_len;
621 con->out_kvec_bytes += auth_len; 626 con->out_kvec_left++;
627 con->out_kvec_bytes += auth_len;
628 }
629 return 0;
622} 630}
623 631
624/* 632/*
@@ -640,9 +648,9 @@ static void prepare_write_banner(struct ceph_messenger *msgr,
640 set_bit(WRITE_PENDING, &con->state); 648 set_bit(WRITE_PENDING, &con->state);
641} 649}
642 650
643static void prepare_write_connect(struct ceph_messenger *msgr, 651static int prepare_write_connect(struct ceph_messenger *msgr,
644 struct ceph_connection *con, 652 struct ceph_connection *con,
645 int after_banner) 653 int after_banner)
646{ 654{
647 unsigned global_seq = get_global_seq(con->msgr, 0); 655 unsigned global_seq = get_global_seq(con->msgr, 0);
648 int proto; 656 int proto;
@@ -683,7 +691,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
683 con->out_more = 0; 691 con->out_more = 0;
684 set_bit(WRITE_PENDING, &con->state); 692 set_bit(WRITE_PENDING, &con->state);
685 693
686 prepare_connect_authorizer(con); 694 return prepare_connect_authorizer(con);
687} 695}
688 696
689 697
@@ -1065,8 +1073,10 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
1065 switch (ss->ss_family) { 1073 switch (ss->ss_family) {
1066 case AF_INET: 1074 case AF_INET:
1067 ((struct sockaddr_in *)ss)->sin_port = htons(p); 1075 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1076 break;
1068 case AF_INET6: 1077 case AF_INET6:
1069 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); 1078 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1079 break;
1070 } 1080 }
1071} 1081}
1072 1082
@@ -1216,6 +1226,7 @@ static int process_connect(struct ceph_connection *con)
1216 u64 sup_feat = con->msgr->supported_features; 1226 u64 sup_feat = con->msgr->supported_features;
1217 u64 req_feat = con->msgr->required_features; 1227 u64 req_feat = con->msgr->required_features;
1218 u64 server_feat = le64_to_cpu(con->in_reply.features); 1228 u64 server_feat = le64_to_cpu(con->in_reply.features);
1229 int ret;
1219 1230
1220 dout("process_connect on %p tag %d\n", con, (int)con->in_tag); 1231 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1221 1232
@@ -1250,7 +1261,9 @@ static int process_connect(struct ceph_connection *con)
1250 return -1; 1261 return -1;
1251 } 1262 }
1252 con->auth_retry = 1; 1263 con->auth_retry = 1;
1253 prepare_write_connect(con->msgr, con, 0); 1264 ret = prepare_write_connect(con->msgr, con, 0);
1265 if (ret < 0)
1266 return ret;
1254 prepare_read_connect(con); 1267 prepare_read_connect(con);
1255 break; 1268 break;
1256 1269
@@ -1277,6 +1290,9 @@ static int process_connect(struct ceph_connection *con)
1277 if (con->ops->peer_reset) 1290 if (con->ops->peer_reset)
1278 con->ops->peer_reset(con); 1291 con->ops->peer_reset(con);
1279 mutex_lock(&con->mutex); 1292 mutex_lock(&con->mutex);
1293 if (test_bit(CLOSED, &con->state) ||
1294 test_bit(OPENING, &con->state))
1295 return -EAGAIN;
1280 break; 1296 break;
1281 1297
1282 case CEPH_MSGR_TAG_RETRY_SESSION: 1298 case CEPH_MSGR_TAG_RETRY_SESSION:
@@ -1341,7 +1357,9 @@ static int process_connect(struct ceph_connection *con)
1341 * to WAIT. This shouldn't happen if we are the 1357 * to WAIT. This shouldn't happen if we are the
1342 * client. 1358 * client.
1343 */ 1359 */
1344 pr_err("process_connect peer connecting WAIT\n"); 1360 pr_err("process_connect got WAIT as client\n");
1361 con->error_msg = "protocol error, got WAIT as client";
1362 return -1;
1345 1363
1346 default: 1364 default:
1347 pr_err("connect protocol error, will retry\n"); 1365 pr_err("connect protocol error, will retry\n");
@@ -1810,6 +1828,17 @@ static int try_read(struct ceph_connection *con)
1810more: 1828more:
1811 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 1829 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1812 con->in_base_pos); 1830 con->in_base_pos);
1831
1832 /*
1833 * process_connect and process_message drop and re-take
1834 * con->mutex. make sure we handle a racing close or reopen.
1835 */
1836 if (test_bit(CLOSED, &con->state) ||
1837 test_bit(OPENING, &con->state)) {
1838 ret = -EAGAIN;
1839 goto out;
1840 }
1841
1813 if (test_bit(CONNECTING, &con->state)) { 1842 if (test_bit(CONNECTING, &con->state)) {
1814 if (!test_bit(NEGOTIATING, &con->state)) { 1843 if (!test_bit(NEGOTIATING, &con->state)) {
1815 dout("try_read connecting\n"); 1844 dout("try_read connecting\n");
@@ -1938,8 +1967,10 @@ static void con_work(struct work_struct *work)
1938{ 1967{
1939 struct ceph_connection *con = container_of(work, struct ceph_connection, 1968 struct ceph_connection *con = container_of(work, struct ceph_connection,
1940 work.work); 1969 work.work);
1970 int ret;
1941 1971
1942 mutex_lock(&con->mutex); 1972 mutex_lock(&con->mutex);
1973restart:
1943 if (test_and_clear_bit(BACKOFF, &con->state)) { 1974 if (test_and_clear_bit(BACKOFF, &con->state)) {
1944 dout("con_work %p backing off\n", con); 1975 dout("con_work %p backing off\n", con);
1945 if (queue_delayed_work(ceph_msgr_wq, &con->work, 1976 if (queue_delayed_work(ceph_msgr_wq, &con->work,
@@ -1969,18 +2000,31 @@ static void con_work(struct work_struct *work)
1969 con_close_socket(con); 2000 con_close_socket(con);
1970 } 2001 }
1971 2002
1972 if (test_and_clear_bit(SOCK_CLOSED, &con->state) || 2003 if (test_and_clear_bit(SOCK_CLOSED, &con->state))
1973 try_read(con) < 0 || 2004 goto fault;
1974 try_write(con) < 0) { 2005
1975 mutex_unlock(&con->mutex); 2006 ret = try_read(con);
1976 ceph_fault(con); /* error/fault path */ 2007 if (ret == -EAGAIN)
1977 goto done_unlocked; 2008 goto restart;
1978 } 2009 if (ret < 0)
2010 goto fault;
2011
2012 ret = try_write(con);
2013 if (ret == -EAGAIN)
2014 goto restart;
2015 if (ret < 0)
2016 goto fault;
1979 2017
1980done: 2018done:
1981 mutex_unlock(&con->mutex); 2019 mutex_unlock(&con->mutex);
1982done_unlocked: 2020done_unlocked:
1983 con->ops->put(con); 2021 con->ops->put(con);
2022 return;
2023
2024fault:
2025 mutex_unlock(&con->mutex);
2026 ceph_fault(con); /* error/fault path */
2027 goto done_unlocked;
1984} 2028}
1985 2029
1986 2030
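The con_work() rewrite above distinguishes three outcomes from try_read()/try_write(): -EAGAIN (the connection raced with a close or reopen, so restart under the mutex), any other negative value (unlock and take the fault path), and success. A standalone sketch of that dispatch shape, assuming scripted stub I/O in place of the real socket handling:

#include <errno.h>
#include <stdio.h>

/* Scripted return values standing in for real socket I/O: first a race
 * with a close/reopen (-EAGAIN), then a successful read, then an error. */
static int script[] = { -EAGAIN, 0, -EIO };
static int step;

static int try_io(void)
{
	return script[step++];
}

static void handle_fault(void)
{
	printf("fault path: close socket, back off, reconnect\n");
}

static void con_work(void)
{
	int ret;

restart:
	printf("re-checking connection state under the mutex\n");

	ret = try_io();			/* read side */
	if (ret == -EAGAIN)
		goto restart;		/* racing close/reopen: start over */
	if (ret < 0)
		goto fault;

	ret = try_io();			/* write side */
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0)
		goto fault;

	printf("done\n");
	return;
fault:
	handle_fault();
}

int main(void)
{
	con_work();
	return 0;
}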
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6b5dda1cb5df..6ea2b892f44b 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -124,7 +124,7 @@ static void calc_layout(struct ceph_osd_client *osdc,
124 ceph_calc_raw_layout(osdc, layout, vino.snap, off, 124 ceph_calc_raw_layout(osdc, layout, vino.snap, off,
125 plen, &bno, req, op); 125 plen, &bno, req, op);
126 126
127 sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno); 127 snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
128 req->r_oid_len = strlen(req->r_oid); 128 req->r_oid_len = strlen(req->r_oid);
129} 129}
130 130
@@ -1421,6 +1421,15 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1421done: 1421done:
1422 downgrade_write(&osdc->map_sem); 1422 downgrade_write(&osdc->map_sem);
1423 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); 1423 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
1424
1425 /*
1426 * subscribe to subsequent osdmap updates if full to ensure
1427 * we find out when we are no longer full and stop returning
1428 * ENOSPC.
1429 */
1430 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
1431 ceph_monc_request_next_osdmap(&osdc->client->monc);
1432
1424 send_queued(osdc); 1433 send_queued(osdc);
1425 up_read(&osdc->map_sem); 1434 up_read(&osdc->map_sem);
1426 wake_up_all(&osdc->client->auth_wq); 1435 wake_up_all(&osdc->client->auth_wq);
@@ -1677,8 +1686,14 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
1677 */ 1686 */
1678 if (req->r_sent == 0) { 1687 if (req->r_sent == 0) {
1679 rc = __map_request(osdc, req); 1688 rc = __map_request(osdc, req);
1680 if (rc < 0) 1689 if (rc < 0) {
1690 if (nofail) {
1691 dout("osdc_start_request failed map, "
1692 " will retry %lld\n", req->r_tid);
1693 rc = 0;
1694 }
1681 goto out_unlock; 1695 goto out_unlock;
1696 }
1682 if (req->r_osd == NULL) { 1697 if (req->r_osd == NULL) {
1683 dout("send_request %p no up osds in pg\n", req); 1698 dout("send_request %p no up osds in pg\n", req);
1684 ceph_monc_request_next_osdmap(&osdc->client->monc); 1699 ceph_monc_request_next_osdmap(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 71603ac3dff5..e97c3588c3ec 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -765,7 +765,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
765 } 765 }
766 766
767 map->epoch++; 767 map->epoch++;
768 map->modified = map->modified; 768 map->modified = modified;
769 if (newcrush) { 769 if (newcrush) {
770 if (map->crush) 770 if (map->crush)
771 crush_destroy(map->crush); 771 crush_destroy(map->crush);
@@ -830,15 +830,20 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
830 map->osd_addr[osd] = addr; 830 map->osd_addr[osd] = addr;
831 } 831 }
832 832
833 /* new_down */ 833 /* new_state */
834 ceph_decode_32_safe(p, end, len, bad); 834 ceph_decode_32_safe(p, end, len, bad);
835 while (len--) { 835 while (len--) {
836 u32 osd; 836 u32 osd;
837 u8 xorstate;
837 ceph_decode_32_safe(p, end, osd, bad); 838 ceph_decode_32_safe(p, end, osd, bad);
839 xorstate = **(u8 **)p;
838 (*p)++; /* clean flag */ 840 (*p)++; /* clean flag */
839 pr_info("osd%d down\n", osd); 841 if (xorstate == 0)
842 xorstate = CEPH_OSD_UP;
843 if (xorstate & CEPH_OSD_UP)
844 pr_info("osd%d down\n", osd);
840 if (osd < map->max_osd) 845 if (osd < map->max_osd)
841 map->osd_state[osd] &= ~CEPH_OSD_UP; 846 map->osd_state[osd] ^= xorstate;
842 } 847 }
843 848
844 /* new_weight */ 849 /* new_weight */
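The new_state hunk above reads a per-OSD xorstate byte and XORs it into osd_state[], treating a zero byte as "toggle UP" so the old new_down encoding keeps working; the previous code could only clear the UP bit. A small standalone sketch of that decode (the flag values are illustrative, not the real CEPH_OSD_* constants):

#include <stdio.h>

/* Illustrative flag values -- not the real CEPH_OSD_* constants. */
#define OSD_EXISTS	0x1
#define OSD_UP		0x2

static void apply_xorstate(unsigned char *state, unsigned char xorstate)
{
	/* A zero byte keeps the old wire meaning: every entry in the
	 * (formerly "new_down") list toggles the UP bit. */
	if (xorstate == 0)
		xorstate = OSD_UP;
	if (xorstate & OSD_UP)
		printf("osd goes %s\n", (*state & OSD_UP) ? "down" : "up");
	*state ^= xorstate;	/* toggle exactly the flipped bits */
}

int main(void)
{
	unsigned char state = OSD_EXISTS | OSD_UP;

	apply_xorstate(&state, 0);		/* legacy entry: osd down */
	apply_xorstate(&state, OSD_UP);		/* explicit toggle: back up */
	printf("final state 0x%x\n", state);
	return 0;
}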
diff --git a/net/core/dev.c b/net/core/dev.c
index bcb05cb799c1..c7e305d13b71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1308,6 +1308,13 @@ void dev_disable_lro(struct net_device *dev)
1308{ 1308{
1309 u32 flags; 1309 u32 flags;
1310 1310
1311 /*
1312 * If we're trying to disable lro on a vlan device
1313 * use the underlying physical device instead
1314 */
1315 if (is_vlan_dev(dev))
1316 dev = vlan_dev_real_dev(dev);
1317
1311 if (dev->ethtool_ops && dev->ethtool_ops->get_flags) 1318 if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
1312 flags = dev->ethtool_ops->get_flags(dev); 1319 flags = dev->ethtool_ops->get_flags(dev);
1313 else 1320 else
@@ -5954,7 +5961,10 @@ EXPORT_SYMBOL(free_netdev);
5954void synchronize_net(void) 5961void synchronize_net(void)
5955{ 5962{
5956 might_sleep(); 5963 might_sleep();
5957 synchronize_rcu(); 5964 if (rtnl_is_locked())
5965 synchronize_rcu_expedited();
5966 else
5967 synchronize_rcu();
5958} 5968}
5959EXPORT_SYMBOL(synchronize_net); 5969EXPORT_SYMBOL(synchronize_net);
5960 5970
diff --git a/net/core/dst.c b/net/core/dst.c
index 81a4fa1c95ed..9ccca038444f 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -315,7 +315,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
315{ 315{
316 unsigned long prev, new; 316 unsigned long prev, new;
317 317
318 new = (unsigned long) dst_default_metrics; 318 new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
319 prev = cmpxchg(&dst->_metrics, old, new); 319 prev = cmpxchg(&dst->_metrics, old, new);
320 if (prev == old) 320 if (prev == old)
321 kfree(__DST_METRICS_PTR(old)); 321 kfree(__DST_METRICS_PTR(old));
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3911586e12e4..008dc70b064b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -602,6 +602,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
602skip: 602skip:
603 idx++; 603 idx++;
604 } 604 }
605 rcu_read_unlock();
605 cb->args[1] = idx; 606 cb->args[1] = idx;
606 rules_ops_put(ops); 607 rules_ops_put(ops);
607 608
diff --git a/net/core/filter.c b/net/core/filter.c
index 0eb8c4466eaa..0e3622f1dcb1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -350,7 +350,9 @@ load_b:
350 continue; 350 continue;
351 } 351 }
352 default: 352 default:
353 WARN_ON(1); 353 WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
354 fentry->code, fentry->jt,
355 fentry->jf, fentry->k);
354 return 0; 356 return 0;
355 } 357 }
356 } 358 }
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2e2dce6583e1..6c6b86d0da15 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -8,6 +8,8 @@
8#include <linux/idr.h> 8#include <linux/idr.h>
9#include <linux/rculist.h> 9#include <linux/rculist.h>
10#include <linux/nsproxy.h> 10#include <linux/nsproxy.h>
11#include <linux/proc_fs.h>
12#include <linux/file.h>
11#include <net/net_namespace.h> 13#include <net/net_namespace.h>
12#include <net/netns/generic.h> 14#include <net/netns/generic.h>
13 15
@@ -302,6 +304,28 @@ void __put_net(struct net *net)
302} 304}
303EXPORT_SYMBOL_GPL(__put_net); 305EXPORT_SYMBOL_GPL(__put_net);
304 306
307struct net *get_net_ns_by_fd(int fd)
308{
309 struct proc_inode *ei;
310 struct file *file;
311 struct net *net;
312
313 net = ERR_PTR(-EINVAL);
314 file = proc_ns_fget(fd);
315 if (!file)
316 goto out;
317
318 ei = PROC_I(file->f_dentry->d_inode);
319 if (ei->ns_ops != &netns_operations)
320 goto out;
321
322 net = get_net(ei->ns);
323out:
324 if (file)
325 fput(file);
326 return net;
327}
328
305#else 329#else
306struct net *copy_net_ns(unsigned long flags, struct net *old_net) 330struct net *copy_net_ns(unsigned long flags, struct net *old_net)
307{ 331{
@@ -309,6 +333,11 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
309 return ERR_PTR(-EINVAL); 333 return ERR_PTR(-EINVAL);
310 return old_net; 334 return old_net;
311} 335}
336
337struct net *get_net_ns_by_fd(int fd)
338{
339 return ERR_PTR(-EINVAL);
340}
312#endif 341#endif
313 342
314struct net *get_net_ns_by_pid(pid_t pid) 343struct net *get_net_ns_by_pid(pid_t pid)
@@ -561,3 +590,39 @@ void unregister_pernet_device(struct pernet_operations *ops)
561 mutex_unlock(&net_mutex); 590 mutex_unlock(&net_mutex);
562} 591}
563EXPORT_SYMBOL_GPL(unregister_pernet_device); 592EXPORT_SYMBOL_GPL(unregister_pernet_device);
593
594#ifdef CONFIG_NET_NS
595static void *netns_get(struct task_struct *task)
596{
597 struct net *net = NULL;
598 struct nsproxy *nsproxy;
599
600 rcu_read_lock();
601 nsproxy = task_nsproxy(task);
602 if (nsproxy)
603 net = get_net(nsproxy->net_ns);
604 rcu_read_unlock();
605
606 return net;
607}
608
609static void netns_put(void *ns)
610{
611 put_net(ns);
612}
613
614static int netns_install(struct nsproxy *nsproxy, void *ns)
615{
616 put_net(nsproxy->net_ns);
617 nsproxy->net_ns = get_net(ns);
618 return 0;
619}
620
621const struct proc_ns_operations netns_operations = {
622 .name = "net",
623 .type = CLONE_NEWNET,
624 .get = netns_get,
625 .put = netns_put,
626 .install = netns_install,
627};
628#endif
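With netns_operations registered above, each task's network namespace shows up as /proc/<pid>/ns/net, and an open descriptor on that file is what get_net_ns_by_fd() (and the companion IFLA_NET_NS_FD attribute in the rtnetlink.c hunks below) consumes. A minimal userspace sketch that joins another process's network namespace through that fd with setns(); it needs CAP_SYS_ADMIN, and the target pid is just a placeholder argument:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *pid = argc > 1 ? argv[1] : "1";
	char path[64];
	int fd;

	/* The file opened here is what get_net_ns_by_fd() accepts on the
	 * kernel side and what setns() consumes here. */
	snprintf(path, sizeof(path), "/proc/%s/ns/net", pid);

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* CLONE_NEWNET makes setns() verify the fd really is a net ns. */
	if (setns(fd, CLONE_NEWNET) < 0) {
		perror("setns");
		close(fd);
		return 1;
	}
	close(fd);

	/* Sockets created from now on live in the target namespace. */
	printf("joined network namespace of pid %s\n", pid);
	return 0;
}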
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d1644e317e70..abd936d8a716 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -850,6 +850,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
850 struct nlattr *attr, *af_spec; 850 struct nlattr *attr, *af_spec;
851 struct rtnl_af_ops *af_ops; 851 struct rtnl_af_ops *af_ops;
852 852
853 ASSERT_RTNL();
853 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 854 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
854 if (nlh == NULL) 855 if (nlh == NULL)
855 return -EMSGSIZE; 856 return -EMSGSIZE;
@@ -1045,6 +1046,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1045 [IFLA_LINKMODE] = { .type = NLA_U8 }, 1046 [IFLA_LINKMODE] = { .type = NLA_U8 },
1046 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1047 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1047 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1048 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1049 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1048 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 1050 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
1049 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1051 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1050 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1052 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
@@ -1093,6 +1095,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1093 */ 1095 */
1094 if (tb[IFLA_NET_NS_PID]) 1096 if (tb[IFLA_NET_NS_PID])
1095 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 1097 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1098 else if (tb[IFLA_NET_NS_FD])
1099 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1096 else 1100 else
1097 net = get_net(src_net); 1101 net = get_net(src_net);
1098 return net; 1102 return net;
@@ -1223,7 +1227,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1223 int send_addr_notify = 0; 1227 int send_addr_notify = 0;
1224 int err; 1228 int err;
1225 1229
1226 if (tb[IFLA_NET_NS_PID]) { 1230 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1227 struct net *net = rtnl_link_get_net(dev_net(dev), tb); 1231 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
1228 if (IS_ERR(net)) { 1232 if (IS_ERR(net)) {
1229 err = PTR_ERR(net); 1233 err = PTR_ERR(net);
@@ -1876,6 +1880,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1876 int min_len; 1880 int min_len;
1877 int family; 1881 int family;
1878 int type; 1882 int type;
1883 int err;
1879 1884
1880 type = nlh->nlmsg_type; 1885 type = nlh->nlmsg_type;
1881 if (type > RTM_MAX) 1886 if (type > RTM_MAX)
@@ -1902,8 +1907,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1902 if (dumpit == NULL) 1907 if (dumpit == NULL)
1903 return -EOPNOTSUPP; 1908 return -EOPNOTSUPP;
1904 1909
1910 __rtnl_unlock();
1905 rtnl = net->rtnl; 1911 rtnl = net->rtnl;
1906 return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); 1912 err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
1913 rtnl_lock();
1914 return err;
1907 } 1915 }
1908 1916
1909 memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *))); 1917 memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
@@ -1975,7 +1983,7 @@ static int __net_init rtnetlink_net_init(struct net *net)
1975{ 1983{
1976 struct sock *sk; 1984 struct sock *sk;
1977 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 1985 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
1978 rtnetlink_rcv, NULL, THIS_MODULE); 1986 rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
1979 if (!sk) 1987 if (!sk)
1980 return -ENOMEM; 1988 return -ENOMEM;
1981 net->rtnl = sk; 1989 net->rtnl = sk;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 672e476c8c8a..f1d27f6c9351 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1155,20 +1155,18 @@ static void igmp_group_dropped(struct ip_mc_list *im)
1155 1155
1156 if (!in_dev->dead) { 1156 if (!in_dev->dead) {
1157 if (IGMP_V1_SEEN(in_dev)) 1157 if (IGMP_V1_SEEN(in_dev))
1158 goto done; 1158 return;
1159 if (IGMP_V2_SEEN(in_dev)) { 1159 if (IGMP_V2_SEEN(in_dev)) {
1160 if (reporter) 1160 if (reporter)
1161 igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE); 1161 igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
1162 goto done; 1162 return;
1163 } 1163 }
1164 /* IGMPv3 */ 1164 /* IGMPv3 */
1165 igmpv3_add_delrec(in_dev, im); 1165 igmpv3_add_delrec(in_dev, im);
1166 1166
1167 igmp_ifc_event(in_dev); 1167 igmp_ifc_event(in_dev);
1168 } 1168 }
1169done:
1170#endif 1169#endif
1171 ip_mc_clear_src(im);
1172} 1170}
1173 1171
1174static void igmp_group_added(struct ip_mc_list *im) 1172static void igmp_group_added(struct ip_mc_list *im)
@@ -1305,6 +1303,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1305 *ip = i->next_rcu; 1303 *ip = i->next_rcu;
1306 in_dev->mc_count--; 1304 in_dev->mc_count--;
1307 igmp_group_dropped(i); 1305 igmp_group_dropped(i);
1306 ip_mc_clear_src(i);
1308 1307
1309 if (!in_dev->dead) 1308 if (!in_dev->dead)
1310 ip_rt_multicast_event(in_dev); 1309 ip_rt_multicast_event(in_dev);
@@ -1414,7 +1413,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1414 in_dev->mc_list = i->next_rcu; 1413 in_dev->mc_list = i->next_rcu;
1415 in_dev->mc_count--; 1414 in_dev->mc_count--;
1416 1415
1417 igmp_group_dropped(i); 1416 /* We've dropped the groups in ip_mc_down already */
1417 ip_mc_clear_src(i);
1418 ip_ma_put(i); 1418 ip_ma_put(i);
1419 } 1419 }
1420} 1420}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 61fac4cabc78..c14d88ad348d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg);
33 * This struct holds the first and last local port number. 33 * This struct holds the first and last local port number.
34 */ 34 */
35struct local_ports sysctl_local_ports __read_mostly = { 35struct local_ports sysctl_local_ports __read_mostly = {
36 .lock = SEQLOCK_UNLOCKED, 36 .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
37 .range = { 32768, 61000 }, 37 .range = { 32768, 61000 },
38}; 38};
39 39
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1f3bb11490c9..9aaa67165f42 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -137,9 +137,6 @@ static void ping_v4_unhash(struct sock *sk)
137 struct inet_sock *isk = inet_sk(sk); 137 struct inet_sock *isk = inet_sk(sk);
138 pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); 138 pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
139 if (sk_hashed(sk)) { 139 if (sk_hashed(sk)) {
140 struct hlist_nulls_head *hslot;
141
142 hslot = ping_hashslot(&ping_table, sock_net(sk), isk->inet_num);
143 write_lock_bh(&ping_table.lock); 140 write_lock_bh(&ping_table.lock);
144 hlist_nulls_del(&sk->sk_nulls_node); 141 hlist_nulls_del(&sk->sk_nulls_node);
145 sock_put(sk); 142 sock_put(sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11e1780455f2..c9893d43242e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -979,7 +979,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
979 srcp = inet->inet_num; 979 srcp = inet->inet_num;
980 980
981 seq_printf(seq, "%4d: %08X:%04X %08X:%04X" 981 seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
982 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", 982 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
983 i, src, srcp, dest, destp, sp->sk_state, 983 i, src, srcp, dest, destp, sp->sk_state,
984 sk_wmem_alloc_get(sp), 984 sk_wmem_alloc_get(sp),
985 sk_rmem_alloc_get(sp), 985 sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3c8d9b6f1ea4..a7d6671e33b8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2371,7 +2371,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
2371 int ttd = req->expires - jiffies; 2371 int ttd = req->expires - jiffies;
2372 2372
2373 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2373 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2374 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", 2374 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2375 i, 2375 i,
2376 ireq->loc_addr, 2376 ireq->loc_addr,
2377 ntohs(inet_sk(sk)->inet_sport), 2377 ntohs(inet_sk(sk)->inet_sport),
@@ -2426,7 +2426,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2426 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); 2426 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2427 2427
2428 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 2428 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2429 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n", 2429 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2430 i, src, srcp, dest, destp, sk->sk_state, 2430 i, src, srcp, dest, destp, sk->sk_state,
2431 tp->write_seq - tp->snd_una, 2431 tp->write_seq - tp->snd_una,
2432 rx_queue, 2432 rx_queue,
@@ -2461,7 +2461,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
2461 srcp = ntohs(tw->tw_sport); 2461 srcp = ntohs(tw->tw_sport);
2462 2462
2463 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2463 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2464 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n", 2464 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2465 i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 2465 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2466 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, 2466 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2467 atomic_read(&tw->tw_refcnt), tw, len); 2467 atomic_read(&tw->tw_refcnt), tw, len);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 599374f65c76..abca870d8ff6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2090,7 +2090,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
2090 __u16 srcp = ntohs(inet->inet_sport); 2090 __u16 srcp = ntohs(inet->inet_sport);
2091 2091
2092 seq_printf(f, "%5d: %08X:%04X %08X:%04X" 2092 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
2093 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", 2093 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
2094 bucket, src, srcp, dest, destp, sp->sk_state, 2094 bucket, src, srcp, dest, destp, sp->sk_state,
2095 sk_wmem_alloc_get(sp), 2095 sk_wmem_alloc_get(sp),
2096 sk_rmem_alloc_get(sp), 2096 sk_rmem_alloc_get(sp),
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ae64984f81aa..cc7313b8f7ea 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1240,7 +1240,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1240 srcp = inet_sk(sp)->inet_num; 1240 srcp = inet_sk(sp)->inet_num;
1241 seq_printf(seq, 1241 seq_printf(seq,
1242 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1242 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1243 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", 1243 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
1244 i, 1244 i,
1245 src->s6_addr32[0], src->s6_addr32[1], 1245 src->s6_addr32[0], src->s6_addr32[1],
1246 src->s6_addr32[2], src->s6_addr32[3], srcp, 1246 src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 868366470b4a..d1fd28711ba5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2036,7 +2036,7 @@ static void get_openreq6(struct seq_file *seq,
2036 2036
2037 seq_printf(seq, 2037 seq_printf(seq,
2038 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 2038 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2039 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", 2039 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2040 i, 2040 i,
2041 src->s6_addr32[0], src->s6_addr32[1], 2041 src->s6_addr32[0], src->s6_addr32[1],
2042 src->s6_addr32[2], src->s6_addr32[3], 2042 src->s6_addr32[2], src->s6_addr32[3],
@@ -2087,7 +2087,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2087 2087
2088 seq_printf(seq, 2088 seq_printf(seq,
2089 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 2089 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2090 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n", 2090 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2091 i, 2091 i,
2092 src->s6_addr32[0], src->s6_addr32[1], 2092 src->s6_addr32[0], src->s6_addr32[1],
2093 src->s6_addr32[2], src->s6_addr32[3], srcp, 2093 src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -2129,7 +2129,7 @@ static void get_timewait6_sock(struct seq_file *seq,
2129 2129
2130 seq_printf(seq, 2130 seq_printf(seq,
2131 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 2131 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2132 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n", 2132 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2133 i, 2133 i,
2134 src->s6_addr32[0], src->s6_addr32[1], 2134 src->s6_addr32[0], src->s6_addr32[1],
2135 src->s6_addr32[2], src->s6_addr32[3], srcp, 2135 src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fc0c42a88e54..41f8c9c08dba 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1391,7 +1391,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
1391 srcp = ntohs(inet->inet_sport); 1391 srcp = ntohs(inet->inet_sport);
1392 seq_printf(seq, 1392 seq_printf(seq,
1393 "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1393 "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1394 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", 1394 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
1395 bucket, 1395 bucket,
1396 src->s6_addr32[0], src->s6_addr32[1], 1396 src->s6_addr32[0], src->s6_addr32[1],
1397 src->s6_addr32[2], src->s6_addr32[3], srcp, 1397 src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index a6770a04e3bd..4fe1db12d2a3 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -241,7 +241,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
241 __be32 spi; 241 __be32 spi;
242 242
243 spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr); 243 spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
244 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; 244 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
245} 245}
246 246
247static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 247static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a15c01524959..7f9124914b13 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -54,7 +54,7 @@
54#include <asm/atomic.h> 54#include <asm/atomic.h>
55#include <asm/ebcdic.h> 55#include <asm/ebcdic.h>
56#include <asm/io.h> 56#include <asm/io.h>
57#include <asm/s390_ext.h> 57#include <asm/irq.h>
58#include <asm/smp.h> 58#include <asm/smp.h>
59 59
60/* 60/*
diff --git a/net/key/af_key.c b/net/key/af_key.c
index d62401c25684..8f92cf8116ea 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3656,7 +3656,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
3656 if (v == SEQ_START_TOKEN) 3656 if (v == SEQ_START_TOKEN)
3657 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); 3657 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
3658 else 3658 else
3659 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", 3659 seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
3660 s, 3660 s,
3661 atomic_read(&s->sk_refcnt), 3661 atomic_read(&s->sk_refcnt),
3662 sk_rmem_alloc_get(s), 3662 sk_rmem_alloc_get(s),
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7dfbe71dc637..49d4f869e0bc 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -384,11 +384,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
384 int i; 384 int i;
385 enum nl80211_channel_type orig_ct; 385 enum nl80211_channel_type orig_ct;
386 386
387 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
388
387 if (local->scan_sdata == sdata) 389 if (local->scan_sdata == sdata)
388 ieee80211_scan_cancel(local); 390 ieee80211_scan_cancel(local);
389 391
390 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
391
392 /* 392 /*
393 * Stop TX on this interface first. 393 * Stop TX on this interface first.
394 */ 394 */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d7b08db8e56..866f269183cf 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -752,11 +752,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
752 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); 752 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
753 hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); 753 hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
754 754
755 /* mac80211 doesn't support more than 1 channel */ 755 /*
756 for (i = 0; i < hw->wiphy->n_iface_combinations; i++) 756 * mac80211 doesn't support more than 1 channel, and also not more
757 if (hw->wiphy->iface_combinations[i].num_different_channels > 1) 757 * than one IBSS interface
758 */
759 for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
760 const struct ieee80211_iface_combination *c;
761 int j;
762
763 c = &hw->wiphy->iface_combinations[i];
764
765 if (c->num_different_channels > 1)
758 return -EINVAL; 766 return -EINVAL;
759 767
768 for (j = 0; j < c->n_limits; j++)
769 if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
770 c->limits[j].max > 1)
771 return -EINVAL;
772 }
773
760#ifndef CONFIG_MAC80211_MESH 774#ifndef CONFIG_MAC80211_MESH
761 /* mesh depends on Kconfig, but drivers should set it if they want */ 775 /* mesh depends on Kconfig, but drivers should set it if they want */
762 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); 776 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
@@ -1076,6 +1090,8 @@ static void __exit ieee80211_exit(void)
1076 ieee80211s_stop(); 1090 ieee80211s_stop();
1077 1091
1078 ieee80211_iface_exit(); 1092 ieee80211_iface_exit();
1093
1094 rcu_barrier();
1079} 1095}
1080 1096
1081 1097
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e7c5fddb4804..249e733362e7 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -120,6 +120,7 @@ struct mesh_path {
120 * buckets 120 * buckets
121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is 121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
122 * reached, the table will grow 122 * reached, the table will grow
123 * rcu_head: RCU head to free the table
123 */ 124 */
124struct mesh_table { 125struct mesh_table {
125 /* Number of buckets will be 2^N */ 126 /* Number of buckets will be 2^N */
@@ -132,6 +133,8 @@ struct mesh_table {
132 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); 133 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
133 int size_order; 134 int size_order;
134 int mean_chain_len; 135 int mean_chain_len;
136
137 struct rcu_head rcu_head;
135}; 138};
136 139
137/* Recent multicast cache */ 140/* Recent multicast cache */
@@ -286,10 +289,6 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
286 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; 289 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
287} 290}
288 291
289#define for_each_mesh_entry(x, p, node, i) \
290 for (i = 0; i <= x->hash_mask; i++) \
291 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
292
293void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 292void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
294 293
295void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); 294void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
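The rcu_head embedded in mesh_table above follows the usual deferred-free pattern: call_rcu() hands the callback only the embedded head, and the callback recovers the enclosing table with container_of() (see mesh_table_free_rcu() in the mesh_pathtbl.c hunks below). A standalone sketch of that embedded-member recovery, using an offsetof-based container_of and illustrative struct names:

#include <stddef.h>
#include <stdio.h>

/* Same arithmetic as the kernel's container_of(): subtract the member's
 * offset from the member's address to recover the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head {
	void (*func)(struct fake_rcu_head *head);
};

struct table {
	int entries;
	struct fake_rcu_head rcu_head;	/* embedded, like mesh_table */
};

static void table_free_cb(struct fake_rcu_head *head)
{
	/* The callback only sees the head; recover the whole table. */
	struct table *tbl = container_of(head, struct table, rcu_head);

	printf("freeing table with %d entries\n", tbl->entries);
}

int main(void)
{
	struct table tbl = { .entries = 42 };

	/* call_rcu() would invoke this after a grace period; calling it
	 * directly here just shows the pointer recovery. */
	tbl.rcu_head.func = table_free_cb;
	tbl.rcu_head.func(&tbl.rcu_head);
	return 0;
}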
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 83ce48e31913..0d2faacc3e87 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,8 +36,8 @@ struct mpath_node {
36 struct mesh_path *mpath; 36 struct mesh_path *mpath;
37}; 37};
38 38
39static struct mesh_table *mesh_paths; 39static struct mesh_table __rcu *mesh_paths;
40static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ 40static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
41 41
42int mesh_paths_generation; 42int mesh_paths_generation;
43 43
@@ -48,17 +48,40 @@ int mesh_paths_generation;
48static DEFINE_RWLOCK(pathtbl_resize_lock); 48static DEFINE_RWLOCK(pathtbl_resize_lock);
49 49
50 50
51static inline struct mesh_table *resize_dereference_mesh_paths(void)
52{
53 return rcu_dereference_protected(mesh_paths,
54 lockdep_is_held(&pathtbl_resize_lock));
55}
56
57static inline struct mesh_table *resize_dereference_mpp_paths(void)
58{
59 return rcu_dereference_protected(mpp_paths,
60 lockdep_is_held(&pathtbl_resize_lock));
61}
62
63/*
64 * CAREFUL -- "tbl" must not be an expression,
65 * in particular not an rcu_dereference(), since
66 * it's used twice. So it is illegal to do
67 * for_each_mesh_entry(rcu_dereference(...), ...)
68 */
69#define for_each_mesh_entry(tbl, p, node, i) \
70 for (i = 0; i <= tbl->hash_mask; i++) \
71 hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
72
73
51static struct mesh_table *mesh_table_alloc(int size_order) 74static struct mesh_table *mesh_table_alloc(int size_order)
52{ 75{
53 int i; 76 int i;
54 struct mesh_table *newtbl; 77 struct mesh_table *newtbl;
55 78
56 newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL); 79 newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
57 if (!newtbl) 80 if (!newtbl)
58 return NULL; 81 return NULL;
59 82
60 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * 83 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
61 (1 << size_order), GFP_KERNEL); 84 (1 << size_order), GFP_ATOMIC);
62 85
63 if (!newtbl->hash_buckets) { 86 if (!newtbl->hash_buckets) {
64 kfree(newtbl); 87 kfree(newtbl);
@@ -66,7 +89,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
66 } 89 }
67 90
68 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * 91 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
69 (1 << size_order), GFP_KERNEL); 92 (1 << size_order), GFP_ATOMIC);
70 if (!newtbl->hashwlock) { 93 if (!newtbl->hashwlock) {
71 kfree(newtbl->hash_buckets); 94 kfree(newtbl->hash_buckets);
72 kfree(newtbl); 95 kfree(newtbl);
@@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
258 */ 281 */
259struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) 282struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
260{ 283{
284 struct mesh_table *tbl = rcu_dereference(mesh_paths);
261 struct mpath_node *node; 285 struct mpath_node *node;
262 struct hlist_node *p; 286 struct hlist_node *p;
263 int i; 287 int i;
264 int j = 0; 288 int j = 0;
265 289
266 for_each_mesh_entry(mesh_paths, p, node, i) { 290 for_each_mesh_entry(tbl, p, node, i) {
267 if (sdata && node->mpath->sdata != sdata) 291 if (sdata && node->mpath->sdata != sdata)
268 continue; 292 continue;
269 if (j++ == idx) { 293 if (j++ == idx) {
@@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
293{ 317{
294 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 318 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
295 struct ieee80211_local *local = sdata->local; 319 struct ieee80211_local *local = sdata->local;
320 struct mesh_table *tbl;
296 struct mesh_path *mpath, *new_mpath; 321 struct mesh_path *mpath, *new_mpath;
297 struct mpath_node *node, *new_node; 322 struct mpath_node *node, *new_node;
298 struct hlist_head *bucket; 323 struct hlist_head *bucket;
@@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
332 spin_lock_init(&new_mpath->state_lock); 357 spin_lock_init(&new_mpath->state_lock);
333 init_timer(&new_mpath->timer); 358 init_timer(&new_mpath->timer);
334 359
335 hash_idx = mesh_table_hash(dst, sdata, mesh_paths); 360 tbl = resize_dereference_mesh_paths();
336 bucket = &mesh_paths->hash_buckets[hash_idx]; 361
362 hash_idx = mesh_table_hash(dst, sdata, tbl);
363 bucket = &tbl->hash_buckets[hash_idx];
337 364
338 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 365 spin_lock_bh(&tbl->hashwlock[hash_idx]);
339 366
340 err = -EEXIST; 367 err = -EEXIST;
341 hlist_for_each_entry(node, n, bucket, list) { 368 hlist_for_each_entry(node, n, bucket, list) {
@@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
345 } 372 }
346 373
347 hlist_add_head_rcu(&new_node->list, bucket); 374 hlist_add_head_rcu(&new_node->list, bucket);
348 if (atomic_inc_return(&mesh_paths->entries) >= 375 if (atomic_inc_return(&tbl->entries) >=
349 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 376 tbl->mean_chain_len * (tbl->hash_mask + 1))
350 grow = 1; 377 grow = 1;
351 378
352 mesh_paths_generation++; 379 mesh_paths_generation++;
353 380
354 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 381 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
355 read_unlock_bh(&pathtbl_resize_lock); 382 read_unlock_bh(&pathtbl_resize_lock);
356 if (grow) { 383 if (grow) {
357 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 384 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
360 return 0; 387 return 0;
361 388
362err_exists: 389err_exists:
363 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 390 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
364 read_unlock_bh(&pathtbl_resize_lock); 391 read_unlock_bh(&pathtbl_resize_lock);
365 kfree(new_node); 392 kfree(new_node);
366err_node_alloc: 393err_node_alloc:
@@ -370,58 +397,59 @@ err_path_alloc:
370 return err; 397 return err;
371} 398}
372 399
400static void mesh_table_free_rcu(struct rcu_head *rcu)
401{
402 struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
403
404 mesh_table_free(tbl, false);
405}
406
373void mesh_mpath_table_grow(void) 407void mesh_mpath_table_grow(void)
374{ 408{
375 struct mesh_table *oldtbl, *newtbl; 409 struct mesh_table *oldtbl, *newtbl;
376 410
377 rcu_read_lock();
378 newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
379 if (!newtbl)
380 return;
381 write_lock_bh(&pathtbl_resize_lock); 411 write_lock_bh(&pathtbl_resize_lock);
382 oldtbl = mesh_paths; 412 oldtbl = resize_dereference_mesh_paths();
383 if (mesh_table_grow(mesh_paths, newtbl) < 0) { 413 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
384 rcu_read_unlock(); 414 if (!newtbl)
415 goto out;
416 if (mesh_table_grow(oldtbl, newtbl) < 0) {
385 __mesh_table_free(newtbl); 417 __mesh_table_free(newtbl);
386 write_unlock_bh(&pathtbl_resize_lock); 418 goto out;
387 return;
388 } 419 }
389 rcu_read_unlock();
390 rcu_assign_pointer(mesh_paths, newtbl); 420 rcu_assign_pointer(mesh_paths, newtbl);
391 write_unlock_bh(&pathtbl_resize_lock);
392 421
393 synchronize_rcu(); 422 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
394 mesh_table_free(oldtbl, false); 423
424 out:
425 write_unlock_bh(&pathtbl_resize_lock);
395} 426}
396 427
397void mesh_mpp_table_grow(void) 428void mesh_mpp_table_grow(void)
398{ 429{
399 struct mesh_table *oldtbl, *newtbl; 430 struct mesh_table *oldtbl, *newtbl;
400 431
401 rcu_read_lock();
402 newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
403 if (!newtbl)
404 return;
405 write_lock_bh(&pathtbl_resize_lock); 432 write_lock_bh(&pathtbl_resize_lock);
406 oldtbl = mpp_paths; 433 oldtbl = resize_dereference_mpp_paths();
407 if (mesh_table_grow(mpp_paths, newtbl) < 0) { 434 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
408 rcu_read_unlock(); 435 if (!newtbl)
436 goto out;
437 if (mesh_table_grow(oldtbl, newtbl) < 0) {
409 __mesh_table_free(newtbl); 438 __mesh_table_free(newtbl);
410 write_unlock_bh(&pathtbl_resize_lock); 439 goto out;
411 return;
412 } 440 }
413 rcu_read_unlock();
414 rcu_assign_pointer(mpp_paths, newtbl); 441 rcu_assign_pointer(mpp_paths, newtbl);
415 write_unlock_bh(&pathtbl_resize_lock); 442 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
416 443
417 synchronize_rcu(); 444 out:
418 mesh_table_free(oldtbl, false); 445 write_unlock_bh(&pathtbl_resize_lock);
419} 446}
420 447
421int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) 448int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
422{ 449{
423 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 450 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
424 struct ieee80211_local *local = sdata->local; 451 struct ieee80211_local *local = sdata->local;
452 struct mesh_table *tbl;
425 struct mesh_path *mpath, *new_mpath; 453 struct mesh_path *mpath, *new_mpath;
426 struct mpath_node *node, *new_node; 454 struct mpath_node *node, *new_node;
427 struct hlist_head *bucket; 455 struct hlist_head *bucket;
@@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
456 new_mpath->exp_time = jiffies; 484 new_mpath->exp_time = jiffies;
457 spin_lock_init(&new_mpath->state_lock); 485 spin_lock_init(&new_mpath->state_lock);
458 486
459 hash_idx = mesh_table_hash(dst, sdata, mpp_paths); 487 tbl = resize_dereference_mpp_paths();
460 bucket = &mpp_paths->hash_buckets[hash_idx];
461 488
462 spin_lock_bh(&mpp_paths->hashwlock[hash_idx]); 489 hash_idx = mesh_table_hash(dst, sdata, tbl);
490 bucket = &tbl->hash_buckets[hash_idx];
491
492 spin_lock_bh(&tbl->hashwlock[hash_idx]);
463 493
464 err = -EEXIST; 494 err = -EEXIST;
465 hlist_for_each_entry(node, n, bucket, list) { 495 hlist_for_each_entry(node, n, bucket, list) {
@@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
469 } 499 }
470 500
471 hlist_add_head_rcu(&new_node->list, bucket); 501 hlist_add_head_rcu(&new_node->list, bucket);
472 if (atomic_inc_return(&mpp_paths->entries) >= 502 if (atomic_inc_return(&tbl->entries) >=
473 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) 503 tbl->mean_chain_len * (tbl->hash_mask + 1))
474 grow = 1; 504 grow = 1;
475 505
476 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 506 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
477 read_unlock_bh(&pathtbl_resize_lock); 507 read_unlock_bh(&pathtbl_resize_lock);
478 if (grow) { 508 if (grow) {
479 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 509 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
482 return 0; 512 return 0;
483 513
484err_exists: 514err_exists:
485 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 515 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
486 read_unlock_bh(&pathtbl_resize_lock); 516 read_unlock_bh(&pathtbl_resize_lock);
487 kfree(new_node); 517 kfree(new_node);
488err_node_alloc: 518err_node_alloc:
@@ -502,6 +532,7 @@ err_path_alloc:
502 */ 532 */
503void mesh_plink_broken(struct sta_info *sta) 533void mesh_plink_broken(struct sta_info *sta)
504{ 534{
535 struct mesh_table *tbl;
505 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 536 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
506 struct mesh_path *mpath; 537 struct mesh_path *mpath;
507 struct mpath_node *node; 538 struct mpath_node *node;
@@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta)
510 int i; 541 int i;
511 542
512 rcu_read_lock(); 543 rcu_read_lock();
513 for_each_mesh_entry(mesh_paths, p, node, i) { 544 tbl = rcu_dereference(mesh_paths);
545 for_each_mesh_entry(tbl, p, node, i) {
514 mpath = node->mpath; 546 mpath = node->mpath;
515 spin_lock_bh(&mpath->state_lock); 547 spin_lock_bh(&mpath->state_lock);
516 if (mpath->next_hop == sta && 548 if (rcu_dereference(mpath->next_hop) == sta &&
517 mpath->flags & MESH_PATH_ACTIVE && 549 mpath->flags & MESH_PATH_ACTIVE &&
518 !(mpath->flags & MESH_PATH_FIXED)) { 550 !(mpath->flags & MESH_PATH_FIXED)) {
519 mpath->flags &= ~MESH_PATH_ACTIVE; 551 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
542 */ 574 */
543void mesh_path_flush_by_nexthop(struct sta_info *sta) 575void mesh_path_flush_by_nexthop(struct sta_info *sta)
544{ 576{
577 struct mesh_table *tbl;
545 struct mesh_path *mpath; 578 struct mesh_path *mpath;
546 struct mpath_node *node; 579 struct mpath_node *node;
547 struct hlist_node *p; 580 struct hlist_node *p;
548 int i; 581 int i;
549 582
550 for_each_mesh_entry(mesh_paths, p, node, i) { 583 rcu_read_lock();
584 tbl = rcu_dereference(mesh_paths);
585 for_each_mesh_entry(tbl, p, node, i) {
551 mpath = node->mpath; 586 mpath = node->mpath;
552 if (mpath->next_hop == sta) 587 if (rcu_dereference(mpath->next_hop) == sta)
553 mesh_path_del(mpath->dst, mpath->sdata); 588 mesh_path_del(mpath->dst, mpath->sdata);
554 } 589 }
590 rcu_read_unlock();
555} 591}
556 592
557void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 593void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
558{ 594{
595 struct mesh_table *tbl;
559 struct mesh_path *mpath; 596 struct mesh_path *mpath;
560 struct mpath_node *node; 597 struct mpath_node *node;
561 struct hlist_node *p; 598 struct hlist_node *p;
562 int i; 599 int i;
563 600
564 for_each_mesh_entry(mesh_paths, p, node, i) { 601 rcu_read_lock();
602 tbl = rcu_dereference(mesh_paths);
603 for_each_mesh_entry(tbl, p, node, i) {
565 mpath = node->mpath; 604 mpath = node->mpath;
566 if (mpath->sdata == sdata) 605 if (mpath->sdata == sdata)
567 mesh_path_del(mpath->dst, mpath->sdata); 606 mesh_path_del(mpath->dst, mpath->sdata);
568 } 607 }
608 rcu_read_unlock();
569} 609}
570 610
571static void mesh_path_node_reclaim(struct rcu_head *rp) 611static void mesh_path_node_reclaim(struct rcu_head *rp)
@@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
589 */ 629 */
590int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 630int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
591{ 631{
632 struct mesh_table *tbl;
592 struct mesh_path *mpath; 633 struct mesh_path *mpath;
593 struct mpath_node *node; 634 struct mpath_node *node;
594 struct hlist_head *bucket; 635 struct hlist_head *bucket;
@@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
597 int err = 0; 638 int err = 0;
598 639
599 read_lock_bh(&pathtbl_resize_lock); 640 read_lock_bh(&pathtbl_resize_lock);
600 hash_idx = mesh_table_hash(addr, sdata, mesh_paths); 641 tbl = resize_dereference_mesh_paths();
601 bucket = &mesh_paths->hash_buckets[hash_idx]; 642 hash_idx = mesh_table_hash(addr, sdata, tbl);
643 bucket = &tbl->hash_buckets[hash_idx];
602 644
603 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 645 spin_lock_bh(&tbl->hashwlock[hash_idx]);
604 hlist_for_each_entry(node, n, bucket, list) { 646 hlist_for_each_entry(node, n, bucket, list) {
605 mpath = node->mpath; 647 mpath = node->mpath;
606 if (mpath->sdata == sdata && 648 if (mpath->sdata == sdata &&
607 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
608 spin_lock_bh(&mpath->state_lock); 650 spin_lock_bh(&mpath->state_lock);
609 mpath->flags |= MESH_PATH_RESOLVING; 651 mpath->flags |= MESH_PATH_RESOLVING;
610 hlist_del_rcu(&node->list); 652 hlist_del_rcu(&node->list);
611 call_rcu(&node->rcu, mesh_path_node_reclaim); 653 call_rcu(&node->rcu, mesh_path_node_reclaim);
612 atomic_dec(&mesh_paths->entries); 654 atomic_dec(&tbl->entries);
613 spin_unlock_bh(&mpath->state_lock); 655 spin_unlock_bh(&mpath->state_lock);
614 goto enddel; 656 goto enddel;
615 } 657 }
@@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
618 err = -ENXIO; 660 err = -ENXIO;
619enddel: 661enddel:
620 mesh_paths_generation++; 662 mesh_paths_generation++;
621 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 663 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
622 read_unlock_bh(&pathtbl_resize_lock); 664 read_unlock_bh(&pathtbl_resize_lock);
623 return err; 665 return err;
624} 666}
@@ -719,8 +761,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
719 struct mpath_node *node = hlist_entry(p, struct mpath_node, list); 761 struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
720 mpath = node->mpath; 762 mpath = node->mpath;
721 hlist_del_rcu(p); 763 hlist_del_rcu(p);
722 if (free_leafs) 764 if (free_leafs) {
765 del_timer_sync(&mpath->timer);
723 kfree(mpath); 766 kfree(mpath);
767 }
724 kfree(node); 768 kfree(node);
725} 769}
726 770
@@ -745,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
745 789
746int mesh_pathtbl_init(void) 790int mesh_pathtbl_init(void)
747{ 791{
748 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 792 struct mesh_table *tbl_path, *tbl_mpp;
749 if (!mesh_paths) 793
794 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
795 if (!tbl_path)
750 return -ENOMEM; 796 return -ENOMEM;
751 mesh_paths->free_node = &mesh_path_node_free; 797 tbl_path->free_node = &mesh_path_node_free;
752 mesh_paths->copy_node = &mesh_path_node_copy; 798 tbl_path->copy_node = &mesh_path_node_copy;
753 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; 799 tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
754 800
755 mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 801 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
756 if (!mpp_paths) { 802 if (!tbl_mpp) {
757 mesh_table_free(mesh_paths, true); 803 mesh_table_free(tbl_path, true);
758 return -ENOMEM; 804 return -ENOMEM;
759 } 805 }
760 mpp_paths->free_node = &mesh_path_node_free; 806 tbl_mpp->free_node = &mesh_path_node_free;
761 mpp_paths->copy_node = &mesh_path_node_copy; 807 tbl_mpp->copy_node = &mesh_path_node_copy;
762 mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; 808 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
809
810 /* Need no locking since this is during init */
811 RCU_INIT_POINTER(mesh_paths, tbl_path);
812 RCU_INIT_POINTER(mpp_paths, tbl_mpp);
763 813
764 return 0; 814 return 0;
765} 815}
766 816
767void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 817void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
768{ 818{
819 struct mesh_table *tbl;
769 struct mesh_path *mpath; 820 struct mesh_path *mpath;
770 struct mpath_node *node; 821 struct mpath_node *node;
771 struct hlist_node *p; 822 struct hlist_node *p;
772 int i; 823 int i;
773 824
774 read_lock_bh(&pathtbl_resize_lock); 825 rcu_read_lock();
775 for_each_mesh_entry(mesh_paths, p, node, i) { 826 tbl = rcu_dereference(mesh_paths);
827 for_each_mesh_entry(tbl, p, node, i) {
776 if (node->mpath->sdata != sdata) 828 if (node->mpath->sdata != sdata)
777 continue; 829 continue;
778 mpath = node->mpath; 830 mpath = node->mpath;
779 spin_lock_bh(&mpath->state_lock); 831 spin_lock_bh(&mpath->state_lock);
780 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 832 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
781 (!(mpath->flags & MESH_PATH_FIXED)) && 833 (!(mpath->flags & MESH_PATH_FIXED)) &&
782 time_after(jiffies, 834 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
783 mpath->exp_time + MESH_PATH_EXPIRE)) {
784 spin_unlock_bh(&mpath->state_lock); 835 spin_unlock_bh(&mpath->state_lock);
785 mesh_path_del(mpath->dst, mpath->sdata); 836 mesh_path_del(mpath->dst, mpath->sdata);
786 } else 837 } else
787 spin_unlock_bh(&mpath->state_lock); 838 spin_unlock_bh(&mpath->state_lock);
788 } 839 }
789 read_unlock_bh(&pathtbl_resize_lock); 840 rcu_read_unlock();
790} 841}
791 842
792void mesh_pathtbl_unregister(void) 843void mesh_pathtbl_unregister(void)
793{ 844{
794 mesh_table_free(mesh_paths, true); 845 /* no need for locking during exit path */
795 mesh_table_free(mpp_paths, true); 846 mesh_table_free(rcu_dereference_raw(mesh_paths), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true);
796} 848}
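
Note on the mesh_pathtbl.c hunks above: they convert the global mesh_paths/mpp_paths pointers to full RCU usage. Readers take rcu_read_lock() and rcu_dereference(), the writer side dereferences under pathtbl_resize_lock via the resize_dereference_*() helpers, the grown table is published with rcu_assign_pointer(), and the old table is freed via call_rcu() instead of blocking in synchronize_rcu(). A minimal sketch of the same grow pattern follows; struct my_table (with a size_order field and an embedded rcu_head) and my_table_alloc()/my_table_copy()/my_table_free() are hypothetical stand-ins for the mesh-specific types and helpers, not names from this diff.

/* Sketch of the RCU-protected table-grow pattern used above.
 * struct my_table { int size_order; struct rcu_head rcu_head; ... },
 * my_table_alloc(), my_table_copy() and my_table_free() are
 * hypothetical stand-ins for the mesh_pathtbl types and helpers.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static struct my_table __rcu *active_tbl;
static DEFINE_RWLOCK(resize_lock);

static void table_free_rcu(struct rcu_head *rcu)
{
        struct my_table *tbl = container_of(rcu, struct my_table, rcu_head);

        my_table_free(tbl);                     /* runs after a grace period */
}

static void table_grow(void)
{
        struct my_table *oldtbl, *newtbl;

        write_lock_bh(&resize_lock);
        /* writers are serialised by resize_lock, so this dereference
         * does not need rcu_read_lock() */
        oldtbl = rcu_dereference_protected(active_tbl,
                                           lockdep_is_held(&resize_lock));
        newtbl = my_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (my_table_copy(oldtbl, newtbl) < 0) {
                my_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(active_tbl, newtbl);      /* publish the new table */
        call_rcu(&oldtbl->rcu_head, table_free_rcu); /* free the old one later */
out:
        write_unlock_bh(&resize_lock);
}

Using call_rcu() here means the grow path never sleeps waiting for a grace period, so it is safe to run with the resize lock held; the old table is only reclaimed once every reader that might still hold a pointer to it has left its RCU read-side critical section.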
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d20046b5d8f4..27af6723cb5e 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -719,6 +719,11 @@ void ieee80211_scan_work(struct work_struct *work)
719 * without scheduling a new work 719 * without scheduling a new work
720 */ 720 */
721 do { 721 do {
722 if (!ieee80211_sdata_running(sdata)) {
723 aborted = true;
724 goto out_complete;
725 }
726
722 switch (local->next_scan_state) { 727 switch (local->next_scan_state) {
723 case SCAN_DECISION: 728 case SCAN_DECISION:
724 /* if no more bands/channels left, complete scan */ 729 /* if no more bands/channels left, complete scan */
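
The scan.c hunk adds an ieee80211_sdata_running() check at the top of every pass through the scan state machine, so a scan in progress is completed as aborted as soon as the interface is brought down. A sketch of that shape; struct my_sdata, sdata_running(), scan_step() and scan_completed() are hypothetical stand-ins for the mac80211 types and helpers.

/* Sketch of the early-abort check added to ieee80211_scan_work() above. */
#include <linux/types.h>

static void my_scan_work(struct my_sdata *sdata)
{
        bool aborted = false;

        do {
                if (!sdata_running(sdata)) {
                        /* interface went down mid-scan: finish as aborted */
                        aborted = true;
                        break;
                }
        } while (scan_step(sdata));     /* returns false once the scan is done */

        scan_completed(sdata, aborted);
}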
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5fe4f3b04ed3..6ef64adf7362 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1985,7 +1985,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1985 struct sock *s = v; 1985 struct sock *s = v;
1986 struct netlink_sock *nlk = nlk_sk(s); 1986 struct netlink_sock *nlk = nlk_sk(s);
1987 1987
1988 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n", 1988 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
1989 s, 1989 s,
1990 s->sk_protocol, 1990 s->sk_protocol,
1991 nlk->pid, 1991 nlk->pid,
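
This hunk, and the matching ones in af_packet.c, phonet/socket.c, sctp/proc.c and af_unix.c further down, switch the /proc seq_printf() output from %p to %pK so that raw kernel socket addresses are hidden from unprivileged readers according to the kptr_restrict sysctl. A minimal sketch of the specifier in a hypothetical seq_file show callback:

/* Sketch: printing a kernel pointer with %pK so it is censored
 * according to /proc/sys/kernel/kptr_restrict. my_seq_show() and the
 * object being printed are hypothetical.
 */
#include <linux/seq_file.h>

static int my_seq_show(struct seq_file *seq, void *v)
{
        /* %pK prints the real address only to sufficiently privileged
         * readers; others see a zeroed value. */
        seq_printf(seq, "%pK refcnt=%d\n", v, 1);
        return 0;
}

Setting kptr_restrict to 1 or 2 controls how aggressively %pK output is censored; plain %p output is unaffected, which is why these call sites are converted.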
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 549527bca87a..925f715686a5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2706,7 +2706,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2706 const struct packet_sock *po = pkt_sk(s); 2706 const struct packet_sock *po = pkt_sk(s);
2707 2707
2708 seq_printf(seq, 2708 seq_printf(seq,
2709 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", 2709 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
2710 s, 2710 s,
2711 atomic_read(&s->sk_refcnt), 2711 atomic_read(&s->sk_refcnt),
2712 s->sk_type, 2712 s->sk_type,
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 8c5bfcef92cb..ab07711cf2f4 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -607,7 +607,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
607 struct pn_sock *pn = pn_sk(sk); 607 struct pn_sock *pn = pn_sk(sk);
608 608
609 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " 609 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
610 "%d %p %d%n", 610 "%d %pK %d%n",
611 sk->sk_protocol, pn->sobject, pn->dobject, 611 sk->sk_protocol, pn->sobject, pn->dobject,
612 pn->resource, sk->sk_state, 612 pn->resource, sk->sk_state,
613 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), 613 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
diff --git a/net/rds/ib.c b/net/rds/ib.c
index cce19f95c624..3b83086bcc30 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr)
325 /* Create a CMA ID and try to bind it. This catches both 325 /* Create a CMA ID and try to bind it. This catches both
326 * IB and iWARP capable NICs. 326 * IB and iWARP capable NICs.
327 */ 327 */
328 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 328 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
329 if (IS_ERR(cm_id)) 329 if (IS_ERR(cm_id))
330 return PTR_ERR(cm_id); 330 return PTR_ERR(cm_id);
331 331
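
Every rdma_create_id() call site in this diff gains a fourth argument: the RDMA CM API now takes the QP type when the CM ID is created, and the RDS, svcrdma and xprtrdma users all pass IB_QPT_RC for reliable-connected queue pairs. A hedged sketch of the updated call, with a hypothetical event handler and context:

/* Sketch of the four-argument rdma_create_id() used throughout this
 * diff. my_cm_handler() and my_ctx are hypothetical caller state.
 */
#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>

static struct rdma_cm_id *make_rc_cm_id(rdma_cm_event_handler my_cm_handler,
                                        void *my_ctx)
{
        struct rdma_cm_id *cm_id;

        cm_id = rdma_create_id(my_cm_handler, my_ctx, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(cm_id))
                return NULL;    /* a real caller would propagate PTR_ERR(cm_id) */

        return cm_id;
}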
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ee369d201a65..fd453dd5124b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn)
587 /* XXX I wonder what affect the port space has */ 587 /* XXX I wonder what affect the port space has */
588 /* delegate cm event handler to rdma_transport */ 588 /* delegate cm event handler to rdma_transport */
589 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, 589 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
590 RDMA_PS_TCP); 590 RDMA_PS_TCP, IB_QPT_RC);
591 if (IS_ERR(ic->i_cm_id)) { 591 if (IS_ERR(ic->i_cm_id)) {
592 ret = PTR_ERR(ic->i_cm_id); 592 ret = PTR_ERR(ic->i_cm_id);
593 ic->i_cm_id = NULL; 593 ic->i_cm_id = NULL;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 5a9676fe594f..f7474844f096 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr)
226 /* Create a CMA ID and try to bind it. This catches both 226 /* Create a CMA ID and try to bind it. This catches both
227 * IB and iWARP capable NICs. 227 * IB and iWARP capable NICs.
228 */ 228 */
229 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 229 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
230 if (IS_ERR(cm_id)) 230 if (IS_ERR(cm_id))
231 return PTR_ERR(cm_id); 231 return PTR_ERR(cm_id);
232 232
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3a60a15d1b4a..c12db66f24c7 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn)
522 /* XXX I wonder what affect the port space has */ 522 /* XXX I wonder what affect the port space has */
523 /* delegate cm event handler to rdma_transport */ 523 /* delegate cm event handler to rdma_transport */
524 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, 524 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
525 RDMA_PS_TCP); 525 RDMA_PS_TCP, IB_QPT_RC);
526 if (IS_ERR(ic->i_cm_id)) { 526 if (IS_ERR(ic->i_cm_id)) {
527 ret = PTR_ERR(ic->i_cm_id); 527 ret = PTR_ERR(ic->i_cm_id);
528 ic->i_cm_id = NULL; 528 ic->i_cm_id = NULL;
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 4195a0539829..f8760e1b6688 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void)
158 struct rdma_cm_id *cm_id; 158 struct rdma_cm_id *cm_id;
159 int ret; 159 int ret;
160 160
161 cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); 161 cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP,
162 IB_QPT_RC);
162 if (IS_ERR(cm_id)) { 163 if (IS_ERR(cm_id)) {
163 ret = PTR_ERR(cm_id); 164 ret = PTR_ERR(cm_id);
164 printk(KERN_ERR "RDS/RDMA: failed to setup listener, " 165 printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 48464ca13b24..78efe895b663 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -33,3 +33,12 @@ config RFKILL_REGULATOR
33 33
34 To compile this driver as a module, choose M here: the module will 34 To compile this driver as a module, choose M here: the module will
35 be called rfkill-regulator. 35 be called rfkill-regulator.
36
37config RFKILL_GPIO
38 tristate "GPIO RFKILL driver"
39 depends on RFKILL && GPIOLIB && HAVE_CLK
40 default n
41 help
42 If you say yes here you get support of a generic gpio RFKILL
43 driver. The platform should fill in the appropriate fields in the
44 rfkill_gpio_platform_data structure and pass that to the driver.
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index d9a5a58ffd8c..311768783f4a 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -6,3 +6,4 @@ rfkill-y += core.o
6rfkill-$(CONFIG_RFKILL_INPUT) += input.o 6rfkill-$(CONFIG_RFKILL_INPUT) += input.o
7obj-$(CONFIG_RFKILL) += rfkill.o 7obj-$(CONFIG_RFKILL) += rfkill.o
8obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o 8obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o
9obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
new file mode 100644
index 000000000000..256c5ddd2d72
--- /dev/null
+++ b/net/rfkill/rfkill-gpio.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2011, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/gpio.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/rfkill.h>
24#include <linux/platform_device.h>
25#include <linux/clk.h>
26#include <linux/slab.h>
27
28#include <linux/rfkill-gpio.h>
29
30enum rfkill_gpio_clk_state {
31 UNSPECIFIED = 0,
32 PWR_ENABLED,
33 PWR_DISABLED
34};
35
36#define PWR_CLK_SET(_RF, _EN) \
37 ((_RF)->pwr_clk_enabled = (!(_EN) ? PWR_ENABLED : PWR_DISABLED))
38#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED)
39#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED)
40
41struct rfkill_gpio_data {
42 struct rfkill_gpio_platform_data *pdata;
43 struct rfkill *rfkill_dev;
44 char *reset_name;
45 char *shutdown_name;
46 enum rfkill_gpio_clk_state pwr_clk_enabled;
47 struct clk *pwr_clk;
48};
49
50static int rfkill_gpio_set_power(void *data, bool blocked)
51{
52 struct rfkill_gpio_data *rfkill = data;
53
54 if (blocked) {
55 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
56 gpio_direction_output(rfkill->pdata->shutdown_gpio, 0);
57 if (gpio_is_valid(rfkill->pdata->reset_gpio))
58 gpio_direction_output(rfkill->pdata->reset_gpio, 0);
59 if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
60 clk_disable(rfkill->pwr_clk);
61 } else {
62 if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill))
63 clk_enable(rfkill->pwr_clk);
64 if (gpio_is_valid(rfkill->pdata->reset_gpio))
65 gpio_direction_output(rfkill->pdata->reset_gpio, 1);
66 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
67 gpio_direction_output(rfkill->pdata->shutdown_gpio, 1);
68 }
69
70 if (rfkill->pwr_clk)
71 PWR_CLK_SET(rfkill, blocked);
72
73 return 0;
74}
75
76static const struct rfkill_ops rfkill_gpio_ops = {
77 .set_block = rfkill_gpio_set_power,
78};
79
80static int rfkill_gpio_probe(struct platform_device *pdev)
81{
82 struct rfkill_gpio_data *rfkill;
83 struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
84 int ret = 0;
85 int len = 0;
86
87 if (!pdata) {
88 pr_warn("%s: No platform data specified\n", __func__);
89 return -EINVAL;
90 }
91
92 /* make sure at-least one of the GPIO is defined and that
93 * a name is specified for this instance */
94 if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) &&
95 !gpio_is_valid(pdata->shutdown_gpio))) {
96 pr_warn("%s: invalid platform data\n", __func__);
97 return -EINVAL;
98 }
99
100 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
101 if (!rfkill)
102 return -ENOMEM;
103
104 rfkill->pdata = pdata;
105
106 len = strlen(pdata->name);
107 rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL);
108 if (!rfkill->reset_name) {
109 ret = -ENOMEM;
110 goto fail_alloc;
111 }
112
113 rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL);
114 if (!rfkill->shutdown_name) {
115 ret = -ENOMEM;
116 goto fail_reset_name;
117 }
118
119 snprintf(rfkill->reset_name, len + 6 , "%s_reset", pdata->name);
120 snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", pdata->name);
121
122 if (pdata->power_clk_name) {
123 rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name);
124 if (IS_ERR(rfkill->pwr_clk)) {
125 pr_warn("%s: can't find pwr_clk.\n", __func__);
126 goto fail_shutdown_name;
127 }
128 }
129
130 if (gpio_is_valid(pdata->reset_gpio)) {
131 ret = gpio_request(pdata->reset_gpio, rfkill->reset_name);
132 if (ret) {
133 pr_warn("%s: failed to get reset gpio.\n", __func__);
134 goto fail_clock;
135 }
136 }
137
138 if (gpio_is_valid(pdata->shutdown_gpio)) {
139 ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name);
140 if (ret) {
141 pr_warn("%s: failed to get shutdown gpio.\n", __func__);
142 goto fail_reset;
143 }
144 }
145
146 rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type,
147 &rfkill_gpio_ops, rfkill);
148 if (!rfkill->rfkill_dev)
149 goto fail_shutdown;
150
151 ret = rfkill_register(rfkill->rfkill_dev);
152 if (ret < 0)
153 goto fail_rfkill;
154
155 platform_set_drvdata(pdev, rfkill);
156
157 dev_info(&pdev->dev, "%s device registered.\n", pdata->name);
158
159 return 0;
160
161fail_rfkill:
162 rfkill_destroy(rfkill->rfkill_dev);
163fail_shutdown:
164 if (gpio_is_valid(pdata->shutdown_gpio))
165 gpio_free(pdata->shutdown_gpio);
166fail_reset:
167 if (gpio_is_valid(pdata->reset_gpio))
168 gpio_free(pdata->reset_gpio);
169fail_clock:
170 if (rfkill->pwr_clk)
171 clk_put(rfkill->pwr_clk);
172fail_shutdown_name:
173 kfree(rfkill->shutdown_name);
174fail_reset_name:
175 kfree(rfkill->reset_name);
176fail_alloc:
177 kfree(rfkill);
178
179 return ret;
180}
181
182static int rfkill_gpio_remove(struct platform_device *pdev)
183{
184 struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
185
186 rfkill_unregister(rfkill->rfkill_dev);
187 rfkill_destroy(rfkill->rfkill_dev);
188 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
189 gpio_free(rfkill->pdata->shutdown_gpio);
190 if (gpio_is_valid(rfkill->pdata->reset_gpio))
191 gpio_free(rfkill->pdata->reset_gpio);
192 if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
193 clk_disable(rfkill->pwr_clk);
194 if (rfkill->pwr_clk)
195 clk_put(rfkill->pwr_clk);
196 kfree(rfkill->shutdown_name);
197 kfree(rfkill->reset_name);
198 kfree(rfkill);
199
200 return 0;
201}
202
203static struct platform_driver rfkill_gpio_driver = {
204 .probe = rfkill_gpio_probe,
205 .remove = __devexit_p(rfkill_gpio_remove),
206 .driver = {
207 .name = "rfkill_gpio",
208 .owner = THIS_MODULE,
209 },
210};
211
212static int __init rfkill_gpio_init(void)
213{
214 return platform_driver_register(&rfkill_gpio_driver);
215}
216
217static void __exit rfkill_gpio_exit(void)
218{
219 platform_driver_unregister(&rfkill_gpio_driver);
220}
221
222module_init(rfkill_gpio_init);
223module_exit(rfkill_gpio_exit);
224
225MODULE_DESCRIPTION("gpio rfkill");
226MODULE_AUTHOR("NVIDIA");
227MODULE_LICENSE("GPL");
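
The new rfkill-gpio driver is configured entirely through platform data: a board file registers a platform device named "rfkill_gpio" and fills in the rfkill_gpio_platform_data fields the probe routine above reads (name, reset_gpio, shutdown_gpio, type, and optionally power_clk_name). A sketch of such a registration follows; the GPIO numbers, the "bt_rfkill" name and the device id are made-up example values, not anything from this diff.

/* Sketch of board code hooking up the rfkill-gpio driver added above.
 * The GPIO numbers and the "bt_rfkill" name are hypothetical examples.
 */
#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/rfkill-gpio.h>

static struct rfkill_gpio_platform_data bt_rfkill_pdata = {
        .name           = "bt_rfkill",
        .reset_gpio     = 100,                  /* made-up GPIO numbers */
        .shutdown_gpio  = 101,
        .type           = RFKILL_TYPE_BLUETOOTH,
};

static struct platform_device bt_rfkill_device = {
        .name   = "rfkill_gpio",                /* must match the driver name */
        .id     = -1,
        .dev    = {
                .platform_data = &bt_rfkill_pdata,
        },
};

static int __init board_bt_rfkill_init(void)
{
        return platform_device_register(&bt_rfkill_device);
}

Blocking the switch then drives both GPIOs low and gates the optional clock; unblocking enables the clock first and raises reset and shutdown, as the set_block handler above shows.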
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7ef87f9eb675..b6ea6afa55b0 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
361{ 361{
362 struct sfq_sched_data *q = qdisc_priv(sch); 362 struct sfq_sched_data *q = qdisc_priv(sch);
363 unsigned int hash; 363 unsigned int hash;
364 sfq_index x; 364 sfq_index x, qlen;
365 struct sfq_slot *slot; 365 struct sfq_slot *slot;
366 int uninitialized_var(ret); 366 int uninitialized_var(ret);
367 367
@@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
405 if (++sch->q.qlen <= q->limit) 405 if (++sch->q.qlen <= q->limit)
406 return NET_XMIT_SUCCESS; 406 return NET_XMIT_SUCCESS;
407 407
408 qlen = slot->qlen;
408 sfq_drop(sch); 409 sfq_drop(sch);
409 return NET_XMIT_CN; 410 /* Return Congestion Notification only if we dropped a packet
410} 411 * from this flow.
411 412 */
412static struct sk_buff * 413 return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
413sfq_peek(struct Qdisc *sch)
414{
415 struct sfq_sched_data *q = qdisc_priv(sch);
416
417 /* No active slots */
418 if (q->tail == NULL)
419 return NULL;
420
421 return q->slots[q->tail->next].skblist_next;
422} 414}
423 415
424static struct sk_buff * 416static struct sk_buff *
@@ -702,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
702 .priv_size = sizeof(struct sfq_sched_data), 694 .priv_size = sizeof(struct sfq_sched_data),
703 .enqueue = sfq_enqueue, 695 .enqueue = sfq_enqueue,
704 .dequeue = sfq_dequeue, 696 .dequeue = sfq_dequeue,
705 .peek = sfq_peek, 697 .peek = qdisc_peek_dequeued,
706 .drop = sfq_drop, 698 .drop = sfq_drop,
707 .init = sfq_init, 699 .init = sfq_init,
708 .reset = sfq_reset, 700 .reset = sfq_reset,
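
Two SFQ changes are folded into this hunk: enqueue now returns NET_XMIT_CN only when sfq_drop() actually removed a packet from the same flow (detected by comparing the slot's queue length before and after the drop, so an innocent flow is not told to back off), and the private sfq_peek() is dropped in favour of the generic qdisc_peek_dequeued() helper. A sketch of the congestion-notification decision, with a hypothetical slot type and drop helper:

/* Sketch of the NET_XMIT_CN decision made in sfq_enqueue() above.
 * struct my_slot and my_drop() are hypothetical stand-ins for the
 * SFQ slot and sfq_drop().
 */
#include <net/sch_generic.h>

static int enqueue_over_limit(struct Qdisc *sch, struct my_slot *slot,
                              unsigned int limit)
{
        unsigned int qlen;

        if (++sch->q.qlen <= limit)
                return NET_XMIT_SUCCESS;

        qlen = slot->qlen;              /* remember this flow's depth */
        my_drop(sch);                   /* drops from the longest flow */

        /* Report congestion only if this flow is the one that paid. */
        return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
}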
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1a21c571aa03..525f97c467e9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -64,6 +64,7 @@
64/* Forward declarations for internal functions. */ 64/* Forward declarations for internal functions. */
65static void sctp_assoc_bh_rcv(struct work_struct *work); 65static void sctp_assoc_bh_rcv(struct work_struct *work);
66static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); 66static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
67static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
67 68
68/* Keep track of the new idr low so that we don't re-use association id 69/* Keep track of the new idr low so that we don't re-use association id
69 * numbers too fast. It is protected by they idr spin lock is in the 70 * numbers too fast. It is protected by they idr spin lock is in the
@@ -446,6 +447,9 @@ void sctp_association_free(struct sctp_association *asoc)
446 /* Free any cached ASCONF_ACK chunk. */ 447 /* Free any cached ASCONF_ACK chunk. */
447 sctp_assoc_free_asconf_acks(asoc); 448 sctp_assoc_free_asconf_acks(asoc);
448 449
450 /* Free the ASCONF queue. */
451 sctp_assoc_free_asconf_queue(asoc);
452
449 /* Free any cached ASCONF chunk. */ 453 /* Free any cached ASCONF chunk. */
450 if (asoc->addip_last_asconf) 454 if (asoc->addip_last_asconf)
451 sctp_chunk_free(asoc->addip_last_asconf); 455 sctp_chunk_free(asoc->addip_last_asconf);
@@ -1578,6 +1582,18 @@ retry:
1578 return error; 1582 return error;
1579} 1583}
1580 1584
1585/* Free the ASCONF queue */
1586static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1587{
1588 struct sctp_chunk *asconf;
1589 struct sctp_chunk *tmp;
1590
1591 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1592 list_del_init(&asconf->list);
1593 sctp_chunk_free(asconf);
1594 }
1595}
1596
1581/* Free asconf_ack cache */ 1597/* Free asconf_ack cache */
1582static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) 1598static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1583{ 1599{
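
The SCTP fix adds sctp_assoc_free_asconf_queue() and calls it from sctp_association_free(), so ASCONF chunks still queued on addip_chunk_list are released rather than leaked when the association is torn down. The walk uses the standard delete-while-iterating idiom; a minimal sketch with hypothetical chunk types:

/* Sketch of the free-a-queued-list idiom used by
 * sctp_assoc_free_asconf_queue() above. struct my_chunk and
 * my_chunk_free() are hypothetical.
 */
#include <linux/list.h>

static void free_chunk_list(struct list_head *head)
{
        struct my_chunk *chunk, *tmp;

        /* the _safe variant allows freeing the current entry inside
         * the loop */
        list_for_each_entry_safe(chunk, tmp, head, list) {
                list_del_init(&chunk->list);
                my_chunk_free(chunk);
        }
}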
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 61aacfbbaa92..05a6ce214714 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -212,7 +212,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
212 sctp_for_each_hentry(epb, node, &head->chain) { 212 sctp_for_each_hentry(epb, node, &head->chain) {
213 ep = sctp_ep(epb); 213 ep = sctp_ep(epb);
214 sk = epb->sk; 214 sk = epb->sk;
215 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 215 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
216 sctp_sk(sk)->type, sk->sk_state, hash, 216 sctp_sk(sk)->type, sk->sk_state, hash,
217 epb->bind_addr.port, 217 epb->bind_addr.port,
218 sock_i_uid(sk), sock_i_ino(sk)); 218 sock_i_uid(sk), sock_i_ino(sk));
@@ -316,7 +316,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
316 assoc = sctp_assoc(epb); 316 assoc = sctp_assoc(epb);
317 sk = epb->sk; 317 sk = epb->sk;
318 seq_printf(seq, 318 seq_printf(seq,
319 "%8p %8p %-3d %-3d %-2d %-4d " 319 "%8pK %8pK %-3d %-3d %-2d %-4d "
320 "%4d %8d %8d %7d %5lu %-5d %5d ", 320 "%4d %8d %8d %7d %5lu %-5d %5d ",
321 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 321 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
322 assoc->state, hash, 322 assoc->state, hash,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 67e31276682a..cd6e4aa19dbf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -326,10 +326,12 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
326 * Run memory cache shrinker. 326 * Run memory cache shrinker.
327 */ 327 */
328static int 328static int
329rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 329rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
330{ 330{
331 LIST_HEAD(free); 331 LIST_HEAD(free);
332 int res; 332 int res;
333 int nr_to_scan = sc->nr_to_scan;
334 gfp_t gfp_mask = sc->gfp_mask;
333 335
334 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) 336 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
335 return (nr_to_scan == 0) ? 0 : -1; 337 return (nr_to_scan == 0) ? 0 : -1;
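
The RPC credential-cache shrinker is converted to the newer shrinker interface, which hands the callback a struct shrink_control carrying nr_to_scan and gfp_mask instead of passing them as separate arguments. A hedged sketch of a callback in that form, with hypothetical cache helpers:

/* Sketch of a shrinker callback taking struct shrink_control, matching
 * the rpcauth conversion above. my_cache_prune() and my_cache_count()
 * are hypothetical.
 */
#include <linux/mm.h>

static int my_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        int nr_to_scan = sc->nr_to_scan;

        if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
                return (nr_to_scan == 0) ? 0 : -1;      /* cannot reclaim now */

        if (nr_to_scan)
                my_cache_prune(nr_to_scan);

        /* return an estimate of how many freeable objects remain */
        return my_cache_count();
}

static struct shrinker my_shrinker = {
        .shrink = my_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
};

The shrinker itself is still hooked up the usual way, with register_shrinker(&my_shrinker) at init time and unregister_shrinker() on teardown.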
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6c014dd3a20b..c3c232a88d94 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
695 return ERR_PTR(-ENOMEM); 695 return ERR_PTR(-ENOMEM);
696 xprt = &cma_xprt->sc_xprt; 696 xprt = &cma_xprt->sc_xprt;
697 697
698 listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); 698 listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
699 IB_QPT_RC);
699 if (IS_ERR(listen_id)) { 700 if (IS_ERR(listen_id)) {
700 ret = PTR_ERR(listen_id); 701 ret = PTR_ERR(listen_id);
701 dprintk("svcrdma: rdma_create_id failed = %d\n", ret); 702 dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index d4297dc43dc4..80f8da344df5 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
387 387
388 init_completion(&ia->ri_done); 388 init_completion(&ia->ri_done);
389 389
390 id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP); 390 id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
391 if (IS_ERR(id)) { 391 if (IS_ERR(id)) {
392 rc = PTR_ERR(id); 392 rc = PTR_ERR(id);
393 dprintk("RPC: %s: rdma_create_id() failed %i\n", 393 dprintk("RPC: %s: rdma_create_id() failed %i\n",
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b1d75beb7e20..0722a25a3a33 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2254,7 +2254,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2254 struct unix_sock *u = unix_sk(s); 2254 struct unix_sock *u = unix_sk(s);
2255 unix_state_lock(s); 2255 unix_state_lock(s);
2256 2256
2257 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", 2257 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2258 s, 2258 s,
2259 atomic_read(&s->sk_refcnt), 2259 atomic_read(&s->sk_refcnt),
2260 0, 2260 0,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index bf0fb40e3c8b..3dce1f167eba 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -245,6 +245,7 @@ struct cfg80211_event {
245 u16 status; 245 u16 status;
246 } cr; 246 } cr;
247 struct { 247 struct {
248 struct ieee80211_channel *channel;
248 u8 bssid[ETH_ALEN]; 249 u8 bssid[ETH_ALEN];
249 const u8 *req_ie; 250 const u8 *req_ie;
250 const u8 *resp_ie; 251 const u8 *resp_ie;
@@ -392,7 +393,9 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
392int cfg80211_disconnect(struct cfg80211_registered_device *rdev, 393int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
393 struct net_device *dev, u16 reason, 394 struct net_device *dev, u16 reason,
394 bool wextev); 395 bool wextev);
395void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, 396void __cfg80211_roamed(struct wireless_dev *wdev,
397 struct ieee80211_channel *channel,
398 const u8 *bssid,
396 const u8 *req_ie, size_t req_ie_len, 399 const u8 *req_ie, size_t req_ie_len,
397 const u8 *resp_ie, size_t resp_ie_len); 400 const u8 *resp_ie, size_t resp_ie_len);
398int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, 401int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2222ce08ee91..ec83f413a7ed 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3294,8 +3294,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3294 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3294 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3295 struct net_device *dev = info->user_ptr[1]; 3295 struct net_device *dev = info->user_ptr[1];
3296 struct cfg80211_scan_request *request; 3296 struct cfg80211_scan_request *request;
3297 struct cfg80211_ssid *ssid;
3298 struct ieee80211_channel *channel;
3299 struct nlattr *attr; 3297 struct nlattr *attr;
3300 struct wiphy *wiphy; 3298 struct wiphy *wiphy;
3301 int err, tmp, n_ssids = 0, n_channels, i; 3299 int err, tmp, n_ssids = 0, n_channels, i;
@@ -3342,8 +3340,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3342 return -EINVAL; 3340 return -EINVAL;
3343 3341
3344 request = kzalloc(sizeof(*request) 3342 request = kzalloc(sizeof(*request)
3345 + sizeof(*ssid) * n_ssids 3343 + sizeof(*request->ssids) * n_ssids
3346 + sizeof(channel) * n_channels 3344 + sizeof(*request->channels) * n_channels
3347 + ie_len, GFP_KERNEL); 3345 + ie_len, GFP_KERNEL);
3348 if (!request) 3346 if (!request)
3349 return -ENOMEM; 3347 return -ENOMEM;
@@ -3449,8 +3447,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
3449 struct cfg80211_sched_scan_request *request; 3447 struct cfg80211_sched_scan_request *request;
3450 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3448 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3451 struct net_device *dev = info->user_ptr[1]; 3449 struct net_device *dev = info->user_ptr[1];
3452 struct cfg80211_ssid *ssid;
3453 struct ieee80211_channel *channel;
3454 struct nlattr *attr; 3450 struct nlattr *attr;
3455 struct wiphy *wiphy; 3451 struct wiphy *wiphy;
3456 int err, tmp, n_ssids = 0, n_channels, i; 3452 int err, tmp, n_ssids = 0, n_channels, i;
@@ -3507,8 +3503,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
3507 return -EINVAL; 3503 return -EINVAL;
3508 3504
3509 request = kzalloc(sizeof(*request) 3505 request = kzalloc(sizeof(*request)
3510 + sizeof(*ssid) * n_ssids 3506 + sizeof(*request->ssids) * n_ssids
3511 + sizeof(channel) * n_channels 3507 + sizeof(*request->channels) * n_channels
3512 + ie_len, GFP_KERNEL); 3508 + ie_len, GFP_KERNEL);
3513 if (!request) 3509 if (!request)
3514 return -ENOMEM; 3510 return -ENOMEM;
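
The nl80211 hunks drop the unused local ssid and channel variables and size the single scan-request allocation from the request's own member types (sizeof(*request->ssids), sizeof(*request->channels)), which keeps the trailing-array arithmetic correct even if those member types change. A sketch of the pattern with a hypothetical request structure:

/* Sketch of sizing one allocation for a struct plus its trailing
 * arrays from the member types themselves, as in the nl80211 change
 * above. struct my_request, struct my_ssid and struct my_chan are
 * hypothetical.
 */
#include <linux/slab.h>
#include <linux/types.h>

struct my_chan;                         /* only pointers to it are stored */

struct my_ssid {
        u8 ssid[32];
        u8 ssid_len;
};

struct my_request {
        struct my_chan **channels;      /* both arrays live in the same  */
        struct my_ssid *ssids;          /* allocation as the struct      */
        int n_channels, n_ssids;
};

static struct my_request *alloc_request(int n_ssids, int n_channels,
                                        size_t ie_len)
{
        struct my_request *request;

        request = kzalloc(sizeof(*request)
                          + sizeof(*request->ssids) * n_ssids
                          + sizeof(*request->channels) * n_channels
                          + ie_len, GFP_KERNEL);
        if (!request)
                return NULL;

        /* pointer array first (stricter alignment), byte-sized structs after */
        request->channels = (void *)(request + 1);
        request->ssids = (void *)(request->channels + n_channels);
        request->n_channels = n_channels;
        request->n_ssids = n_ssids;
        return request;
}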
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index e17b0bee6bdc..b7b6ff8be553 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -250,7 +250,8 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
250 if (wdev->conn->params.privacy) 250 if (wdev->conn->params.privacy)
251 capa |= WLAN_CAPABILITY_PRIVACY; 251 capa |= WLAN_CAPABILITY_PRIVACY;
252 252
253 bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid, 253 bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
254 wdev->conn->params.bssid,
254 wdev->conn->params.ssid, 255 wdev->conn->params.ssid,
255 wdev->conn->params.ssid_len, 256 wdev->conn->params.ssid_len,
256 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, 257 WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
@@ -470,7 +471,10 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
470 } 471 }
471 472
472 if (!bss) 473 if (!bss)
473 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, 474 bss = cfg80211_get_bss(wdev->wiphy,
475 wdev->conn ? wdev->conn->params.channel :
476 NULL,
477 bssid,
474 wdev->ssid, wdev->ssid_len, 478 wdev->ssid, wdev->ssid_len,
475 WLAN_CAPABILITY_ESS, 479 WLAN_CAPABILITY_ESS,
476 WLAN_CAPABILITY_ESS); 480 WLAN_CAPABILITY_ESS);
@@ -538,7 +542,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
538} 542}
539EXPORT_SYMBOL(cfg80211_connect_result); 543EXPORT_SYMBOL(cfg80211_connect_result);
540 544
541void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, 545void __cfg80211_roamed(struct wireless_dev *wdev,
546 struct ieee80211_channel *channel,
547 const u8 *bssid,
542 const u8 *req_ie, size_t req_ie_len, 548 const u8 *req_ie, size_t req_ie_len,
543 const u8 *resp_ie, size_t resp_ie_len) 549 const u8 *resp_ie, size_t resp_ie_len)
544{ 550{
@@ -565,7 +571,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
565 cfg80211_put_bss(&wdev->current_bss->pub); 571 cfg80211_put_bss(&wdev->current_bss->pub);
566 wdev->current_bss = NULL; 572 wdev->current_bss = NULL;
567 573
568 bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, 574 bss = cfg80211_get_bss(wdev->wiphy, channel, bssid,
569 wdev->ssid, wdev->ssid_len, 575 wdev->ssid, wdev->ssid_len,
570 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 576 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
571 577
@@ -603,7 +609,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
603#endif 609#endif
604} 610}
605 611
606void cfg80211_roamed(struct net_device *dev, const u8 *bssid, 612void cfg80211_roamed(struct net_device *dev,
613 struct ieee80211_channel *channel,
614 const u8 *bssid,
607 const u8 *req_ie, size_t req_ie_len, 615 const u8 *req_ie, size_t req_ie_len,
608 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) 616 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
609{ 617{
@@ -619,6 +627,7 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
619 return; 627 return;
620 628
621 ev->type = EVENT_ROAMED; 629 ev->type = EVENT_ROAMED;
630 ev->rm.channel = channel;
622 memcpy(ev->rm.bssid, bssid, ETH_ALEN); 631 memcpy(ev->rm.bssid, bssid, ETH_ALEN);
623 ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); 632 ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
624 ev->rm.req_ie_len = req_ie_len; 633 ev->rm.req_ie_len = req_ie_len;
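
The roaming path now carries the channel of the new BSS: cfg80211_roamed() takes a struct ieee80211_channel *, stores it in the roam event, and __cfg80211_roamed() passes it to cfg80211_get_bss(), so the right BSS entry is found even when the same BSSID is seen on several channels. A hedged sketch of a driver-side call, where dev, chan, bssid and the IE buffers are hypothetical driver state:

/* Sketch of a driver reporting a roam with the channel, per the
 * cfg80211_roamed() signature change above.
 */
#include <net/cfg80211.h>

static void my_report_roam(struct net_device *dev,
                           struct ieee80211_channel *chan,
                           const u8 *bssid,
                           const u8 *req_ie, size_t req_ie_len,
                           const u8 *resp_ie, size_t resp_ie_len)
{
        /* channel first, so cfg80211 can disambiguate the BSS entry */
        cfg80211_roamed(dev, chan, bssid,
                        req_ie, req_ie_len,
                        resp_ie, resp_ie_len, GFP_KERNEL);
}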
diff --git a/net/wireless/util.c b/net/wireless/util.c
index f0536d44d43c..4d7b83fbc32f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -746,7 +746,7 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
746 NULL); 746 NULL);
747 break; 747 break;
748 case EVENT_ROAMED: 748 case EVENT_ROAMED:
749 __cfg80211_roamed(wdev, ev->rm.bssid, 749 __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid,
750 ev->rm.req_ie, ev->rm.req_ie_len, 750 ev->rm.req_ie, ev->rm.req_ie_len,
751 ev->rm.resp_ie, ev->rm.resp_ie_len); 751 ev->rm.resp_ie, ev->rm.resp_ie_len);
752 break; 752 break;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d8670810db65..8657f99bfb2b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -210,10 +210,10 @@ our $typeTypedefs = qr{(?x:
210 210
211our $logFunctions = qr{(?x: 211our $logFunctions = qr{(?x:
212 printk| 212 printk|
213 pr_(debug|dbg|vdbg|devel|info|warning|err|notice|alert|crit|emerg|cont)| 213 [a-z]+_(emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)|
214 (dev|netdev|netif)_(printk|dbg|vdbg|info|warn|err|notice|alert|crit|emerg|WARN)|
215 WARN| 214 WARN|
216 panic 215 panic|
216 MODULE_[A-Z_]+
217)}; 217)};
218 218
219our @typeList = ( 219our @typeList = (
@@ -1462,7 +1462,7 @@ sub process {
1462#80 column limit 1462#80 column limit
1463 if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ && 1463 if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
1464 $rawline !~ /^.\s*\*\s*\@$Ident\s/ && 1464 $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
1465 !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:,|\)\s*;)\s*$/ || 1465 !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:|,|\)\s*;)\s*$/ ||
1466 $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) && 1466 $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
1467 $length > 80) 1467 $length > 80)
1468 { 1468 {
@@ -2748,6 +2748,11 @@ sub process {
2748 WARN("sizeof(& should be avoided\n" . $herecurr); 2748 WARN("sizeof(& should be avoided\n" . $herecurr);
2749 } 2749 }
2750 2750
2751# check for line continuations in quoted strings with odd counts of "
2752 if ($rawline =~ /\\$/ && $rawline =~ tr/"/"/ % 2) {
2753 WARN("Avoid line continuations in quoted strings\n" . $herecurr);
2754 }
2755
2751# check for new externs in .c files. 2756# check for new externs in .c files.
2752 if ($realfile =~ /\.c$/ && defined $stat && 2757 if ($realfile =~ /\.c$/ && defined $stat &&
2753 $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s) 2758 $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl
index b444e89a0095..5e490a8ceca5 100755
--- a/scripts/checkversion.pl
+++ b/scripts/checkversion.pl
@@ -12,6 +12,7 @@ $| = 1;
12my $debugging; 12my $debugging;
13 13
14foreach my $file (@ARGV) { 14foreach my $file (@ARGV) {
15 next if $file =~ "include/linux/version\.h";
15 # Open this file. 16 # Open this file.
16 open( my $f, '<', $file ) 17 open( my $f, '<', $file )
17 or die "Can't open $file: $!\n"; 18 or die "Can't open $file: $!\n";
diff --git a/scripts/export_report.pl b/scripts/export_report.pl
index 04dce7c15f83..8f79b701de87 100644
--- a/scripts/export_report.pl
+++ b/scripts/export_report.pl
@@ -25,11 +25,12 @@ sub alphabetically {
25sub print_depends_on { 25sub print_depends_on {
26 my ($href) = @_; 26 my ($href) = @_;
27 print "\n"; 27 print "\n";
28 while (my ($mod, $list) = each %$href) { 28 for my $mod (sort keys %$href) {
29 my $list = $href->{$mod};
29 print "\t$mod:\n"; 30 print "\t$mod:\n";
30 foreach my $sym (sort numerically @{$list}) { 31 foreach my $sym (sort numerically @{$list}) {
31 my ($symbol, $no) = split /\s+/, $sym; 32 my ($symbol, $no) = split /\s+/, $sym;
32 printf("\t\t%-25s\t%-25d\n", $symbol, $no); 33 printf("\t\t%-25s\n", $symbol);
33 } 34 }
34 print "\n"; 35 print "\n";
35 } 36 }
@@ -49,8 +50,14 @@ sub usage {
49} 50}
50 51
51sub collectcfiles { 52sub collectcfiles {
52 my @file 53 my @file;
53 = `cat .tmp_versions/*.mod | grep '.*\.ko\$' | sed s/\.ko$/.mod.c/`; 54 while (<.tmp_versions/*.mod>) {
55 open my $fh, '<', $_ or die "cannot open $_: $!\n";
56 push (@file,
57 grep s/\.ko/.mod.c/, # change the suffix
58 grep m/.+\.ko/, # find the .ko path
59 <$fh>); # lines in opened file
60 }
54 chomp @file; 61 chomp @file;
55 return @file; 62 return @file;
56} 63}
@@ -95,6 +102,8 @@ close($module_symvers);
95# 102#
96# collect the usage count of each symbol. 103# collect the usage count of each symbol.
97# 104#
105my $modversion_warnings = 0;
106
98foreach my $thismod (@allcfiles) { 107foreach my $thismod (@allcfiles) {
99 my $module; 108 my $module;
100 109
@@ -125,7 +134,8 @@ foreach my $thismod (@allcfiles) {
125 } 134 }
126 } 135 }
127 if ($state != 2) { 136 if ($state != 2) {
128 print "WARNING:$thismod is not built with CONFIG_MODVERSION enabled\n"; 137 warn "WARNING:$thismod is not built with CONFIG_MODVERSIONS enabled\n";
138 $modversion_warnings++;
129 } 139 }
130 close($module); 140 close($module);
131} 141}
@@ -159,8 +169,12 @@ printf("SECTION 2:\n\tThis section reports export-symbol-usage of in-kernel
159modules. Each module lists the modules, and the symbols from that module that 169modules. Each module lists the modules, and the symbols from that module that
160it uses. Each listed symbol reports the number of modules using it\n"); 170it uses. Each listed symbol reports the number of modules using it\n");
161 171
172print "\nNOTE: Got $modversion_warnings CONFIG_MODVERSIONS warnings\n\n"
173 if $modversion_warnings;
174
162print "~"x80 , "\n"; 175print "~"x80 , "\n";
163while (my ($thismod, $list) = each %MODULE) { 176for my $thismod (sort keys %MODULE) {
177 my $list = $MODULE{$thismod};
164 my %depends; 178 my %depends;
165 $thismod =~ s/\.mod\.c/.ko/; 179 $thismod =~ s/\.mod\.c/.ko/;
166 print "\t\t\t$thismod\n"; 180 print "\t\t\t$thismod\n";
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 368ae306aee4..faa9a4701b6f 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -77,14 +77,15 @@ localyesconfig: $(obj)/streamline_config.pl $(obj)/conf
77# The symlink is used to repair a deficiency in arch/um 77# The symlink is used to repair a deficiency in arch/um
78update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h 78update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
79 $(Q)echo " GEN config" 79 $(Q)echo " GEN config"
80 $(Q)xgettext --default-domain=linux \ 80 $(Q)xgettext --default-domain=linux \
81 --add-comments --keyword=_ --keyword=N_ \ 81 --add-comments --keyword=_ --keyword=N_ \
82 --from-code=UTF-8 \ 82 --from-code=UTF-8 \
83 --files-from=scripts/kconfig/POTFILES.in \ 83 --files-from=$(srctree)/scripts/kconfig/POTFILES.in \
84 --directory=$(srctree) --directory=$(objtree) \
84 --output $(obj)/config.pot 85 --output $(obj)/config.pot
85 $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot 86 $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
86 $(Q)ln -fs Kconfig.i386 arch/um/Kconfig.arch 87 $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
87 $(Q)(for i in `ls arch/*/Kconfig`; \ 88 $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`; \
88 do \ 89 do \
89 echo " GEN $$i"; \ 90 echo " GEN $$i"; \
90 $(obj)/kxgettext $$i \ 91 $(obj)/kxgettext $$i \
@@ -92,7 +93,7 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
92 done ) 93 done )
93 $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \ 94 $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
94 --output $(obj)/linux.pot 95 --output $(obj)/linux.pot
95 $(Q)rm -f arch/um/Kconfig.arch 96 $(Q)rm -f $(srctree)/arch/um/Kconfig
96 $(Q)rm -f $(obj)/config.pot 97 $(Q)rm -f $(obj)/config.pot
97 98
98PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig 99PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
@@ -168,8 +169,11 @@ conf-objs := conf.o zconf.tab.o
168mconf-objs := mconf.o zconf.tab.o $(lxdialog) 169mconf-objs := mconf.o zconf.tab.o $(lxdialog)
169nconf-objs := nconf.o zconf.tab.o nconf.gui.o 170nconf-objs := nconf.o zconf.tab.o nconf.gui.o
170kxgettext-objs := kxgettext.o zconf.tab.o 171kxgettext-objs := kxgettext.o zconf.tab.o
172qconf-cxxobjs := qconf.o
173qconf-objs := kconfig_load.o zconf.tab.o
174gconf-objs := gconf.o kconfig_load.o zconf.tab.o
171 175
172hostprogs-y := conf qconf gconf kxgettext 176hostprogs-y := conf
173 177
174ifeq ($(MAKECMDGOALS),nconfig) 178ifeq ($(MAKECMDGOALS),nconfig)
175 hostprogs-y += nconf 179 hostprogs-y += nconf
@@ -179,6 +183,10 @@ ifeq ($(MAKECMDGOALS),menuconfig)
179 hostprogs-y += mconf 183 hostprogs-y += mconf
180endif 184endif
181 185
186ifeq ($(MAKECMDGOALS),update-po-config)
187 hostprogs-y += kxgettext
188endif
189
182ifeq ($(MAKECMDGOALS),xconfig) 190ifeq ($(MAKECMDGOALS),xconfig)
183 qconf-target := 1 191 qconf-target := 1
184endif 192endif
@@ -188,16 +196,15 @@ endif
188 196
189 197
190ifeq ($(qconf-target),1) 198ifeq ($(qconf-target),1)
191qconf-cxxobjs := qconf.o 199 hostprogs-y += qconf
192qconf-objs := kconfig_load.o zconf.tab.o
193endif 200endif
194 201
195ifeq ($(gconf-target),1) 202ifeq ($(gconf-target),1)
196gconf-objs := gconf.o kconfig_load.o zconf.tab.o 203 hostprogs-y += gconf
197endif 204endif
198 205
199clean-files := lkc_defs.h qconf.moc .tmp_qtcheck \ 206clean-files := lkc_defs.h qconf.moc .tmp_qtcheck .tmp_gtkcheck
200 .tmp_gtkcheck zconf.tab.c lex.zconf.c zconf.hash.c gconf.glade.h 207clean-files += zconf.tab.c lex.zconf.c zconf.hash.c gconf.glade.h
201clean-files += mconf qconf gconf nconf 208clean-files += mconf qconf gconf nconf
202clean-files += config.pot linux.pot 209clean-files += config.pot linux.pot
203 210
@@ -321,11 +328,12 @@ $(obj)/%.moc: $(src)/%.h
321 $(KC_QT_MOC) -i $< -o $@ 328 $(KC_QT_MOC) -i $< -o $@
322 329
323$(obj)/lkc_defs.h: $(src)/lkc_proto.h 330$(obj)/lkc_defs.h: $(src)/lkc_proto.h
324 sed < $< > $@ 's/P(\([^,]*\),.*/#define \1 (\*\1_p)/' 331 $(Q)sed < $< > $@ 's/P(\([^,]*\),.*/#define \1 (\*\1_p)/'
325 332
326# Extract gconf menu items for I18N support 333# Extract gconf menu items for I18N support
327$(obj)/gconf.glade.h: $(obj)/gconf.glade 334$(obj)/gconf.glade.h: $(obj)/gconf.glade
328 intltool-extract --type=gettext/glade $(obj)/gconf.glade 335 $(Q)intltool-extract --type=gettext/glade --srcdir=$(srctree) \
336 $(obj)/gconf.glade
329 337
330### 338###
331# The following requires flex/bison/gperf 339# The following requires flex/bison/gperf
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 61c35bf2d9cb..2bafd9a7c8da 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -560,8 +560,6 @@ int conf_write(const char *name)
560 const char *basename; 560 const char *basename;
561 const char *str; 561 const char *str;
562 char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1]; 562 char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
563 time_t now;
564 int use_timestamp = 1;
565 char *env; 563 char *env;
566 564
567 dirname[0] = 0; 565 dirname[0] = 0;
@@ -598,19 +596,11 @@ int conf_write(const char *name)
598 if (!out) 596 if (!out)
599 return 1; 597 return 1;
600 598
601 time(&now);
602 env = getenv("KCONFIG_NOTIMESTAMP");
603 if (env && *env)
604 use_timestamp = 0;
605
606 fprintf(out, _("#\n" 599 fprintf(out, _("#\n"
607 "# Automatically generated make config: don't edit\n" 600 "# Automatically generated make config: don't edit\n"
608 "# %s\n" 601 "# %s\n"
609 "%s%s"
610 "#\n"), 602 "#\n"),
611 rootmenu.prompt->text, 603 rootmenu.prompt->text);
612 use_timestamp ? "# " : "",
613 use_timestamp ? ctime(&now) : "");
614 604
615 if (!conf_get_changed()) 605 if (!conf_get_changed())
616 sym_clear_all_valid(); 606 sym_clear_all_valid();
@@ -784,7 +774,6 @@ int conf_write_autoconf(void)
784 const char *str; 774 const char *str;
785 const char *name; 775 const char *name;
786 FILE *out, *tristate, *out_h; 776 FILE *out, *tristate, *out_h;
787 time_t now;
788 int i; 777 int i;
789 778
790 sym_clear_all_valid(); 779 sym_clear_all_valid();
@@ -811,22 +800,19 @@ int conf_write_autoconf(void)
811 return 1; 800 return 1;
812 } 801 }
813 802
814 time(&now);
815 fprintf(out, "#\n" 803 fprintf(out, "#\n"
816 "# Automatically generated make config: don't edit\n" 804 "# Automatically generated make config: don't edit\n"
817 "# %s\n" 805 "# %s\n"
818 "# %s"
819 "#\n", 806 "#\n",
820 rootmenu.prompt->text, ctime(&now)); 807 rootmenu.prompt->text);
821 fprintf(tristate, "#\n" 808 fprintf(tristate, "#\n"
822 "# Automatically generated - do not edit\n" 809 "# Automatically generated - do not edit\n"
823 "\n"); 810 "\n");
824 fprintf(out_h, "/*\n" 811 fprintf(out_h, "/*\n"
825 " * Automatically generated C config: don't edit\n" 812 " * Automatically generated C config: don't edit\n"
826 " * %s\n" 813 " * %s\n"
827 " * %s"
828 " */\n", 814 " */\n",
829 rootmenu.prompt->text, ctime(&now)); 815 rootmenu.prompt->text);
830 816
831 for_all_symbols(i, sym) { 817 for_all_symbols(i, sym) {
832 sym_calc_value(sym); 818 sym_calc_value(sym);
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 3d238db49764..16bfae2d3217 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -20,12 +20,8 @@ struct file {
20 struct file *parent; 20 struct file *parent;
21 const char *name; 21 const char *name;
22 int lineno; 22 int lineno;
23 int flags;
24}; 23};
25 24
26#define FILE_BUSY 0x0001
27#define FILE_SCANNED 0x0002
28
29typedef enum tristate { 25typedef enum tristate {
30 no, mod, yes 26 no, mod, yes
31} tristate; 27} tristate;
diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c
index 455896164d72..a11d5f7b9eeb 100644
--- a/scripts/kconfig/gconf.c
+++ b/scripts/kconfig/gconf.c
@@ -253,7 +253,7 @@ void init_left_tree(void)
253 253
254 gtk_tree_view_set_model(view, model1); 254 gtk_tree_view_set_model(view, model1);
255 gtk_tree_view_set_headers_visible(view, TRUE); 255 gtk_tree_view_set_headers_visible(view, TRUE);
256 gtk_tree_view_set_rules_hint(view, FALSE); 256 gtk_tree_view_set_rules_hint(view, TRUE);
257 257
258 column = gtk_tree_view_column_new(); 258 column = gtk_tree_view_column_new();
259 gtk_tree_view_append_column(view, column); 259 gtk_tree_view_append_column(view, column);
@@ -298,7 +298,7 @@ void init_right_tree(void)
298 298
299 gtk_tree_view_set_model(view, model2); 299 gtk_tree_view_set_model(view, model2);
300 gtk_tree_view_set_headers_visible(view, TRUE); 300 gtk_tree_view_set_headers_visible(view, TRUE);
301 gtk_tree_view_set_rules_hint(view, FALSE); 301 gtk_tree_view_set_rules_hint(view, TRUE);
302 302
303 column = gtk_tree_view_column_new(); 303 column = gtk_tree_view_column_new();
304 gtk_tree_view_append_column(view, column); 304 gtk_tree_view_append_column(view, column);
@@ -756,7 +756,6 @@ void on_load_clicked(GtkButton * button, gpointer user_data)
756void on_single_clicked(GtkButton * button, gpointer user_data) 756void on_single_clicked(GtkButton * button, gpointer user_data)
757{ 757{
758 view_mode = SINGLE_VIEW; 758 view_mode = SINGLE_VIEW;
759 gtk_paned_set_position(GTK_PANED(hpaned), 0);
760 gtk_widget_hide(tree1_w); 759 gtk_widget_hide(tree1_w);
761 current = &rootmenu; 760 current = &rootmenu;
762 display_tree_part(); 761 display_tree_part();
@@ -782,7 +781,6 @@ void on_split_clicked(GtkButton * button, gpointer user_data)
782void on_full_clicked(GtkButton * button, gpointer user_data) 781void on_full_clicked(GtkButton * button, gpointer user_data)
783{ 782{
784 view_mode = FULL_VIEW; 783 view_mode = FULL_VIEW;
785 gtk_paned_set_position(GTK_PANED(hpaned), 0);
786 gtk_widget_hide(tree1_w); 784 gtk_widget_hide(tree1_w);
787 if (tree2) 785 if (tree2)
788 gtk_tree_store_clear(tree2); 786 gtk_tree_store_clear(tree2);
@@ -1444,6 +1442,12 @@ static void display_tree(struct menu *menu)
1444 if (((menu != &rootmenu) && !(menu->flags & MENU_ROOT)) 1442 if (((menu != &rootmenu) && !(menu->flags & MENU_ROOT))
1445 || (view_mode == FULL_VIEW) 1443 || (view_mode == FULL_VIEW)
1446 || (view_mode == SPLIT_VIEW))*/ 1444 || (view_mode == SPLIT_VIEW))*/
1445
1446 /* Change paned position if the view is not in 'split mode' */
1447 if (view_mode == SINGLE_VIEW || view_mode == FULL_VIEW) {
1448 gtk_paned_set_position(GTK_PANED(hpaned), 0);
1449 }
1450
1447 if (((view_mode == SINGLE_VIEW) && (menu->flags & MENU_ROOT)) 1451 if (((view_mode == SINGLE_VIEW) && (menu->flags & MENU_ROOT))
1448 || (view_mode == FULL_VIEW) 1452 || (view_mode == FULL_VIEW)
1449 || (view_mode == SPLIT_VIEW)) { 1453 || (view_mode == SPLIT_VIEW)) {
diff --git a/scripts/kconfig/lex.zconf.c_shipped b/scripts/kconfig/lex.zconf.c_shipped
index 6eb039718259..d9182916f724 100644
--- a/scripts/kconfig/lex.zconf.c_shipped
+++ b/scripts/kconfig/lex.zconf.c_shipped
@@ -2363,11 +2363,11 @@ void zconf_initscan(const char *name)
 
 	current_file = file_lookup(name);
 	current_file->lineno = 1;
-	current_file->flags = FILE_BUSY;
 }
 
 void zconf_nextfile(const char *name)
 {
+	struct file *iter;
 	struct file *file = file_lookup(name);
 	struct buffer *buf = malloc(sizeof(*buf));
 	memset(buf, 0, sizeof(*buf));
@@ -2383,18 +2383,25 @@ void zconf_nextfile(const char *name)
 	buf->parent = current_buf;
 	current_buf = buf;
 
-	if (file->flags & FILE_BUSY) {
-		printf("%s:%d: do not source '%s' from itself\n",
-		       zconf_curname(), zconf_lineno(), name);
-		exit(1);
-	}
-	if (file->flags & FILE_SCANNED) {
-		printf("%s:%d: file '%s' is already sourced from '%s'\n",
-		       zconf_curname(), zconf_lineno(), name,
-		       file->parent->name);
-		exit(1);
+	for (iter = current_file->parent; iter; iter = iter->parent ) {
+		if (!strcmp(current_file->name,iter->name) ) {
+			printf("%s:%d: recursive inclusion detected. "
+			       "Inclusion path:\n  current file : '%s'\n",
+			       zconf_curname(), zconf_lineno(),
+			       zconf_curname());
+			iter = current_file->parent;
+			while (iter && \
+			       strcmp(iter->name,current_file->name)) {
+				printf("  included from: '%s:%d'\n",
+				       iter->name, iter->lineno-1);
+				iter = iter->parent;
+			}
+			if (iter)
+				printf("  included from: '%s:%d'\n",
+				       iter->name, iter->lineno+1);
+			exit(1);
+		}
 	}
-	file->flags |= FILE_BUSY;
 	file->lineno = 1;
 	file->parent = current_file;
 	current_file = file;
@@ -2404,8 +2411,6 @@ static void zconf_endfile(void)
 {
 	struct buffer *parent;
 
-	current_file->flags |= FILE_SCANNED;
-	current_file->flags &= ~FILE_BUSY;
 	current_file = current_file->parent;
 
 	parent = current_buf->parent;
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index db56377393d7..488dd7410787 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -373,18 +373,18 @@ static void print_function_line(void)
 	const int skip = 1;
 
 	for (i = 0; i < function_keys_num; i++) {
-		wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]);
+		(void) wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]);
 		mvwprintw(main_window, LINES-3, offset,
 				"%s",
 				function_keys[i].key_str);
-		wattrset(main_window, attributes[FUNCTION_TEXT]);
+		(void) wattrset(main_window, attributes[FUNCTION_TEXT]);
 		offset += strlen(function_keys[i].key_str);
 		mvwprintw(main_window, LINES-3,
 				offset, "%s",
 				function_keys[i].func);
 		offset += strlen(function_keys[i].func) + skip;
 	}
-	wattrset(main_window, attributes[NORMAL]);
+	(void) wattrset(main_window, attributes[NORMAL]);
 }
 
 /* help */
@@ -953,16 +953,16 @@ static void show_menu(const char *prompt, const char *instructions,
 	current_instructions = instructions;
 
 	clear();
-	wattrset(main_window, attributes[NORMAL]);
+	(void) wattrset(main_window, attributes[NORMAL]);
 	print_in_middle(stdscr, 1, 0, COLS,
 			menu_backtitle,
 			attributes[MAIN_HEADING]);
 
-	wattrset(main_window, attributes[MAIN_MENU_BOX]);
+	(void) wattrset(main_window, attributes[MAIN_MENU_BOX]);
 	box(main_window, 0, 0);
-	wattrset(main_window, attributes[MAIN_MENU_HEADING]);
+	(void) wattrset(main_window, attributes[MAIN_MENU_HEADING]);
 	mvwprintw(main_window, 0, 3, " %s ", prompt);
-	wattrset(main_window, attributes[NORMAL]);
+	(void) wattrset(main_window, attributes[NORMAL]);
 
 	set_menu_items(curses_menu, curses_menu_items);
 
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 06dd2e33581d..c2796b866f8f 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1489,8 +1489,7 @@ void ConfigMainWindow::saveConfigAs(void)
 	QString s = Q3FileDialog::getSaveFileName(conf_get_configname(), NULL, this);
 	if (s.isNull())
 		return;
-	if (conf_write(QFile::encodeName(s)))
-		QMessageBox::information(this, "qconf", _("Unable to save configuration!"));
+	saveConfig();
 }
 
 void ConfigMainWindow::searchConfig(void)
@@ -1643,7 +1642,7 @@ void ConfigMainWindow::closeEvent(QCloseEvent* e)
 	mb.setButtonText(QMessageBox::Cancel, _("Cancel Exit"));
 	switch (mb.exec()) {
 	case QMessageBox::Yes:
-		conf_write(NULL);
+		saveConfig();
 	case QMessageBox::No:
 		e->accept();
 		break;
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 3dbaec185cc4..b22f884f9022 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -294,11 +294,11 @@ void zconf_initscan(const char *name)
 
 	current_file = file_lookup(name);
 	current_file->lineno = 1;
-	current_file->flags = FILE_BUSY;
 }
 
 void zconf_nextfile(const char *name)
 {
+	struct file *iter;
 	struct file *file = file_lookup(name);
 	struct buffer *buf = malloc(sizeof(*buf));
 	memset(buf, 0, sizeof(*buf));
@@ -314,18 +314,25 @@ void zconf_nextfile(const char *name)
 	buf->parent = current_buf;
 	current_buf = buf;
 
-	if (file->flags & FILE_BUSY) {
-		printf("%s:%d: do not source '%s' from itself\n",
-		       zconf_curname(), zconf_lineno(), name);
-		exit(1);
-	}
-	if (file->flags & FILE_SCANNED) {
-		printf("%s:%d: file '%s' is already sourced from '%s'\n",
-		       zconf_curname(), zconf_lineno(), name,
-		       file->parent->name);
-		exit(1);
+	for (iter = current_file->parent; iter; iter = iter->parent ) {
+		if (!strcmp(current_file->name,iter->name) ) {
+			printf("%s:%d: recursive inclusion detected. "
+			       "Inclusion path:\n  current file : '%s'\n",
+			       zconf_curname(), zconf_lineno(),
+			       zconf_curname());
+			iter = current_file->parent;
+			while (iter && \
+			       strcmp(iter->name,current_file->name)) {
+				printf("  included from: '%s:%d'\n",
+				       iter->name, iter->lineno-1);
+				iter = iter->parent;
+			}
+			if (iter)
+				printf("  included from: '%s:%d'\n",
+				       iter->name, iter->lineno+1);
+			exit(1);
+		}
 	}
-	file->flags |= FILE_BUSY;
 	file->lineno = 1;
 	file->parent = current_file;
 	current_file = file;
@@ -335,8 +342,6 @@ static void zconf_endfile(void)
 {
 	struct buffer *parent;
 
-	current_file->flags |= FILE_SCANNED;
-	current_file->flags &= ~FILE_BUSY;
 	current_file = current_file->parent;
 
 	parent = current_buf->parent;
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index a834b935f536..006960ebbce9 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -26,9 +26,9 @@ RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \
 	    else echo rpm; fi)
 
 # Remove hyphens since they have special meaning in RPM filenames
-KERNELPATH := kernel-$(subst -,,$(KERNELRELEASE))
+KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
 MKSPEC     := $(srctree)/scripts/package/mkspec
-PREV       := set -e; cd ..;
+PREV       := set -e; cd -P ..;
 
 # rpm-pkg
 # ---------------------------------------------------------------------------
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index e1c1d5b8ca70..4bf17ddf7c7f 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -22,7 +22,7 @@ if [ "`grep CONFIG_DRM=y .config | cut -f2 -d\=`" = "y" ]; then
 fi
 
 PROVIDES="$PROVIDES kernel-$KERNELRELEASE"
-__KERNELRELEASE=`echo $KERNELRELEASE | sed -e "s/-//g"`
+__KERNELRELEASE=`echo $KERNELRELEASE | sed -e "s/-/_/g"`
 
 echo "Name: kernel"
 echo "Summary: The Linux Kernel"
@@ -47,6 +47,18 @@ echo ""
 echo "%description"
 echo "The Linux Kernel, the operating system core itself"
 echo ""
+echo "%package headers"
+echo "Summary: Header files for the Linux kernel for use by glibc"
+echo "Group: Development/System"
+echo "Obsoletes: kernel-headers"
+echo "Provides: kernel-headers = %{version}"
+echo "%description headers"
+echo "Kernel-headers includes the C header files that specify the interface"
+echo "between the Linux kernel and userspace libraries and programs. The"
+echo "header files define structures and constants that are needed for"
+echo "building most standard programs and are also needed for rebuilding the"
+echo "glibc package."
+echo ""
 
 if ! $PREBUILT; then
 echo "%prep"
@@ -83,6 +95,7 @@ echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
 echo "%endif"
 echo "%endif"
 
+echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install'
 echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
 
 echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
@@ -105,3 +118,7 @@ echo "/lib/modules/$KERNELRELEASE"
 echo "/lib/firmware"
 echo "/boot/*"
 echo ""
+echo "%files headers"
+echo '%defattr (-, root, root)'
+echo "/usr/include"
+echo ""
diff --git a/scripts/patch-kernel b/scripts/patch-kernel
index 46a59cae3a0a..20fb25c23382 100755
--- a/scripts/patch-kernel
+++ b/scripts/patch-kernel
@@ -250,7 +250,7 @@ while : # incrementing SUBLEVEL (s in v.p.s)
 do
 	CURRENTFULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL"
 	EXTRAVER=
-	if [ $STOPFULLVERSION = $CURRENTFULLVERSION ]; then
+	if [ x$STOPFULLVERSION = x$CURRENTFULLVERSION ]; then
 		echo "Stopping at $CURRENTFULLVERSION base as requested."
 		break
 	fi
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 8d9c48f13774..cd1f779fa51d 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -62,8 +62,7 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 struct cgroup_subsys devices_subsys;
 
 static int devcgroup_can_attach(struct cgroup_subsys *ss,
-		struct cgroup *new_cgroup, struct task_struct *task,
-		bool threadgroup)
+		struct cgroup *new_cgroup, struct task_struct *task)
 {
 	if (current != task && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 6c0480db8885..a3063eb3dc23 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -847,6 +847,7 @@ void key_replace_session_keyring(void)
 	new-> sgid	= old-> sgid;
 	new->fsgid	= old->fsgid;
 	new->user	= get_uid(old->user);
+	new->user_ns	= new->user->user_ns;
 	new->group_info	= get_group_info(old->group_info);
 
 	new->securebits	= old->securebits;
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index fcb89cb0f223..d515b2128a4e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -752,10 +752,9 @@ int avc_ss_reset(u32 seqno)
 int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 			 u16 tclass, u32 requested,
 			 unsigned flags,
-			 struct av_decision *in_avd)
+			 struct av_decision *avd)
 {
 	struct avc_node *node;
-	struct av_decision avd_entry, *avd;
 	int rc = 0;
 	u32 denied;
 
@@ -766,18 +765,11 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 	node = avc_lookup(ssid, tsid, tclass);
 	if (unlikely(!node)) {
 		rcu_read_unlock();
-
-		if (in_avd)
-			avd = in_avd;
-		else
-			avd = &avd_entry;
-
 		security_compute_av(ssid, tsid, tclass, avd);
 		rcu_read_lock();
 		node = avc_insert(ssid, tsid, tclass, avd);
 	} else {
-		if (in_avd)
-			memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
+		memcpy(avd, &node->ae.avd, sizeof(*avd));
 		avd = &node->ae.avd;
 	}
 
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c3e4b52699f4..973e00e34fa9 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2217,10 +2217,11 @@ out_unlock:
 		goto out;
 	}
 	for (i = 0, j = 0; i < mynel; i++) {
+		struct av_decision dummy_avd;
 		rc = avc_has_perm_noaudit(fromsid, mysids[i],
 					  SECCLASS_PROCESS, /* kernel value */
 					  PROCESS__TRANSITION, AVC_STRICT,
-					  NULL);
+					  &dummy_avd);
 		if (!rc)
 			mysids2[j++] = mysids[i];
 		cond_resched();
diff --git a/sound/core/control.c b/sound/core/control.c
index 5d98194bcad5..f8c5be464510 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -704,13 +704,12 @@ static int snd_ctl_elem_list(struct snd_card *card,
 	struct snd_ctl_elem_list list;
 	struct snd_kcontrol *kctl;
 	struct snd_ctl_elem_id *dst, *id;
-	unsigned int offset, space, first, jidx;
+	unsigned int offset, space, jidx;
 
 	if (copy_from_user(&list, _list, sizeof(list)))
 		return -EFAULT;
 	offset = list.offset;
 	space = list.space;
-	first = 0;
 	/* try limit maximum space */
 	if (space > 16384)
 		return -ENOMEM;
diff --git a/sound/core/init.c b/sound/core/init.c
index 30ecad41403c..2c041bb36ab3 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -342,7 +342,6 @@ static const struct file_operations snd_shutdown_f_ops =
 int snd_card_disconnect(struct snd_card *card)
 {
 	struct snd_monitor_file *mfile;
-	struct file *file;
 	int err;
 
 	if (!card)
@@ -366,8 +365,6 @@ int snd_card_disconnect(struct snd_card *card)
 
 	spin_lock(&card->files_lock);
 	list_for_each_entry(mfile, &card->files_list, list) {
-		file = mfile->file;
-
 		/* it's critical part, use endless loop */
 		/* we have no room to fail */
 		mfile->disconnected_f_op = mfile->file->f_op;
diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c
index 13b3f6f49fae..2045697f449d 100644
--- a/sound/core/oss/linear.c
+++ b/sound/core/oss/linear.c
@@ -90,11 +90,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
 			   struct snd_pcm_plugin_channel *dst_channels,
 			   snd_pcm_uframes_t frames)
 {
-	struct linear_priv *data;
-
 	if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
 		return -ENXIO;
-	data = (struct linear_priv *)plugin->extra_data;
 	if (frames == 0)
 		return 0;
 #ifdef CONFIG_SND_DEBUG
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index abfeff1611ce..f1341308beda 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1756,8 +1756,18 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 	wait_queue_t wait;
 	int err = 0;
 	snd_pcm_uframes_t avail = 0;
-	long tout;
+	long wait_time, tout;
 
+	if (runtime->no_period_wakeup)
+		wait_time = MAX_SCHEDULE_TIMEOUT;
+	else {
+		wait_time = 10;
+		if (runtime->rate) {
+			long t = runtime->period_size * 2 / runtime->rate;
+			wait_time = max(t, wait_time);
+		}
+		wait_time = msecs_to_jiffies(wait_time * 1000);
+	}
 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(&runtime->tsleep, &wait);
 	for (;;) {
@@ -1765,9 +1775,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
 			err = -ERESTARTSYS;
 			break;
 		}
-		set_current_state(TASK_INTERRUPTIBLE);
 		snd_pcm_stream_unlock_irq(substream);
-		tout = schedule_timeout(msecs_to_jiffies(10000));
+		tout = schedule_timeout_interruptible(wait_time);
 		snd_pcm_stream_lock_irq(substream);
 		switch (runtime->status->state) {
 		case SNDRV_PCM_STATE_SUSPENDED:
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 1a07750f3836..1c6be91dfb98 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1481,11 +1481,20 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 			break; /* all drained */
 		init_waitqueue_entry(&wait, current);
 		add_wait_queue(&to_check->sleep, &wait);
-		set_current_state(TASK_INTERRUPTIBLE);
 		snd_pcm_stream_unlock_irq(substream);
 		up_read(&snd_pcm_link_rwsem);
 		snd_power_unlock(card);
-		tout = schedule_timeout(10 * HZ);
+		if (runtime->no_period_wakeup)
+			tout = MAX_SCHEDULE_TIMEOUT;
+		else {
+			tout = 10;
+			if (runtime->rate) {
+				long t = runtime->period_size * 2 / runtime->rate;
+				tout = max(t, tout);
+			}
+			tout = msecs_to_jiffies(tout * 1000);
+		}
+		tout = schedule_timeout_interruptible(tout);
 		snd_power_lock(card);
 		down_read(&snd_pcm_link_rwsem);
 		snd_pcm_stream_lock_irq(substream);
@@ -1518,13 +1527,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 static int snd_pcm_drop(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime;
-	struct snd_card *card;
 	int result = 0;
 
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
-	card = substream->pcm->card;
 
 	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
 	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
@@ -2056,7 +2063,6 @@ static int snd_pcm_open_file(struct file *file,
 {
 	struct snd_pcm_file *pcm_file;
 	struct snd_pcm_substream *substream;
-	struct snd_pcm_str *str;
 	int err;
 
 	if (rpcm_file)
@@ -2073,7 +2079,6 @@ static int snd_pcm_open_file(struct file *file,
 	}
 	pcm_file->substream = substream;
 	if (substream->ref_count == 1) {
-		str = substream->pstr;
 		substream->file = pcm_file;
 		substream->pcm_release = pcm_release_private;
 	}
@@ -3015,11 +3020,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_status =
 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
 			       struct vm_area_struct *area)
 {
-	struct snd_pcm_runtime *runtime;
 	long size;
 	if (!(area->vm_flags & VM_READ))
 		return -EINVAL;
-	runtime = substream->runtime;
 	size = area->vm_end - area->vm_start;
 	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
 		return -EINVAL;
@@ -3054,11 +3057,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_control =
 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
 				struct vm_area_struct *area)
 {
-	struct snd_pcm_runtime *runtime;
 	long size;
 	if (!(area->vm_flags & VM_READ))
 		return -EINVAL;
-	runtime = substream->runtime;
 	size = area->vm_end - area->vm_start;
 	if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
 		return -EINVAL;
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index e7a8e9e4edb2..f9077361c119 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -467,13 +467,11 @@ int snd_seq_queue_timer_open(int queueid)
 int snd_seq_queue_timer_close(int queueid)
 {
 	struct snd_seq_queue *queue;
-	struct snd_seq_timer *tmr;
 	int result = 0;
 
 	queue = queueptr(queueid);
 	if (queue == NULL)
 		return -EINVAL;
-	tmr = queue->timer;
 	snd_seq_timer_close(queue);
 	queuefree(queue);
 	return result;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 8edd998509f7..45b4a8d70e08 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4719,7 +4719,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
 			    cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
 	snd_printd("   inputs:");
 	for (i = 0; i < cfg->num_inputs; i++) {
-		snd_printdd(" %s=0x%x",
+		snd_printd(" %s=0x%x",
 			    hda_get_autocfg_input_label(codec, cfg, i),
 			    cfg->inputs[i].pin);
 	}
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 74b0560289c0..b05f7be9dc1b 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -312,23 +312,6 @@ out_fail:
 	return -EINVAL;
 }
 
-static int hdmi_eld_valid(struct hda_codec *codec, hda_nid_t nid)
-{
-	int eldv;
-	int present;
-
-	present = snd_hda_pin_sense(codec, nid);
-	eldv    = (present & AC_PINSENSE_ELDV);
-	present = (present & AC_PINSENSE_PRESENCE);
-
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-	printk(KERN_INFO "HDMI: sink_present = %d, eld_valid = %d\n",
-			!!present, !!eldv);
-#endif
-
-	return eldv && present;
-}
-
 int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid)
 {
 	return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_DIP_SIZE,
@@ -343,7 +326,7 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
 	int size;
 	unsigned char *buf;
 
-	if (!hdmi_eld_valid(codec, nid))
+	if (!eld->eld_valid)
 		return -ENOENT;
 
 	size = snd_hdmi_get_eld_size(codec, nid);
@@ -477,6 +460,8 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
 
 	snd_iprintf(buffer, "monitor_present\t\t%d\n", e->monitor_present);
 	snd_iprintf(buffer, "eld_valid\t\t%d\n", e->eld_valid);
+	if (!e->eld_valid)
+		return;
 	snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
 	snd_iprintf(buffer, "connection_type\t\t%s\n",
 		    eld_connection_type_names[e->conn_type]);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 43a036716d25..348705666f99 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -391,6 +391,7 @@ struct azx {
 
 	/* chip type specific */
 	int driver_type;
+	unsigned int driver_caps;
 	int playback_streams;
 	int playback_index_offset;
 	int capture_streams;
@@ -464,6 +465,34 @@ enum {
 	AZX_NUM_DRIVERS, /* keep this as last entry */
 };
 
+/* driver quirks (capabilities) */
+/* bits 0-7 are used for indicating driver type */
+#define AZX_DCAPS_NO_TCSEL	(1 << 8)	/* No Intel TCSEL bit */
+#define AZX_DCAPS_NO_MSI	(1 << 9)	/* No MSI support */
+#define AZX_DCAPS_ATI_SNOOP	(1 << 10)	/* ATI snoop enable */
+#define AZX_DCAPS_NVIDIA_SNOOP	(1 << 11)	/* Nvidia snoop enable */
+#define AZX_DCAPS_SCH_SNOOP	(1 << 12)	/* SCH/PCH snoop enable */
+#define AZX_DCAPS_RIRB_DELAY	(1 << 13)	/* Long delay in read loop */
+#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14)	/* Put a delay before read */
+#define AZX_DCAPS_CTX_WORKAROUND (1 << 15)	/* X-Fi workaround */
+#define AZX_DCAPS_POSFIX_LPIB	(1 << 16)	/* Use LPIB as default */
+#define AZX_DCAPS_POSFIX_VIA	(1 << 17)	/* Use VIACOMBO as default */
+#define AZX_DCAPS_NO_64BIT	(1 << 18)	/* No 64bit address */
+#define AZX_DCAPS_SYNC_WRITE	(1 << 19)	/* sync each cmd write */
+
+/* quirks for ATI SB / AMD Hudson */
+#define AZX_DCAPS_PRESET_ATI_SB \
+	(AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
+	 AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+
+/* quirks for ATI/AMD HDMI */
+#define AZX_DCAPS_PRESET_ATI_HDMI \
+	(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+
+/* quirks for Nvidia */
+#define AZX_DCAPS_PRESET_NVIDIA \
+	(AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI)
+
 static char *driver_short_names[] __devinitdata = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
 	[AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -566,7 +595,7 @@ static void azx_init_cmd_io(struct azx *chip)
 	/* reset the rirb hw write pointer */
 	azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
 	/* set N=1, get RIRB response interrupt for new entry */
-	if (chip->driver_type == AZX_DRIVER_CTX)
+	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
 		azx_writew(chip, RINTCNT, 0xc0);
 	else
 		azx_writew(chip, RINTCNT, 1);
@@ -1056,19 +1085,24 @@ static void azx_init_pci(struct azx *chip)
 	 * codecs.
 	 * The PCI register TCSEL is defined in the Intel manuals.
 	 */
-	if (chip->driver_type != AZX_DRIVER_ATI &&
-	    chip->driver_type != AZX_DRIVER_ATIHDMI)
+	if (chip->driver_caps & AZX_DCAPS_NO_TCSEL) {
+		snd_printdd(SFX "Clearing TCSEL\n");
 		update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
+	}
 
-	switch (chip->driver_type) {
-	case AZX_DRIVER_ATI:
-		/* For ATI SB450 azalia HD audio, we need to enable snoop */
+	/* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio,
+	 * we need to enable snoop.
+	 */
+	if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
+		snd_printdd(SFX "Enabling ATI snoop\n");
 		update_pci_byte(chip->pci,
 				ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
 				0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
-		break;
-	case AZX_DRIVER_NVIDIA:
+	}
+
 	/* For NVIDIA HDA, enable snoop */
+	if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
+		snd_printdd(SFX "Enabling Nvidia snoop\n");
 		update_pci_byte(chip->pci,
 				NVIDIA_HDA_TRANSREG_ADDR,
 				0x0f, NVIDIA_HDA_ENABLE_COHBITS);
@@ -1078,9 +1112,10 @@ static void azx_init_pci(struct azx *chip)
 		update_pci_byte(chip->pci,
 				NVIDIA_HDA_OSTRM_COH,
 				0x01, NVIDIA_HDA_ENABLE_COHBIT);
-		break;
-	case AZX_DRIVER_SCH:
-	case AZX_DRIVER_PCH:
+	}
+
+	/* Enable SCH/PCH snoop if needed */
+	if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) {
 		pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
 		if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
 			pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
@@ -1091,14 +1126,6 @@ static void azx_init_pci(struct azx *chip)
 					(snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
 					? "Failed" : "OK");
 		}
-		break;
-	default:
-		/* AMD Hudson needs the similar snoop, as it seems... */
-		if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
-			update_pci_byte(chip->pci,
-				ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
-				0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
-		break;
 	}
 }
 
@@ -1152,7 +1179,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
 		status = azx_readb(chip, RIRBSTS);
 		if (status & RIRB_INT_MASK) {
 			if (status & RIRB_INT_RESPONSE) {
-				if (chip->driver_type == AZX_DRIVER_CTX)
+				if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
 					udelay(80);
 				azx_update_rirb(chip);
 			}
@@ -1421,8 +1448,10 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
 	if (err < 0)
 		return err;
 
-	if (chip->driver_type == AZX_DRIVER_NVIDIA)
+	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
+		snd_printd(SFX "Enable delay in RIRB handling\n");
 		chip->bus->needs_damn_long_delay = 1;
+	}
 
 	codecs = 0;
 	max_slots = azx_max_codecs[chip->driver_type];
@@ -1457,9 +1486,8 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
 	 * sequence like the pin-detection. It seems that forcing the synced
 	 * access works around the stall. Grrr...
 	 */
-	if (chip->pci->vendor == PCI_VENDOR_ID_AMD ||
-	    chip->pci->vendor == PCI_VENDOR_ID_ATI) {
-		snd_printk(KERN_INFO SFX "Enable sync_write for AMD chipset\n");
+	if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
+		snd_printd(SFX "Enable sync_write for stable communication\n");
 		chip->bus->sync_write = 1;
 		chip->bus->allow_bus_reset = 1;
 	}
@@ -1720,7 +1748,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
 
 	stream_tag = azx_dev->stream_tag;
 	/* CA-IBG chips need the playback stream starting from 1 */
-	if (chip->driver_type == AZX_DRIVER_CTX &&
+	if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
 	    stream_tag > chip->capture_streams)
 		stream_tag -= chip->capture_streams;
 	return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
@@ -2365,20 +2393,14 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
 	}
 
 	/* Check VIA/ATI HD Audio Controller exist */
-	switch (chip->driver_type) {
-	case AZX_DRIVER_VIA:
-		/* Use link position directly, avoid any transfer problem. */
+	if (chip->driver_caps & AZX_DCAPS_POSFIX_VIA) {
+		snd_printd(SFX "Using VIACOMBO position fix\n");
 		return POS_FIX_VIACOMBO;
-	case AZX_DRIVER_ATI:
-		/* ATI chipsets don't work well with position-buffer */
+	}
+	if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
+		snd_printd(SFX "Using LPIB position fix\n");
 		return POS_FIX_LPIB;
-	case AZX_DRIVER_GENERIC:
-		/* AMD chipsets also don't work with position-buffer */
-		if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
-			return POS_FIX_LPIB;
-		break;
 	}
-
 	return POS_FIX_AUTO;
 }
 
@@ -2460,8 +2482,8 @@ static void __devinit check_msi(struct azx *chip)
 	}
 
 	/* NVidia chipsets seem to cause troubles with MSI */
-	if (chip->driver_type == AZX_DRIVER_NVIDIA) {
-		printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
+	if (chip->driver_caps & AZX_DCAPS_NO_MSI) {
+		printk(KERN_INFO "hda_intel: Disabling MSI\n");
 		chip->msi = 0;
 	}
 }
@@ -2471,7 +2493,7 @@ static void __devinit check_msi(struct azx *chip)
  * constructor
  */
 static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
-				int dev, int driver_type,
+				int dev, unsigned int driver_caps,
 				struct azx **rchip)
 {
 	struct azx *chip;
@@ -2499,7 +2521,8 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
 	chip->card = card;
 	chip->pci = pci;
 	chip->irq = -1;
-	chip->driver_type = driver_type;
+	chip->driver_caps = driver_caps;
+	chip->driver_type = driver_caps & 0xff;
 	check_msi(chip);
 	chip->dev_index = dev;
 	INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
@@ -2563,8 +2586,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
 	snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
 
 	/* disable SB600 64bit support for safety */
-	if ((chip->driver_type == AZX_DRIVER_ATI) ||
-	    (chip->driver_type == AZX_DRIVER_ATIHDMI)) {
+	if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
 		struct pci_dev *p_smbus;
 		p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
 					 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
@@ -2574,19 +2596,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
 				gcap &= ~ICH6_GCAP_64OK;
 			pci_dev_put(p_smbus);
 		}
-	} else {
-		/* FIXME: not sure whether this is really needed, but
-		 * Hudson isn't stable enough for allowing everything...
-		 * let's check later again.
-		 */
-		if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
-			gcap &= ~ICH6_GCAP_64OK;
 	}
 
-	/* disable 64bit DMA address for Teradici */
-	/* it does not work with device 6549:1200 subsys e4a2:040b */
-	if (chip->driver_type == AZX_DRIVER_TERA)
+	/* disable 64bit DMA address on some devices */
+	if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
+		snd_printd(SFX "Disabling 64bit DMA\n");
 		gcap &= ~ICH6_GCAP_64OK;
+	}
 
 	/* allow 64bit DMA address if supported by H/W */
 	if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
@@ -2788,38 +2804,62 @@ static void __devexit azx_remove(struct pci_dev *pci)
 /* PCI IDs */
 static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	/* CPT */
-	{ PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+	{ PCI_DEVICE(0x8086, 0x1c20),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
 	/* PBG */
-	{ PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
+	{ PCI_DEVICE(0x8086, 0x1d20),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
 	/* Panther Point */
-	{ PCI_DEVICE(0x8086, 0x1e20), .driver_data = AZX_DRIVER_PCH },
+	{ PCI_DEVICE(0x8086, 0x1e20),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
 	/* SCH */
-	{ PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
+	{ PCI_DEVICE(0x8086, 0x811b),
+	  .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP },
 	/* Generic Intel */
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
 	  .class_mask = 0xffffff,
 	  .driver_data = AZX_DRIVER_ICH },
-	/* ATI SB 450/600 */
-	{ PCI_DEVICE(0x1002, 0x437b), .driver_data = AZX_DRIVER_ATI },
-	{ PCI_DEVICE(0x1002, 0x4383), .driver_data = AZX_DRIVER_ATI },
+	/* ATI SB 450/600/700/800/900 */
+	{ PCI_DEVICE(0x1002, 0x437b),
+	  .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
+	{ PCI_DEVICE(0x1002, 0x4383),
+	  .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
+	/* AMD Hudson */
+	{ PCI_DEVICE(0x1022, 0x780d),
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
 	/* ATI HDMI */
-	{ PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0x7919), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0x960f), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0x970f), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa00), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa08), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa10), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa18), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa20), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa28), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa30), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa38), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa40), .driver_data = AZX_DRIVER_ATIHDMI },
-	{ PCI_DEVICE(0x1002, 0xaa48), .driver_data = AZX_DRIVER_ATIHDMI },
+	{ PCI_DEVICE(0x1002, 0x793b),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0x7919),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0x960f),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0x970f),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa00),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa08),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa10),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa18),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa20),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa28),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa30),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa38),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa40),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+	{ PCI_DEVICE(0x1002, 0xaa48),
+	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
 	/* VIA VT8251/VT8237A */
-	{ PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
+	{ PCI_DEVICE(0x1106, 0x3288),
+	  .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
 	/* SIS966 */
 	{ PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS },
 	/* ULI M5461 */
@@ -2828,9 +2868,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
 	  .class_mask = 0xffffff,
-	  .driver_data = AZX_DRIVER_NVIDIA },
+	  .driver_data = AZX_DRIVER_NVIDIA | AZX_DCAPS_PRESET_NVIDIA },
 	/* Teradici */
-	{ PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA },
+	{ PCI_DEVICE(0x6549, 0x1200),
+	  .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
 	/* Creative X-Fi (CA0110-IBG) */
 #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
 	/* the following entry conflicts with snd-ctxfi driver,
@@ -2840,10 +2881,13 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
 	  .class_mask = 0xffffff,
-	  .driver_data = AZX_DRIVER_CTX },
+	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+	  AZX_DCAPS_RIRB_PRE_DELAY },
 #else
 	/* this entry seems still valid -- i.e. without emu20kx chip */
-	{ PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX },
+	{ PCI_DEVICE(0x1102, 0x0009),
+	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+	  AZX_DCAPS_RIRB_PRE_DELAY },
 #endif
 	/* Vortex86MX */
 	{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
@@ -2853,11 +2897,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
 	  .class_mask = 0xffffff,
-	  .driver_data = AZX_DRIVER_GENERIC },
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID),
 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
 	  .class_mask = 0xffffff,
-	  .driver_data = AZX_DRIVER_GENERIC },
+	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, azx_ids);
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index f1b3875c57df..696ac2590307 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3159,6 +3159,7 @@ static const struct snd_pci_quirk ad1988_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG),
 	SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG),
 	SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG),
+	SND_PCI_QUIRK(0x1043, 0x82c0, "Asus M3N-HT Deluxe", AD1988_6STACK_DIG),
 	SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG),
 	{}
 };
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4f37477d3c71..3e6b9a8539c2 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3098,7 +3098,9 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
+	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
+	SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
 	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
 	{}
 };
@@ -3433,7 +3435,9 @@ static void cx_auto_parse_output(struct hda_codec *codec)
 			break;
 		}
 	}
-	if (spec->auto_mute && cfg->line_out_pins[0] &&
+	if (spec->auto_mute &&
+	    cfg->line_out_pins[0] &&
+	    cfg->line_out_type != AUTO_PIN_SPEAKER_OUT &&
 	    cfg->line_out_pins[0] != cfg->hp_pins[0] &&
 	    cfg->line_out_pins[0] != cfg->speaker_pins[0]) {
 		for (i = 0; i < cfg->line_outs; i++) {
@@ -3481,25 +3485,32 @@ static void cx_auto_update_speakers(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
 	struct auto_pin_cfg *cfg = &spec->autocfg;
-	int on;
+	int on = 1;
 
-	if (!spec->auto_mute)
-		on = 0;
-	else
-		on = spec->hp_present | spec->line_present;
+	/* turn on HP EAPD when HP jacks are present */
+	if (spec->auto_mute)
+		on = spec->hp_present;
 	cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on);
-	do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, !on);
+	/* mute speakers in auto-mode if HP or LO jacks are plugged */
+	if (spec->auto_mute)
+		on = !(spec->hp_present ||
+		       (spec->detect_line && spec->line_present));
+	do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, on);
 
 	/* toggle line-out mutes if needed, too */
 	/* if LO is a copy of either HP or Speaker, don't need to handle it */
 	if (cfg->line_out_pins[0] == cfg->hp_pins[0] ||
 	    cfg->line_out_pins[0] == cfg->speaker_pins[0])
 		return;
-	if (!spec->automute_lines || !spec->auto_mute)
-		on = 0;
-	else
-		on = spec->hp_present;
-	do_automute(codec, cfg->line_outs, cfg->line_out_pins, !on);
+	if (spec->auto_mute) {
+		/* mute LO in auto-mode when HP jack is present */
+		if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT ||
+		    spec->automute_lines)
+			on = !spec->hp_present;
+		else
+			on = 1;
+	}
+	do_automute(codec, cfg->line_outs, cfg->line_out_pins, on);
 }
 
 static void cx_auto_hp_automute(struct hda_codec *codec)
@@ -3696,13 +3707,14 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
 {
 	struct conexant_spec *spec = codec->spec;
 	hda_nid_t adc;
+	int changed = 1;
 
 	if (!imux->num_items)
 		return 0;
 	if (idx >= imux->num_items)
 		idx = imux->num_items - 1;
 	if (spec->cur_mux[0] == idx)
-		return 0;
+		changed = 0;
 	adc = spec->imux_info[idx].adc;
 	select_input_connection(codec, spec->imux_info[idx].adc,
 				spec->imux_info[idx].pin);
@@ -3715,7 +3727,7 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
 					   spec->cur_adc_format);
 	}
 	spec->cur_mux[0] = idx;
-	return 1;
+	return changed;
 }
 
 static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol,
@@ -3789,7 +3801,7 @@ static void cx_auto_check_auto_mic(struct hda_codec *codec)
 	int pset[INPUT_PIN_ATTR_NORMAL + 1];
 	int i;
 
-	for (i = 0; i < INPUT_PIN_ATTR_NORMAL; i++)
+	for (i = 0; i < ARRAY_SIZE(pset); i++)
 		pset[i] = -1;
 	for (i = 0; i < spec->private_imux.num_items; i++) {
 		hda_nid_t pin = spec->imux_info[i].pin;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 322901873222..bd0ae697f9c4 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -48,8 +48,8 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
  *
  * The HDA correspondence of pipes/ports are converter/pin nodes.
  */
-#define MAX_HDMI_CVTS	3
-#define MAX_HDMI_PINS	3
+#define MAX_HDMI_CVTS	4
+#define MAX_HDMI_PINS	4
 
 struct hdmi_spec {
 	int num_cvts;
@@ -78,10 +78,6 @@ struct hdmi_spec {
 	 */
 	struct hda_multi_out multiout;
 	const struct hda_pcm_stream *pcm_playback;
-
-	/* misc flags */
-	/* PD bit indicates only the update, not the current state */
-	unsigned int old_pin_detect:1;
 };
 
 
@@ -300,13 +296,6 @@ static int hda_node_index(hda_nid_t *nids, hda_nid_t nid)
 	return -EINVAL;
 }
 
-static void hdmi_get_show_eld(struct hda_codec *codec, hda_nid_t pin_nid,
-			      struct hdmi_eld *eld)
-{
-	if (!snd_hdmi_get_eld(eld, codec, pin_nid))
-		snd_hdmi_show_eld(eld);
-}
-
 #ifdef BE_PARANOID
 static void hdmi_get_dip_index(struct hda_codec *codec, hda_nid_t pin_nid,
 			       int *packet_index, int *byte_index)
@@ -694,35 +683,20 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
694static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) 683static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
695{ 684{
696 struct hdmi_spec *spec = codec->spec; 685 struct hdmi_spec *spec = codec->spec;
697 int tag = res >> AC_UNSOL_RES_TAG_SHIFT; 686 int pin_nid = res >> AC_UNSOL_RES_TAG_SHIFT;
698 int pind = !!(res & AC_UNSOL_RES_PD); 687 int pd = !!(res & AC_UNSOL_RES_PD);
699 int eldv = !!(res & AC_UNSOL_RES_ELDV); 688 int eldv = !!(res & AC_UNSOL_RES_ELDV);
700 int index; 689 int index;
701 690
702 printk(KERN_INFO 691 printk(KERN_INFO
703 "HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 692 "HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
704 tag, pind, eldv); 693 pin_nid, pd, eldv);
705 694
706 index = hda_node_index(spec->pin, tag); 695 index = hda_node_index(spec->pin, pin_nid);
707 if (index < 0) 696 if (index < 0)
708 return; 697 return;
709 698
710 if (spec->old_pin_detect) { 699 hdmi_present_sense(codec, pin_nid, &spec->sink_eld[index]);
711 if (pind)
712 hdmi_present_sense(codec, tag, &spec->sink_eld[index]);
713 pind = spec->sink_eld[index].monitor_present;
714 }
715
716 spec->sink_eld[index].monitor_present = pind;
717 spec->sink_eld[index].eld_valid = eldv;
718
719 if (pind && eldv) {
720 hdmi_get_show_eld(codec, spec->pin[index],
721 &spec->sink_eld[index]);
722 /* TODO: do real things about ELD */
723 }
724
725 snd_hda_input_jack_report(codec, tag);
726} 700}
727 701
728static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) 702static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -903,13 +877,33 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, hda_nid_t pin_nid)
903static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, 877static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
904 struct hdmi_eld *eld) 878 struct hdmi_eld *eld)
905{ 879{
880 /*
881 * Always execute a GetPinSense verb here, even when called from
882 * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
883 * response's PD bit is not the real PD value, but indicates that
884 * the real PD value changed. An older version of the HD-audio
885 * specification worked this way. Hence, we just ignore the data in
886 * the unsolicited response to avoid custom WARs.
887 */
906 int present = snd_hda_pin_sense(codec, pin_nid); 888 int present = snd_hda_pin_sense(codec, pin_nid);
907 889
890 memset(eld, 0, sizeof(*eld));
891
908 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE); 892 eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
909 eld->eld_valid = !!(present & AC_PINSENSE_ELDV); 893 if (eld->monitor_present)
894 eld->eld_valid = !!(present & AC_PINSENSE_ELDV);
895 else
896 eld->eld_valid = 0;
910 897
911 if (present & AC_PINSENSE_ELDV) 898 printk(KERN_INFO
912 hdmi_get_show_eld(codec, pin_nid, eld); 899 "HDMI status: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
900 pin_nid, eld->monitor_present, eld->eld_valid);
901
902 if (eld->eld_valid)
903 if (!snd_hdmi_get_eld(eld, codec, pin_nid))
904 snd_hdmi_show_eld(eld);
905
906 snd_hda_input_jack_report(codec, pin_nid);
913} 907}
914 908
915static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) 909static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -927,7 +921,6 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
927 SND_JACK_VIDEOOUT, NULL); 921 SND_JACK_VIDEOOUT, NULL);
928 if (err < 0) 922 if (err < 0)
929 return err; 923 return err;
930 snd_hda_input_jack_report(codec, pin_nid);
931 924
932 hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]); 925 hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]);
933 926
@@ -1034,6 +1027,7 @@ static char *generic_hdmi_pcm_names[MAX_HDMI_CVTS] = {
1034 "HDMI 0", 1027 "HDMI 0",
1035 "HDMI 1", 1028 "HDMI 1",
1036 "HDMI 2", 1029 "HDMI 2",
1030 "HDMI 3",
1037}; 1031};
1038 1032
1039/* 1033/*
@@ -1490,18 +1484,6 @@ static const struct hda_codec_ops nvhdmi_patch_ops_2ch = {
1490 .free = generic_hdmi_free, 1484 .free = generic_hdmi_free,
1491}; 1485};
1492 1486
1493static int patch_nvhdmi_8ch_89(struct hda_codec *codec)
1494{
1495 struct hdmi_spec *spec;
1496 int err = patch_generic_hdmi(codec);
1497
1498 if (err < 0)
1499 return err;
1500 spec = codec->spec;
1501 spec->old_pin_detect = 1;
1502 return 0;
1503}
1504
1505static int patch_nvhdmi_2ch(struct hda_codec *codec) 1487static int patch_nvhdmi_2ch(struct hda_codec *codec)
1506{ 1488{
1507 struct hdmi_spec *spec; 1489 struct hdmi_spec *spec;
@@ -1515,7 +1497,6 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
1515 spec->multiout.num_dacs = 0; /* no analog */ 1497 spec->multiout.num_dacs = 0; /* no analog */
1516 spec->multiout.max_channels = 2; 1498 spec->multiout.max_channels = 2;
1517 spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x; 1499 spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
1518 spec->old_pin_detect = 1;
1519 spec->num_cvts = 1; 1500 spec->num_cvts = 1;
1520 spec->cvt[0] = nvhdmi_master_con_nid_7x; 1501 spec->cvt[0] = nvhdmi_master_con_nid_7x;
1521 spec->pcm_playback = &nvhdmi_pcm_playback_2ch; 1502 spec->pcm_playback = &nvhdmi_pcm_playback_2ch;
@@ -1658,28 +1639,28 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
1658{ .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, 1639{ .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
1659{ .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, 1640{ .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
1660{ .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x }, 1641{ .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x },
1661{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1642{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_generic_hdmi },
1662{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1643{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_generic_hdmi },
1663{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_nvhdmi_8ch_89 }, 1644{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_generic_hdmi },
1664{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1645{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_generic_hdmi },
1665{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1646{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_generic_hdmi },
1666{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1647{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_generic_hdmi },
1667{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1648{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_generic_hdmi },
1668{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1649{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_generic_hdmi },
1669{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1650{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_generic_hdmi },
1670{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1651{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_generic_hdmi },
1671{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1652{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_generic_hdmi },
1672/* 17 is known to be absent */ 1653/* 17 is known to be absent */
1673{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1654{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_generic_hdmi },
1674{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1655{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_generic_hdmi },
1675{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1656{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_generic_hdmi },
1676{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1657{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_generic_hdmi },
1677{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1658{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_generic_hdmi },
1678{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1659{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_generic_hdmi },
1679{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1660{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_generic_hdmi },
1680{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1661{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
1681{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1662{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
1682{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1663{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
1683{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 1664{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
1684{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 1665{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
1685{ .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi }, 1666{ .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
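
The patch_hdmi.c changes drop the old_pin_detect flag: as the new comment explains, some NVIDIA parts report only "the PD value changed" in the unsolicited response, so hdmi_present_sense() now always re-reads the pin state and derives eld_valid only when a monitor is present. A stripped-down sketch of that idea follows; the SENSE_* masks, read_pin_sense() and struct eld_state are made-up stand-ins for the HDA verbs and ELD structures.

#include <stdio.h>
#include <string.h>

/* Hypothetical sense bits, standing in for AC_PINSENSE_PRESENCE/ELDV. */
#define SENSE_PRESENCE (1u << 31)
#define SENSE_ELDV     (1u << 30)

struct eld_state {
        int monitor_present;
        int eld_valid;
};

/* Fake hardware query; the real driver issues a GetPinSense verb here. */
static unsigned int read_pin_sense(void)
{
        return SENSE_PRESENCE | SENSE_ELDV;
}

/*
 * On a hot-plug event, ignore the payload of the notification and
 * re-query the pin: clear the cached state, then mark the ELD valid
 * only when a monitor is actually present, as the hunk above does.
 */
static void present_sense(struct eld_state *eld)
{
        unsigned int present = read_pin_sense();

        memset(eld, 0, sizeof(*eld));
        eld->monitor_present = !!(present & SENSE_PRESENCE);
        if (eld->monitor_present)
                eld->eld_valid = !!(present & SENSE_ELDV);
}

int main(void)
{
        struct eld_state eld;

        present_sense(&eld);
        printf("present=%d eld_valid=%d\n",
               eld.monitor_present, eld.eld_valid);
        return 0;
}
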
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 28afbbf69ce0..95572d290c27 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -146,7 +146,7 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
146 "at91sam9g20ek_wm8731 " 146 "at91sam9g20ek_wm8731 "
147 ": at91sam9g20ek_wm8731_init() called\n"); 147 ": at91sam9g20ek_wm8731_init() called\n");
148 148
149 ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL, 149 ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_MCLK,
150 MCLK_RATE, SND_SOC_CLOCK_IN); 150 MCLK_RATE, SND_SOC_CLOCK_IN);
151 if (ret < 0) { 151 if (ret < 0) {
152 printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret); 152 printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret);
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index b8066ef10bb0..46dbfd067f79 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -153,8 +153,7 @@ static int cq93vc_resume(struct snd_soc_codec *codec)
153 153
154static int cq93vc_probe(struct snd_soc_codec *codec) 154static int cq93vc_probe(struct snd_soc_codec *codec)
155{ 155{
156 struct davinci_vc *davinci_vc = 156 struct davinci_vc *davinci_vc = codec->dev->platform_data;
157 mfd_get_data(to_platform_device(codec->dev));
158 157
159 davinci_vc->cq93vc.codec = codec; 158 davinci_vc->cq93vc.codec = codec;
160 codec->control_data = davinci_vc; 159 codec->control_data = davinci_vc;
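
This cq93vc.c hunk, like the twl4030, wl1273, wm8400 and davinci-vcif hunks that follow, replaces mfd_get_data() with a plain platform-data lookup on the device. A tiny sketch of that accessor pattern is below, using a simplified fake_device/codec_pdata layout rather than the real struct device, purely to illustrate the dev_get_platdata()-style lookup.

#include <stdio.h>

/* Simplified stand-ins for struct device and an MFD cell's platform data. */
struct fake_device {
        void *platform_data;
};

struct codec_pdata {
        const char *name;
};

/* Equivalent of dev_get_platdata(): just return dev->platform_data. */
static void *fake_get_platdata(const struct fake_device *dev)
{
        return dev->platform_data;
}

int main(void)
{
        struct codec_pdata pdata = { .name = "cq93vc" };
        struct fake_device dev = { .platform_data = &pdata };
        struct codec_pdata *p = fake_get_platdata(&dev);

        if (!p) {
                fprintf(stderr, "platform_data is missing\n");
                return 1;
        }
        printf("probing codec %s\n", p->name);
        return 0;
}
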
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 575238d68e5e..bec788b12613 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -26,7 +26,6 @@
26#include <linux/pm.h> 26#include <linux/pm.h>
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/mfd/core.h>
30#include <linux/i2c/twl.h> 29#include <linux/i2c/twl.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
32#include <sound/core.h> 31#include <sound/core.h>
@@ -733,8 +732,7 @@ static int aif_event(struct snd_soc_dapm_widget *w,
733 732
734static void headset_ramp(struct snd_soc_codec *codec, int ramp) 733static void headset_ramp(struct snd_soc_codec *codec, int ramp)
735{ 734{
736 struct twl4030_codec_audio_data *pdata = 735 struct twl4030_codec_audio_data *pdata = codec->dev->platform_data;
737 mfd_get_data(to_platform_device(codec->dev));
738 unsigned char hs_gain, hs_pop; 736 unsigned char hs_gain, hs_pop;
739 struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec); 737 struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
740 /* Base values for ramp delay calculation: 2^19 - 2^26 */ 738 /* Base values for ramp delay calculation: 2^19 - 2^26 */
@@ -2299,7 +2297,7 @@ static struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
2299 2297
2300static int __devinit twl4030_codec_probe(struct platform_device *pdev) 2298static int __devinit twl4030_codec_probe(struct platform_device *pdev)
2301{ 2299{
2302 struct twl4030_codec_audio_data *pdata = mfd_get_data(pdev); 2300 struct twl4030_codec_audio_data *pdata = pdev->dev.platform_data;
2303 2301
2304 if (!pdata) { 2302 if (!pdata) {
2305 dev_err(&pdev->dev, "platform_data is missing\n"); 2303 dev_err(&pdev->dev, "platform_data is missing\n");
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index c8a874d0d4ca..5836201834d9 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -441,8 +441,7 @@ EXPORT_SYMBOL_GPL(wl1273_get_format);
441 441
442static int wl1273_probe(struct snd_soc_codec *codec) 442static int wl1273_probe(struct snd_soc_codec *codec)
443{ 443{
444 struct wl1273_core **core = 444 struct wl1273_core **core = codec->dev->platform_data;
445 mfd_get_data(to_platform_device(codec->dev));
446 struct wl1273_priv *wl1273; 445 struct wl1273_priv *wl1273;
447 int r; 446 int r;
448 447
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index 14d0716bf009..bcc208967917 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -22,7 +22,7 @@ SND_SOC_DAPM_ADC("ADC", "wm1250-ev1 Capture", SND_SOC_NOPM, 0, 0),
22SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0), 22SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0),
23 23
24SND_SOC_DAPM_INPUT("WM1250 Input"), 24SND_SOC_DAPM_INPUT("WM1250 Input"),
25SND_SOC_DAPM_INPUT("WM1250 Output"), 25SND_SOC_DAPM_OUTPUT("WM1250 Output"),
26}; 26};
27 27
28static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = { 28static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = {
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 736b785e3756..fbee556cbf35 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1378,7 +1378,7 @@ static void wm8400_probe_deferred(struct work_struct *work)
1378 1378
1379static int wm8400_codec_probe(struct snd_soc_codec *codec) 1379static int wm8400_codec_probe(struct snd_soc_codec *codec)
1380{ 1380{
1381 struct wm8400 *wm8400 = mfd_get_data(to_platform_device(codec->dev)); 1381 struct wm8400 *wm8400 = dev_get_platdata(codec->dev);
1382 struct wm8400_priv *priv; 1382 struct wm8400_priv *priv;
1383 int ret; 1383 int ret;
1384 u16 reg; 1384 u16 reg;
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 6dec7cee2cb4..2dc964b55e4f 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -198,7 +198,7 @@ static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
198{ 198{
199 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec); 199 struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec);
200 200
201 return wm8731->sysclk_type == WM8731_SYSCLK_MCLK; 201 return wm8731->sysclk_type == WM8731_SYSCLK_XTAL;
202} 202}
203 203
204static const struct snd_soc_dapm_route wm8731_intercon[] = { 204static const struct snd_soc_dapm_route wm8731_intercon[] = {
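
This wm8731.c hunk is the counterpart of the sam9g20_wm8731 board change earlier in the diff: the board now asks for WM8731_SYSCLK_MCLK, so the DAPM supply check must enable the on-chip oscillator only when the crystal (XTAL) source is selected. A small sketch of that pairing follows, with invented identifiers (enum sysclk_source, check_osc) in place of the ASoC structures.

#include <stdio.h>

/* Hypothetical clock-source identifiers for a WM8731-like codec. */
enum sysclk_source { SYSCLK_MCLK, SYSCLK_XTAL };

struct codec_priv {
        enum sysclk_source sysclk_type;
};

/*
 * DAPM-style supply check: the on-chip oscillator is only needed when
 * the codec is clocked from its crystal, matching the fix above.
 */
static int check_osc(const struct codec_priv *priv)
{
        return priv->sysclk_type == SYSCLK_XTAL;
}

int main(void)
{
        /* Board side: the machine driver selects MCLK as the source. */
        struct codec_priv priv = { .sysclk_type = SYSCLK_MCLK };

        printf("oscillator needed: %d\n", check_osc(&priv)); /* prints 0 */
        return 0;
}
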
diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c
index ccc9bd832794..a0b1a7278284 100644
--- a/sound/soc/codecs/wm8915.c
+++ b/sound/soc/codecs/wm8915.c
@@ -19,7 +19,6 @@
19#include <linux/gcd.h> 19#include <linux/gcd.h>
20#include <linux/gpio.h> 20#include <linux/gpio.h>
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/delay.h>
23#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/workqueue.h> 24#include <linux/workqueue.h>
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index 13e05a302a92..9259f1f34899 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -205,7 +205,7 @@ static struct snd_soc_dai_driver davinci_vcif_dai = {
205 205
206static int davinci_vcif_probe(struct platform_device *pdev) 206static int davinci_vcif_probe(struct platform_device *pdev)
207{ 207{
208 struct davinci_vc *davinci_vc = mfd_get_data(pdev); 208 struct davinci_vc *davinci_vc = pdev->dev.platform_data;
209 struct davinci_vcif_dev *davinci_vcif_dev; 209 struct davinci_vcif_dev *davinci_vcif_dev;
210 int ret; 210 int ret;
211 211
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index b5922984eac6..99054cf1f68f 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -65,14 +65,6 @@ config SND_OMAP_SOC_OVERO
65 Say Y if you want to add support for SoC audio on the 65 Say Y if you want to add support for SoC audio on the
66 Gumstix Overo or CompuLab CM-T35 66 Gumstix Overo or CompuLab CM-T35
67 67
68config SND_OMAP_SOC_OMAP2EVM
69 tristate "SoC Audio support for OMAP2EVM board"
70 depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP2EVM
71 select SND_OMAP_SOC_MCBSP
72 select SND_SOC_TWL4030
73 help
74 Say Y if you want to add support for SoC audio on the omap2evm board.
75
76config SND_OMAP_SOC_OMAP3EVM 68config SND_OMAP_SOC_OMAP3EVM
77 tristate "SoC Audio support for OMAP3EVM board" 69 tristate "SoC Audio support for OMAP3EVM board"
78 depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP3EVM 70 depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP3EVM
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index ba9fc650db28..6c2c87eed5bb 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -13,7 +13,6 @@ snd-soc-rx51-objs := rx51.o
13snd-soc-ams-delta-objs := ams-delta.o 13snd-soc-ams-delta-objs := ams-delta.o
14snd-soc-osk5912-objs := osk5912.o 14snd-soc-osk5912-objs := osk5912.o
15snd-soc-overo-objs := overo.o 15snd-soc-overo-objs := overo.o
16snd-soc-omap2evm-objs := omap2evm.o
17snd-soc-omap3evm-objs := omap3evm.o 16snd-soc-omap3evm-objs := omap3evm.o
18snd-soc-am3517evm-objs := am3517evm.o 17snd-soc-am3517evm-objs := am3517evm.o
19snd-soc-sdp3430-objs := sdp3430.o 18snd-soc-sdp3430-objs := sdp3430.o
diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c
deleted file mode 100644
index 29b60d6796e7..000000000000
--- a/sound/soc/omap/omap2evm.c
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * omap2evm.c -- SoC audio machine driver for omap2evm board
3 *
4 * Author: Arun KS <arunks@mistralsolutions.com>
5 *
6 * Based on sound/soc/omap/overo.c by Steve Sakoman
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/clk.h>
25#include <linux/platform_device.h>
26#include <sound/core.h>
27#include <sound/pcm.h>
28#include <sound/soc.h>
29
30#include <asm/mach-types.h>
31#include <mach/hardware.h>
32#include <mach/gpio.h>
33#include <plat/mcbsp.h>
34
35#include "omap-mcbsp.h"
36#include "omap-pcm.h"
37
38static int omap2evm_hw_params(struct snd_pcm_substream *substream,
39 struct snd_pcm_hw_params *params)
40{
41 struct snd_soc_pcm_runtime *rtd = substream->private_data;
42 struct snd_soc_dai *codec_dai = rtd->codec_dai;
43 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
44 int ret;
45
46 /* Set codec DAI configuration */
47 ret = snd_soc_dai_set_fmt(codec_dai,
48 SND_SOC_DAIFMT_I2S |
49 SND_SOC_DAIFMT_NB_NF |
50 SND_SOC_DAIFMT_CBM_CFM);
51 if (ret < 0) {
52 printk(KERN_ERR "can't set codec DAI configuration\n");
53 return ret;
54 }
55
56 /* Set cpu DAI configuration */
57 ret = snd_soc_dai_set_fmt(cpu_dai,
58 SND_SOC_DAIFMT_I2S |
59 SND_SOC_DAIFMT_NB_NF |
60 SND_SOC_DAIFMT_CBM_CFM);
61 if (ret < 0) {
62 printk(KERN_ERR "can't set cpu DAI configuration\n");
63 return ret;
64 }
65
66 /* Set the codec system clock for DAC and ADC */
67 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
68 SND_SOC_CLOCK_IN);
69 if (ret < 0) {
70 printk(KERN_ERR "can't set codec system clock\n");
71 return ret;
72 }
73
74 return 0;
75}
76
77static struct snd_soc_ops omap2evm_ops = {
78 .hw_params = omap2evm_hw_params,
79};
80
81/* Digital audio interface glue - connects codec <--> CPU */
82static struct snd_soc_dai_link omap2evm_dai = {
83 .name = "TWL4030",
84 .stream_name = "TWL4030",
85 .cpu_dai_name = "omap-mcbsp-dai.1",
86 .codec_dai_name = "twl4030-hifi",
87 .platform_name = "omap-pcm-audio",
88 .codec_name = "twl4030-codec",
89 .ops = &omap2evm_ops,
90};
91
92/* Audio machine driver */
93static struct snd_soc_card snd_soc_omap2evm = {
94 .name = "omap2evm",
95 .dai_link = &omap2evm_dai,
96 .num_links = 1,
97};
98
99static struct platform_device *omap2evm_snd_device;
100
101static int __init omap2evm_soc_init(void)
102{
103 int ret;
104
105 if (!machine_is_omap2evm())
106 return -ENODEV;
107 printk(KERN_INFO "omap2evm SoC init\n");
108
109 omap2evm_snd_device = platform_device_alloc("soc-audio", -1);
110 if (!omap2evm_snd_device) {
111 printk(KERN_ERR "Platform device allocation failed\n");
112 return -ENOMEM;
113 }
114
115 platform_set_drvdata(omap2evm_snd_device, &snd_soc_omap2evm);
116
117 ret = platform_device_add(omap2evm_snd_device);
118 if (ret)
119 goto err1;
120
121 return 0;
122
123err1:
124 printk(KERN_ERR "Unable to add platform device\n");
125 platform_device_put(omap2evm_snd_device);
126
127 return ret;
128}
129module_init(omap2evm_soc_init);
130
131static void __exit omap2evm_soc_exit(void)
132{
133 platform_device_unregister(omap2evm_snd_device);
134}
135module_exit(omap2evm_soc_exit);
136
137MODULE_AUTHOR("Arun KS <arunks@mistralsolutions.com>");
138MODULE_DESCRIPTION("ALSA SoC omap2evm");
139MODULE_LICENSE("GPL");
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 2afabaf59491..1a591f1ebfbd 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = {
151 .hw_params = raumfeld_cs4270_hw_params, 151 .hw_params = raumfeld_cs4270_hw_params,
152}; 152};
153 153
154static int raumfeld_line_suspend(struct snd_soc_card *card) 154static int raumfeld_analog_suspend(struct snd_soc_card *card)
155{ 155{
156 raumfeld_enable_audio(false); 156 raumfeld_enable_audio(false);
157 return 0; 157 return 0;
158} 158}
159 159
160static int raumfeld_line_resume(struct snd_soc_card *card) 160static int raumfeld_analog_resume(struct snd_soc_card *card)
161{ 161{
162 raumfeld_enable_audio(true); 162 raumfeld_enable_audio(true);
163 return 0; 163 return 0;
@@ -225,32 +225,53 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
225 .hw_params = raumfeld_ak4104_hw_params, 225 .hw_params = raumfeld_ak4104_hw_params,
226}; 226};
227 227
228static struct snd_soc_dai_link raumfeld_dai[] = { 228#define DAI_LINK_CS4270 \
229{ \
230 .name = "CS4270", \
231 .stream_name = "CS4270", \
232 .cpu_dai_name = "pxa-ssp-dai.0", \
233 .platform_name = "pxa-pcm-audio", \
234 .codec_dai_name = "cs4270-hifi", \
235 .codec_name = "cs4270-codec.0-0048", \
236 .ops = &raumfeld_cs4270_ops, \
237}
238
239#define DAI_LINK_AK4104 \
240{ \
241 .name = "ak4104", \
242 .stream_name = "Playback", \
243 .cpu_dai_name = "pxa-ssp-dai.1", \
244 .codec_dai_name = "ak4104-hifi", \
245 .platform_name = "pxa-pcm-audio", \
246 .ops = &raumfeld_ak4104_ops, \
247 .codec_name = "spi0.0", \
248}
249
250static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] =
229{ 251{
230 .name = "ak4104", 252 DAI_LINK_CS4270,
231 .stream_name = "Playback", 253 DAI_LINK_AK4104,
232 .cpu_dai_name = "pxa-ssp-dai.1", 254};
233 .codec_dai_name = "ak4104-hifi", 255
234 .platform_name = "pxa-pcm-audio", 256static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] =
235 .ops = &raumfeld_ak4104_ops,
236 .codec_name = "ak4104-codec.0",
237},
238{ 257{
239 .name = "CS4270", 258 DAI_LINK_CS4270,
240 .stream_name = "CS4270", 259};
241 .cpu_dai_name = "pxa-ssp-dai.0", 260
242 .platform_name = "pxa-pcm-audio", 261static struct snd_soc_card snd_soc_raumfeld_connector = {
243 .codec_dai_name = "cs4270-hifi", 262 .name = "Raumfeld Connector",
244 .codec_name = "cs4270-codec.0-0048", 263 .dai_link = snd_soc_raumfeld_connector_dai,
245 .ops = &raumfeld_cs4270_ops, 264 .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai),
246},}; 265 .suspend_post = raumfeld_analog_suspend,
247 266 .resume_pre = raumfeld_analog_resume,
248static struct snd_soc_card snd_soc_raumfeld = { 267};
249 .name = "Raumfeld", 268
250 .dai_link = raumfeld_dai, 269static struct snd_soc_card snd_soc_raumfeld_speaker = {
251 .suspend_post = raumfeld_line_suspend, 270 .name = "Raumfeld Speaker",
252 .resume_pre = raumfeld_line_resume, 271 .dai_link = snd_soc_raumfeld_speaker_dai,
253 .num_links = ARRAY_SIZE(raumfeld_dai), 272 .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai),
273 .suspend_post = raumfeld_analog_suspend,
274 .resume_pre = raumfeld_analog_resume,
254}; 275};
255 276
256static struct platform_device *raumfeld_audio_device; 277static struct platform_device *raumfeld_audio_device;
@@ -271,22 +292,25 @@ static int __init raumfeld_audio_init(void)
271 292
272 set_max9485_clk(MAX9485_MCLK_FREQ_122880); 293 set_max9485_clk(MAX9485_MCLK_FREQ_122880);
273 294
274 /* Register LINE and SPDIF */ 295 /* Register analog device */
275 raumfeld_audio_device = platform_device_alloc("soc-audio", 0); 296 raumfeld_audio_device = platform_device_alloc("soc-audio", 0);
276 if (!raumfeld_audio_device) 297 if (!raumfeld_audio_device)
277 return -ENOMEM; 298 return -ENOMEM;
278 299
279 platform_set_drvdata(raumfeld_audio_device,
280 &snd_soc_raumfeld);
281 ret = platform_device_add(raumfeld_audio_device);
282
283 /* no S/PDIF on Speakers */
284 if (machine_is_raumfeld_speaker()) 300 if (machine_is_raumfeld_speaker())
301 platform_set_drvdata(raumfeld_audio_device,
302 &snd_soc_raumfeld_speaker);
303
304 if (machine_is_raumfeld_connector())
305 platform_set_drvdata(raumfeld_audio_device,
306 &snd_soc_raumfeld_connector);
307
308 ret = platform_device_add(raumfeld_audio_device);
309 if (ret < 0)
285 return ret; 310 return ret;
286 311
287 raumfeld_enable_audio(true); 312 raumfeld_enable_audio(true);
288 313 return 0;
289 return ret;
290} 314}
291 315
292static void __exit raumfeld_audio_exit(void) 316static void __exit raumfeld_audio_exit(void)
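
The raumfeld.c rework above factors the DAI links into DAI_LINK_CS4270/DAI_LINK_AK4104 macros so the Connector card registers both links while the Speaker card gets only the analog one, and the right card is picked per machine before the soc-audio device is added. A condensed sketch of that macro-reuse pattern follows; struct dai_link, struct card and the is_speaker flag are placeholders, not the ASoC types.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Placeholder for snd_soc_dai_link: just a name here. */
struct dai_link {
        const char *name;
};

struct card {
        const char *name;
        const struct dai_link *links;
        unsigned int num_links;
};

/* Shared link definitions, reused by both card variants. */
#define DAI_LINK_CS4270 { .name = "CS4270" }
#define DAI_LINK_AK4104 { .name = "ak4104" }

static const struct dai_link connector_links[] = {
        DAI_LINK_CS4270,
        DAI_LINK_AK4104,
};

static const struct dai_link speaker_links[] = {
        DAI_LINK_CS4270,
};

static const struct card connector_card = {
        .name = "Raumfeld Connector",
        .links = connector_links,
        .num_links = ARRAY_SIZE(connector_links),
};

static const struct card speaker_card = {
        .name = "Raumfeld Speaker",
        .links = speaker_links,
        .num_links = ARRAY_SIZE(speaker_links),
};

int main(void)
{
        int is_speaker = 1;             /* stand-in for machine_is_*() */
        const struct card *c = is_speaker ? &speaker_card : &connector_card;
        unsigned int i;

        printf("registering %s with %u link(s)\n", c->name, c->num_links);
        for (i = 0; i < c->num_links; i++)
                printf("  link: %s\n", c->links[i].name);
        return 0;
}
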
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index bb7cd5812945..d75043ed7fc0 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1306,10 +1306,6 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
1306 /* no, then find CPU DAI from registered DAIs*/ 1306 /* no, then find CPU DAI from registered DAIs*/
1307 list_for_each_entry(cpu_dai, &dai_list, list) { 1307 list_for_each_entry(cpu_dai, &dai_list, list) {
1308 if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) { 1308 if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) {
1309
1310 if (!try_module_get(cpu_dai->dev->driver->owner))
1311 return -ENODEV;
1312
1313 rtd->cpu_dai = cpu_dai; 1309 rtd->cpu_dai = cpu_dai;
1314 goto find_codec; 1310 goto find_codec;
1315 } 1311 }
@@ -1622,11 +1618,15 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num)
1622 1618
1623 /* probe the cpu_dai */ 1619 /* probe the cpu_dai */
1624 if (!cpu_dai->probed) { 1620 if (!cpu_dai->probed) {
1621 if (!try_module_get(cpu_dai->dev->driver->owner))
1622 return -ENODEV;
1623
1625 if (cpu_dai->driver->probe) { 1624 if (cpu_dai->driver->probe) {
1626 ret = cpu_dai->driver->probe(cpu_dai); 1625 ret = cpu_dai->driver->probe(cpu_dai);
1627 if (ret < 0) { 1626 if (ret < 0) {
1628 printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n", 1627 printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n",
1629 cpu_dai->name); 1628 cpu_dai->name);
1629 module_put(cpu_dai->dev->driver->owner);
1630 return ret; 1630 return ret;
1631 } 1631 }
1632 } 1632 }
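
The soc-core.c hunks move the try_module_get() on the CPU DAI's owner from link binding into the probe path and drop the reference again if the DAI's probe callback fails, keeping the count balanced. A minimal sketch of that acquire/rollback ordering is below; fake_module_get(), fake_module_put() and dai_probe() are invented stand-ins for the module refcounting and the driver callback.

#include <stdio.h>

/* Fake module reference counting, standing in for try_module_get/module_put. */
static int refcount;

static int fake_module_get(void) { refcount++; return 1; }
static void fake_module_put(void) { refcount--; }

/* Hypothetical DAI probe callback that can fail. */
static int dai_probe(int should_fail)
{
        return should_fail ? -1 : 0;
}

/*
 * Probe path: take the reference only when we are actually going to
 * probe, and release it again on failure, mirroring the module_put()
 * added in the error branch above.
 */
static int probe_dai_link(int should_fail)
{
        int ret;

        if (!fake_module_get())
                return -1;

        ret = dai_probe(should_fail);
        if (ret < 0) {
                fake_module_put();
                return ret;
        }
        return 0;
}

int main(void)
{
        probe_dai_link(1);      /* failing probe: ref is dropped again */
        probe_dai_link(0);      /* successful probe: ref is kept */
        printf("outstanding refs: %d\n", refcount);     /* prints 1 */
        return 0;
}
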
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 456617e63789..999bb08cdfb1 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1110,7 +1110,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
1110 trace_snd_soc_dapm_start(card); 1110 trace_snd_soc_dapm_start(card);
1111 1111
1112 list_for_each_entry(d, &card->dapm_list, list) 1112 list_for_each_entry(d, &card->dapm_list, list)
1113 if (d->n_widgets) 1113 if (d->n_widgets || d->codec == NULL)
1114 d->dev_power = 0; 1114 d->dev_power = 0;
1115 1115
1116 /* Check which widgets we need to power and store them in 1116 /* Check which widgets we need to power and store them in
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a90662af2d6b..220c6167dd86 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -48,6 +48,7 @@
48#include <linux/usb/audio.h> 48#include <linux/usb/audio.h>
49#include <linux/usb/audio-v2.h> 49#include <linux/usb/audio-v2.h>
50 50
51#include <sound/control.h>
51#include <sound/core.h> 52#include <sound/core.h>
52#include <sound/info.h> 53#include <sound/info.h>
53#include <sound/pcm.h> 54#include <sound/pcm.h>
@@ -492,14 +493,6 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
492 } 493 }
493 } 494 }
494 495
495 chip->txfr_quirk = 0;
496 err = 1; /* continue */
497 if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
498 /* need some special handlings */
499 if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
500 goto __error;
501 }
502
503 /* 496 /*
504 * For devices with more than one control interface, we assume the 497 * For devices with more than one control interface, we assume the
505 * first contains the audio controls. We might need a more specific 498 * first contains the audio controls. We might need a more specific
@@ -508,6 +501,14 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
508 if (!chip->ctrl_intf) 501 if (!chip->ctrl_intf)
509 chip->ctrl_intf = alts; 502 chip->ctrl_intf = alts;
510 503
504 chip->txfr_quirk = 0;
505 err = 1; /* continue */
506 if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
507 /* need some special handlings */
508 if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
509 goto __error;
510 }
511
511 if (err > 0) { 512 if (err > 0) {
512 /* create normal USB audio interfaces */ 513 /* create normal USB audio interfaces */
513 if (snd_usb_create_streams(chip, ifnum) < 0 || 514 if (snd_usb_create_streams(chip, ifnum) < 0 ||
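
The card.c hunk only reorders the probe path: chip->ctrl_intf is assigned before the per-device quirk hook runs, so quirk handlers (such as the standard-mixer quirk added later in this series) can rely on the control interface already being set. A trivial sketch of that "fill in shared state before running hooks" ordering, with an invented struct chip and quirk_hook():

#include <stdio.h>

struct chip {
        const char *ctrl_intf;
};

/* Hypothetical quirk hook that expects ctrl_intf to be valid already. */
static void quirk_hook(const struct chip *chip)
{
        printf("quirk sees ctrl_intf = %s\n",
               chip->ctrl_intf ? chip->ctrl_intf : "(null)");
}

int main(void)
{
        struct chip chip = { 0 };

        /* Set the shared state first, then run the hook, as the
         * reordered probe path above does. */
        chip.ctrl_intf = "intf0";
        quirk_hook(&chip);
        return 0;
}
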
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index eab06edcc9b7..c22fa76e363a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -86,16 +86,6 @@ struct mixer_build {
86 const struct usbmix_selector_map *selector_map; 86 const struct usbmix_selector_map *selector_map;
87}; 87};
88 88
89enum {
90 USB_MIXER_BOOLEAN,
91 USB_MIXER_INV_BOOLEAN,
92 USB_MIXER_S8,
93 USB_MIXER_U8,
94 USB_MIXER_S16,
95 USB_MIXER_U16,
96};
97
98
99/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/ 89/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
100enum { 90enum {
101 USB_XU_CLOCK_RATE = 0xe301, 91 USB_XU_CLOCK_RATE = 0xe301,
@@ -535,20 +525,21 @@ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_ou
535 * if failed, give up and free the control instance. 525 * if failed, give up and free the control instance.
536 */ 526 */
537 527
538static int add_control_to_empty(struct mixer_build *state, struct snd_kcontrol *kctl) 528int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
529 struct snd_kcontrol *kctl)
539{ 530{
540 struct usb_mixer_elem_info *cval = kctl->private_data; 531 struct usb_mixer_elem_info *cval = kctl->private_data;
541 int err; 532 int err;
542 533
543 while (snd_ctl_find_id(state->chip->card, &kctl->id)) 534 while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
544 kctl->id.index++; 535 kctl->id.index++;
545 if ((err = snd_ctl_add(state->chip->card, kctl)) < 0) { 536 if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) {
546 snd_printd(KERN_ERR "cannot add control (err = %d)\n", err); 537 snd_printd(KERN_ERR "cannot add control (err = %d)\n", err);
547 return err; 538 return err;
548 } 539 }
549 cval->elem_id = &kctl->id; 540 cval->elem_id = &kctl->id;
550 cval->next_id_elem = state->mixer->id_elems[cval->id]; 541 cval->next_id_elem = mixer->id_elems[cval->id];
551 state->mixer->id_elems[cval->id] = cval; 542 mixer->id_elems[cval->id] = cval;
552 return 0; 543 return 0;
553} 544}
554 545
@@ -984,6 +975,9 @@ static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
984 .put = NULL, 975 .put = NULL,
985}; 976};
986 977
978/* This symbol is exported in order to allow the mixer quirks to
979 * hook up to the standard feature unit control mechanism */
980struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;
987 981
988/* 982/*
989 * build a feature control 983 * build a feature control
@@ -1176,7 +1170,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
1176 1170
1177 snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n", 1171 snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
1178 cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res); 1172 cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res);
1179 add_control_to_empty(state, kctl); 1173 snd_usb_mixer_add_control(state->mixer, kctl);
1180} 1174}
1181 1175
1182 1176
@@ -1340,7 +1334,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state,
1340 1334
1341 snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n", 1335 snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
1342 cval->id, kctl->id.name, cval->channels, cval->min, cval->max); 1336 cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
1343 add_control_to_empty(state, kctl); 1337 snd_usb_mixer_add_control(state->mixer, kctl);
1344} 1338}
1345 1339
1346 1340
@@ -1641,7 +1635,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw
1641 1635
1642 snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n", 1636 snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
1643 cval->id, kctl->id.name, cval->channels, cval->min, cval->max); 1637 cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
1644 if ((err = add_control_to_empty(state, kctl)) < 0) 1638 if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
1645 return err; 1639 return err;
1646 } 1640 }
1647 return 0; 1641 return 0;
@@ -1858,7 +1852,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void
1858 1852
1859 snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n", 1853 snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
1860 cval->id, kctl->id.name, desc->bNrInPins); 1854 cval->id, kctl->id.name, desc->bNrInPins);
1861 if ((err = add_control_to_empty(state, kctl)) < 0) 1855 if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
1862 return err; 1856 return err;
1863 1857
1864 return 0; 1858 return 0;
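
In mixer.c, add_control_to_empty() becomes the exported snd_usb_mixer_add_control(), taking the mixer object instead of the parser state so quirk code outside this file can register controls too; note how it bumps kctl->id.index until snd_ctl_find_id() reports the id as free. A standalone sketch of that find-a-free-index loop over a toy control list follows; struct ctl_id, find_id() and add_control() are simplified stand-ins, not the ALSA control API.

#include <stdio.h>
#include <string.h>

#define MAX_CTLS 8

struct ctl_id {
        char name[32];
        int index;
};

static struct ctl_id ctls[MAX_CTLS];
static int num_ctls;

/* Return nonzero if a control with this name/index is already registered. */
static int find_id(const struct ctl_id *id)
{
        int i;

        for (i = 0; i < num_ctls; i++)
                if (ctls[i].index == id->index &&
                    !strcmp(ctls[i].name, id->name))
                        return 1;
        return 0;
}

/* Bump the index until the id is unique, then register it. */
static int add_control(struct ctl_id id)
{
        if (num_ctls >= MAX_CTLS)
                return -1;
        while (find_id(&id))
                id.index++;
        ctls[num_ctls++] = id;
        printf("added %s, index %d\n", id.name, id.index);
        return 0;
}

int main(void)
{
        struct ctl_id a = { "PCM Playback Volume", 0 };

        add_control(a);         /* lands on index 0 */
        add_control(a);         /* collides, lands on index 1 */
        return 0;
}
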
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index b4a2c8165e4b..ae1a14dcfe82 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -24,7 +24,16 @@ struct usb_mixer_interface {
24 u8 xonar_u1_status; 24 u8 xonar_u1_status;
25}; 25};
26 26
27#define MAX_CHANNELS 10 /* max logical channels */ 27#define MAX_CHANNELS 16 /* max logical channels */
28
29enum {
30 USB_MIXER_BOOLEAN,
31 USB_MIXER_INV_BOOLEAN,
32 USB_MIXER_S8,
33 USB_MIXER_U8,
34 USB_MIXER_S16,
35 USB_MIXER_U16,
36};
28 37
29struct usb_mixer_elem_info { 38struct usb_mixer_elem_info {
30 struct usb_mixer_interface *mixer; 39 struct usb_mixer_interface *mixer;
@@ -55,4 +64,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
55void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer); 64void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer);
56int snd_usb_mixer_activate(struct usb_mixer_interface *mixer); 65int snd_usb_mixer_activate(struct usb_mixer_interface *mixer);
57 66
67int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
68 struct snd_kcontrol *kctl);
69
58#endif /* __USBMIXER_H */ 70#endif /* __USBMIXER_H */
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 9146cffa6ede..3d0f4873112b 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -40,6 +40,8 @@
40#include "mixer_quirks.h" 40#include "mixer_quirks.h"
41#include "helper.h" 41#include "helper.h"
42 42
43extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
44
43/* 45/*
44 * Sound Blaster remote control configuration 46 * Sound Blaster remote control configuration
45 * 47 *
@@ -492,6 +494,69 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
492 return err; 494 return err;
493} 495}
494 496
497/* M-Audio FastTrack Ultra quirks */
498
499/* private_free callback */
500static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
501{
502 kfree(kctl->private_data);
503 kctl->private_data = NULL;
504}
505
506static int snd_maudio_ftu_create_ctl(struct usb_mixer_interface *mixer,
507 int in, int out, const char *name)
508{
509 struct usb_mixer_elem_info *cval;
510 struct snd_kcontrol *kctl;
511
512 cval = kzalloc(sizeof(*cval), GFP_KERNEL);
513 if (!cval)
514 return -ENOMEM;
515
516 cval->id = 5;
517 cval->mixer = mixer;
518 cval->val_type = USB_MIXER_S16;
519 cval->channels = 1;
520 cval->control = out + 1;
521 cval->cmask = 1 << in;
522
523 kctl = snd_ctl_new1(snd_usb_feature_unit_ctl, cval);
524 if (!kctl) {
525 kfree(cval);
526 return -ENOMEM;
527 }
528
529 snprintf(kctl->id.name, sizeof(kctl->id.name), name);
530 kctl->private_free = usb_mixer_elem_free;
531 return snd_usb_mixer_add_control(mixer, kctl);
532}
533
534static int snd_maudio_ftu_create_mixer(struct usb_mixer_interface *mixer)
535{
536 char name[64];
537 int in, out, err;
538
539 for (out = 0; out < 8; out++) {
540 for (in = 0; in < 8; in++) {
541 snprintf(name, sizeof(name),
542 "AIn%d - Out%d Capture Volume", in + 1, out + 1);
543 err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
544 if (err < 0)
545 return err;
546 }
547
548 for (in = 8; in < 16; in++) {
549 snprintf(name, sizeof(name),
550 "DIn%d - Out%d Playback Volume", in - 7, out + 1);
551 err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
552 if (err < 0)
553 return err;
554 }
555 }
556
557 return 0;
558}
559
495void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, 560void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
496 unsigned char samplerate_id) 561 unsigned char samplerate_id)
497{ 562{
@@ -533,6 +598,11 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
533 snd_audigy2nx_proc_read); 598 snd_audigy2nx_proc_read);
534 break; 599 break;
535 600
601 case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
602 case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
603 err = snd_maudio_ftu_create_mixer(mixer);
604 break;
605
536 case USB_ID(0x0b05, 0x1739): 606 case USB_ID(0x0b05, 0x1739):
537 case USB_ID(0x0b05, 0x1743): 607 case USB_ID(0x0b05, 0x1743):
538 err = snd_xonar_u1_controls_create(mixer); 608 err = snd_xonar_u1_controls_create(mixer);
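
The new M-Audio Fast Track Ultra quirk above builds an 8x8 monitor-mixer matrix for the analog inputs plus a second 8x8 block for the digital returns, generating one control per input/output pair, each wired to feature-unit id 5 with a per-input channel mask. A toy sketch of just the name/count generation (no USB or ALSA involved):

#include <stdio.h>

/* Generate the control names the quirk creates: analog inputs 1-8 and
 * digital inputs 1-8 for each of the 8 outputs, 128 controls in total. */
int main(void)
{
        char name[64];
        int in, out, count = 0;

        for (out = 0; out < 8; out++) {
                for (in = 0; in < 8; in++) {
                        snprintf(name, sizeof(name),
                                 "AIn%d - Out%d Capture Volume",
                                 in + 1, out + 1);
                        count++;
                }
                for (in = 8; in < 16; in++) {
                        snprintf(name, sizeof(name),
                                 "DIn%d - Out%d Playback Volume",
                                 in - 7, out + 1);
                        count++;
                }
        }
        printf("last control: %s (total %d)\n", name, count);
        return 0;
}
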
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 78792a8900c3..0b2ae8e1c02d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1988,7 +1988,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1988 .data = & (const struct snd_usb_audio_quirk[]) { 1988 .data = & (const struct snd_usb_audio_quirk[]) {
1989 { 1989 {
1990 .ifnum = 0, 1990 .ifnum = 0,
1991 .type = QUIRK_IGNORE_INTERFACE 1991 .type = QUIRK_AUDIO_STANDARD_MIXER,
1992 }, 1992 },
1993 { 1993 {
1994 .ifnum = 1, 1994 .ifnum = 1,
@@ -2055,7 +2055,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2055 .data = & (const struct snd_usb_audio_quirk[]) { 2055 .data = & (const struct snd_usb_audio_quirk[]) {
2056 { 2056 {
2057 .ifnum = 0, 2057 .ifnum = 0,
2058 .type = QUIRK_IGNORE_INTERFACE 2058 .type = QUIRK_AUDIO_STANDARD_MIXER,
2059 }, 2059 },
2060 { 2060 {
2061 .ifnum = 1, 2061 .ifnum = 1,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index bd13d7257240..2e969cbb393b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -19,6 +19,7 @@
19#include <linux/usb.h> 19#include <linux/usb.h>
20#include <linux/usb/audio.h> 20#include <linux/usb/audio.h>
21 21
22#include <sound/control.h>
22#include <sound/core.h> 23#include <sound/core.h>
23#include <sound/info.h> 24#include <sound/info.h>
24#include <sound/pcm.h> 25#include <sound/pcm.h>
@@ -263,6 +264,20 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
263} 264}
264 265
265/* 266/*
267 * Create a standard mixer for the specified interface.
268 */
269static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
270 struct usb_interface *iface,
271 struct usb_driver *driver,
272 const struct snd_usb_audio_quirk *quirk)
273{
274 if (quirk->ifnum < 0)
275 return 0;
276
277 return snd_usb_create_mixer(chip, quirk->ifnum, 0);
278}
279
280/*
266 * audio-interface quirks 281 * audio-interface quirks
267 * 282 *
268 * returns zero if no standard audio/MIDI parsing is needed. 283 * returns zero if no standard audio/MIDI parsing is needed.
@@ -294,7 +309,8 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
294 [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, 309 [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
295 [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, 310 [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
296 [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, 311 [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
297 [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk 312 [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
313 [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
298 }; 314 };
299 315
300 if (quirk->type < QUIRK_TYPE_COUNT) { 316 if (quirk->type < QUIRK_TYPE_COUNT) {
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 32f2a97f2f14..1e79986b5777 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -84,6 +84,7 @@ enum quirk_type {
84 QUIRK_AUDIO_FIXED_ENDPOINT, 84 QUIRK_AUDIO_FIXED_ENDPOINT,
85 QUIRK_AUDIO_EDIROL_UAXX, 85 QUIRK_AUDIO_EDIROL_UAXX,
86 QUIRK_AUDIO_ALIGN_TRANSFER, 86 QUIRK_AUDIO_ALIGN_TRANSFER,
87 QUIRK_AUDIO_STANDARD_MIXER,
87 88
88 QUIRK_TYPE_COUNT 89 QUIRK_TYPE_COUNT
89}; 90};
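
The final hunks tie the series together: QUIRK_AUDIO_STANDARD_MIXER is appended to the quirk_type enum just before the QUIRK_TYPE_COUNT sentinel and wired into the function-pointer table in snd_usb_create_quirk(), so entries in quirks-table.h can request a standard mixer on a chosen interface. A compact sketch of that enum-indexed dispatch table with a count sentinel follows; the enum values and create_* functions here are simplified stand-ins, not the driver's real quirk handlers.

#include <stdio.h>

enum quirk_type {
        QUIRK_STANDARD_INTERFACE,
        QUIRK_STANDARD_MIXER,   /* new entry, added before the sentinel */
        QUIRK_TYPE_COUNT
};

typedef int (*quirk_func_t)(int ifnum);

static int create_standard_interface(int ifnum)
{
        printf("standard interface on %d\n", ifnum);
        return 0;
}

static int create_standard_mixer(int ifnum)
{
        if (ifnum < 0)          /* nothing to do for "no interface" */
                return 0;
        printf("standard mixer on %d\n", ifnum);
        return 0;
}

/* Table indexed by the enum; the sentinel bounds the lookup. */
static const quirk_func_t quirk_funcs[QUIRK_TYPE_COUNT] = {
        [QUIRK_STANDARD_INTERFACE] = create_standard_interface,
        [QUIRK_STANDARD_MIXER]     = create_standard_mixer,
};

static int create_quirk(enum quirk_type type, int ifnum)
{
        if (type < QUIRK_TYPE_COUNT && quirk_funcs[type])
                return quirk_funcs[type](ifnum);
        fprintf(stderr, "unknown quirk type %d\n", type);
        return -1;
}

int main(void)
{
        return create_quirk(QUIRK_STANDARD_MIXER, 0);
}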